| nwo (string, len 5-86) | sha (string, len 40) | path (string, len 4-189) | language (string, 1 class) | identifier (string, len 1-94) | parameters (string, len 2-4.03k) | argument_list (string, 1 class) | return_statement (string, len 0-11.5k) | docstring (string, len 1-33.2k) | docstring_summary (string, len 0-5.15k) | docstring_tokens (sequence) | function (string, len 34-151k) | function_tokens (sequence) | url (string, len 90-278) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/locators.py | python | PyPIRPCLocator.__init__ | (self, url, **kwargs) | Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor. | Initialise an instance. | [
"Initialise",
"an",
"instance",
"."
] | def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0) | [
"def",
"__init__",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"PyPIRPCLocator",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"base_url",
"=",
"url",
"self",
".",
"client",
"=",
"ServerProxy",
"(",
"url",
",",
"timeout",
"=",
"3.0",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/locators.py#L344-L353 |
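A minimal usage sketch for the locator sampled above. It assumes distlib is installed; the endpoint URL is an assumption, and PyPI has since deprecated its XML-RPC API, so treat this as illustrative only.

```python
from distlib.locators import PyPIRPCLocator

locator = PyPIRPCLocator('https://pypi.org/pypi')  # endpoint URL is an assumption
dist = locator.locate('requests')                  # look up a project by name
if dist is not None:
    print(dist.name, dist.version)
```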
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/floatspin.py | python | FloatSpin.__init__ | (self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=(95,-1), style=0, value=0.0, min_val=None, max_val=None,
increment=1.0, digits=-1, agwStyle=FS_LEFT,
name="FloatSpin") | Default class constructor.
:param `parent`: the :class:`FloatSpin` parent;
:param `id`: an identifier for the control: a value of -1 is taken to mean a default;
:param `pos`: the control position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform;
:param `size`: the control size. A value of (-1, -1) indicates a default size,
chosen by either the windowing system or wxPython, depending on platform;
:param `style`: the window style;
:param `value`: is the current value for :class:`FloatSpin`;
:param `min_val`: the minimum value, ignored if ``None``;
:param `max_val`: the maximum value, ignored if ``None``;
:param `increment`: the increment for every :class:`FloatSpinEvent` event;
:param `digits`: number of representative digits for your floating point numbers;
:param `agwStyle`: one of the following bits:
=============== =========== ==================================================
Window Styles Hex Value Description
=============== =========== ==================================================
``FS_READONLY`` 0x1 Sets :class:`FloatSpin` as read-only control.
``FS_LEFT`` 0x2 Horizontally align the underlying :class:`TextCtrl` on the left.
``FS_CENTRE`` 0x4 Horizontally align the underlying :class:`TextCtrl` on center.
``FS_RIGHT`` 0x8 Horizontally align the underlying :class:`TextCtrl` on the right.
=============== =========== ==================================================
:param `name`: the window name. | Default class constructor. | [
"Default",
"class",
"constructor",
"."
] | def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=(95,-1), style=0, value=0.0, min_val=None, max_val=None,
increment=1.0, digits=-1, agwStyle=FS_LEFT,
name="FloatSpin"):
"""
Default class constructor.
:param `parent`: the :class:`FloatSpin` parent;
:param `id`: an identifier for the control: a value of -1 is taken to mean a default;
:param `pos`: the control position. A value of (-1, -1) indicates a default position,
chosen by either the windowing system or wxPython, depending on platform;
:param `size`: the control size. A value of (-1, -1) indicates a default size,
chosen by either the windowing system or wxPython, depending on platform;
:param `style`: the window style;
:param `value`: is the current value for :class:`FloatSpin`;
:param `min_val`: the minimum value, ignored if ``None``;
:param `max_val`: the maximum value, ignored if ``None``;
:param `increment`: the increment for every :class:`FloatSpinEvent` event;
:param `digits`: number of representative digits for your floating point numbers;
:param `agwStyle`: one of the following bits:
=============== =========== ==================================================
Window Styles Hex Value Description
=============== =========== ==================================================
``FS_READONLY`` 0x1 Sets :class:`FloatSpin` as read-only control.
``FS_LEFT`` 0x2 Horizontally align the underlying :class:`TextCtrl` on the left.
``FS_CENTRE`` 0x4 Horizontally align the underlying :class:`TextCtrl` on center.
``FS_RIGHT`` 0x8 Horizontally align the underlying :class:`TextCtrl` on the right.
=============== =========== ==================================================
:param `name`: the window name.
"""
wx.PyControl.__init__(self, parent, id, pos, size, style|wx.NO_BORDER|
wx.NO_FULL_REPAINT_ON_RESIZE | wx.CLIP_CHILDREN,
wx.DefaultValidator, name)
# Don't call SetRange here, because it will try to modify
# self._value whose value doesn't exist yet.
self.SetRangeDontClampValue(min_val, max_val)
self._value = self.ClampValue(FixedPoint(str(value), 20))
self._defaultvalue = self._value
self._increment = FixedPoint(str(increment), 20)
self._spinmodifier = FixedPoint(str(1.0), 20)
self._digits = digits
self._snapticks = False
self._spinbutton = None
self._textctrl = None
self._spinctrl_bestsize = wx.Size(-999, -999)
# start Philip Semanchuk addition
# The textbox & spin button are drawn slightly differently
# depending on the platform. The difference is most pronounced
# under OS X.
if "__WXMAC__" in wx.PlatformInfo:
self._gap = 8
self._spin_top = 3
self._text_left = 4
self._text_top = 4
elif "__WXMSW__" in wx.PlatformInfo:
self._gap = 1
self._spin_top = 0
self._text_left = 0
self._text_top = 0
else:
# GTK
self._gap = -1
self._spin_top = 0
self._text_left = 0
self._text_top = 0
# end Philip Semanchuk addition
self.SetLabel(name)
self.SetForegroundColour(parent.GetForegroundColour())
width = size[0]
height = size[1]
best_size = self.DoGetBestSize()
if width == -1:
width = best_size.GetWidth()
if height == -1:
height = best_size.GetHeight()
self._validkeycode = [43, 44, 45, 46, 69, 101, 127, 314]
self._validkeycode.extend(range(48, 58))
self._validkeycode.extend([wx.WXK_RETURN, wx.WXK_TAB, wx.WXK_BACK,
wx.WXK_LEFT, wx.WXK_RIGHT])
self._spinbutton = wx.SpinButton(self, wx.ID_ANY, wx.DefaultPosition,
size=(-1, height),
style=wx.SP_ARROW_KEYS | wx.SP_VERTICAL |
wx.SP_WRAP)
txtstyle = wx.TE_NOHIDESEL | wx.TE_PROCESS_ENTER
if agwStyle & FS_RIGHT:
txtstyle = txtstyle | wx.TE_RIGHT
elif agwStyle & FS_CENTRE:
txtstyle = txtstyle | wx.TE_CENTER
if agwStyle & FS_READONLY:
txtstyle = txtstyle | wx.TE_READONLY
self._textctrl = FloatTextCtrl(self, wx.ID_ANY, str(self._value),
wx.DefaultPosition,
(width-self._spinbutton.GetSize().GetWidth(), height),
txtstyle)
# start Philip Semanchuk addition
# Setting the textctrl's size in the ctor also sets its min size.
# But the textctrl is entirely controlled by the parent floatspin
# control and should accept whatever size its parent dictates, so
# here we tell it to forget its min size.
self._textctrl.SetMinSize(wx.DefaultSize)
# Setting the spin button's size in the ctor also sets its min size.
# Under OS X that results in a rendering artifact because spin buttons
# are a little shorter than textboxes.
# Setting the min size to the default allows OS X to draw the spin
# button correctly. However, Windows and KDE take the call to
# SetMinSize() as a cue to size the spin button taller than the
# textbox, so we avoid the call there.
if "__WXMAC__" in wx.PlatformInfo:
self._spinbutton.SetMinSize(wx.DefaultSize)
# end Philip Semanchuk addition
self._mainsizer = wx.BoxSizer(wx.HORIZONTAL)
# Ensure the spin button is shown, and the text widget takes
# all remaining free space
self._mainsizer.Add(self._textctrl, 1)
self._mainsizer.Add(self._spinbutton, 0)
self.SetSizer(self._mainsizer)
self._mainsizer.Layout()
self.SetFormat()
self.SetDigits(digits)
# set the value here without generating an event
decimal = locale.localeconv()["decimal_point"]
strs = ("%100." + str(self._digits) + self._textformat[1])%self._value
strs = strs.replace(".", decimal)
strs = strs.strip()
strs = self.ReplaceDoubleZero(strs)
self._textctrl.SetValue(strs)
if not (agwStyle & FS_READONLY):
self.Bind(wx.EVT_SPIN_UP, self.OnSpinUp)
self.Bind(wx.EVT_SPIN_DOWN, self.OnSpinDown)
self._spinbutton.Bind(wx.EVT_LEFT_DOWN, self.OnSpinMouseDown)
self._textctrl.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter)
self._textctrl.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self._spinbutton.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.Bind(wx.EVT_SET_FOCUS, self.OnFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_SIZE, self.OnSize)
# start Philip Semanchuk move
self.SetBestSize((width, height)) | [
"def",
"__init__",
"(",
"self",
",",
"parent",
",",
"id",
"=",
"wx",
".",
"ID_ANY",
",",
"pos",
"=",
"wx",
".",
"DefaultPosition",
",",
"size",
"=",
"(",
"95",
",",
"-",
"1",
")",
",",
"style",
"=",
"0",
",",
"value",
"=",
"0.0",
",",
"min_val",
"=",
"None",
",",
"max_val",
"=",
"None",
",",
"increment",
"=",
"1.0",
",",
"digits",
"=",
"-",
"1",
",",
"agwStyle",
"=",
"FS_LEFT",
",",
"name",
"=",
"\"FloatSpin\"",
")",
":",
"wx",
".",
"PyControl",
".",
"__init__",
"(",
"self",
",",
"parent",
",",
"id",
",",
"pos",
",",
"size",
",",
"style",
"|",
"wx",
".",
"NO_BORDER",
"|",
"wx",
".",
"NO_FULL_REPAINT_ON_RESIZE",
"|",
"wx",
".",
"CLIP_CHILDREN",
",",
"wx",
".",
"DefaultValidator",
",",
"name",
")",
"# Don't call SetRange here, because it will try to modify",
"# self._value whose value doesn't exist yet.",
"self",
".",
"SetRangeDontClampValue",
"(",
"min_val",
",",
"max_val",
")",
"self",
".",
"_value",
"=",
"self",
".",
"ClampValue",
"(",
"FixedPoint",
"(",
"str",
"(",
"value",
")",
",",
"20",
")",
")",
"self",
".",
"_defaultvalue",
"=",
"self",
".",
"_value",
"self",
".",
"_increment",
"=",
"FixedPoint",
"(",
"str",
"(",
"increment",
")",
",",
"20",
")",
"self",
".",
"_spinmodifier",
"=",
"FixedPoint",
"(",
"str",
"(",
"1.0",
")",
",",
"20",
")",
"self",
".",
"_digits",
"=",
"digits",
"self",
".",
"_snapticks",
"=",
"False",
"self",
".",
"_spinbutton",
"=",
"None",
"self",
".",
"_textctrl",
"=",
"None",
"self",
".",
"_spinctrl_bestsize",
"=",
"wx",
".",
"Size",
"(",
"-",
"999",
",",
"-",
"999",
")",
"# start Philip Semanchuk addition",
"# The textbox & spin button are drawn slightly differently ",
"# depending on the platform. The difference is most pronounced",
"# under OS X.",
"if",
"\"__WXMAC__\"",
"in",
"wx",
".",
"PlatformInfo",
":",
"self",
".",
"_gap",
"=",
"8",
"self",
".",
"_spin_top",
"=",
"3",
"self",
".",
"_text_left",
"=",
"4",
"self",
".",
"_text_top",
"=",
"4",
"elif",
"\"__WXMSW__\"",
"in",
"wx",
".",
"PlatformInfo",
":",
"self",
".",
"_gap",
"=",
"1",
"self",
".",
"_spin_top",
"=",
"0",
"self",
".",
"_text_left",
"=",
"0",
"self",
".",
"_text_top",
"=",
"0",
"else",
":",
"# GTK",
"self",
".",
"_gap",
"=",
"-",
"1",
"self",
".",
"_spin_top",
"=",
"0",
"self",
".",
"_text_left",
"=",
"0",
"self",
".",
"_text_top",
"=",
"0",
"# end Philip Semanchuk addition",
"self",
".",
"SetLabel",
"(",
"name",
")",
"self",
".",
"SetForegroundColour",
"(",
"parent",
".",
"GetForegroundColour",
"(",
")",
")",
"width",
"=",
"size",
"[",
"0",
"]",
"height",
"=",
"size",
"[",
"1",
"]",
"best_size",
"=",
"self",
".",
"DoGetBestSize",
"(",
")",
"if",
"width",
"==",
"-",
"1",
":",
"width",
"=",
"best_size",
".",
"GetWidth",
"(",
")",
"if",
"height",
"==",
"-",
"1",
":",
"height",
"=",
"best_size",
".",
"GetHeight",
"(",
")",
"self",
".",
"_validkeycode",
"=",
"[",
"43",
",",
"44",
",",
"45",
",",
"46",
",",
"69",
",",
"101",
",",
"127",
",",
"314",
"]",
"self",
".",
"_validkeycode",
".",
"extend",
"(",
"range",
"(",
"48",
",",
"58",
")",
")",
"self",
".",
"_validkeycode",
".",
"extend",
"(",
"[",
"wx",
".",
"WXK_RETURN",
",",
"wx",
".",
"WXK_TAB",
",",
"wx",
".",
"WXK_BACK",
",",
"wx",
".",
"WXK_LEFT",
",",
"wx",
".",
"WXK_RIGHT",
"]",
")",
"self",
".",
"_spinbutton",
"=",
"wx",
".",
"SpinButton",
"(",
"self",
",",
"wx",
".",
"ID_ANY",
",",
"wx",
".",
"DefaultPosition",
",",
"size",
"=",
"(",
"-",
"1",
",",
"height",
")",
",",
"style",
"=",
"wx",
".",
"SP_ARROW_KEYS",
"|",
"wx",
".",
"SP_VERTICAL",
"|",
"wx",
".",
"SP_WRAP",
")",
"txtstyle",
"=",
"wx",
".",
"TE_NOHIDESEL",
"|",
"wx",
".",
"TE_PROCESS_ENTER",
"if",
"agwStyle",
"&",
"FS_RIGHT",
":",
"txtstyle",
"=",
"txtstyle",
"|",
"wx",
".",
"TE_RIGHT",
"elif",
"agwStyle",
"&",
"FS_CENTRE",
":",
"txtstyle",
"=",
"txtstyle",
"|",
"wx",
".",
"TE_CENTER",
"if",
"agwStyle",
"&",
"FS_READONLY",
":",
"txtstyle",
"=",
"txtstyle",
"|",
"wx",
".",
"TE_READONLY",
"self",
".",
"_textctrl",
"=",
"FloatTextCtrl",
"(",
"self",
",",
"wx",
".",
"ID_ANY",
",",
"str",
"(",
"self",
".",
"_value",
")",
",",
"wx",
".",
"DefaultPosition",
",",
"(",
"width",
"-",
"self",
".",
"_spinbutton",
".",
"GetSize",
"(",
")",
".",
"GetWidth",
"(",
")",
",",
"height",
")",
",",
"txtstyle",
")",
"# start Philip Semanchuk addition",
"# Setting the textctrl's size in the ctor also sets its min size. ",
"# But the textctrl is entirely controlled by the parent floatspin ",
"# control and should accept whatever size its parent dictates, so",
"# here we tell it to forget its min size.",
"self",
".",
"_textctrl",
".",
"SetMinSize",
"(",
"wx",
".",
"DefaultSize",
")",
"# Setting the spin buttons's size in the ctor also sets its min size. ",
"# Under OS X that results in a rendering artifact because spin buttons",
"# are a little shorter than textboxes. ",
"# Setting the min size to the default allows OS X to draw the spin",
"# button correctly. However, Windows and KDE take the call to",
"# SetMinSize() as a cue to size the spin button taller than the",
"# textbox, so we avoid the call there.",
"if",
"\"__WXMAC__\"",
"in",
"wx",
".",
"PlatformInfo",
":",
"self",
".",
"_spinbutton",
".",
"SetMinSize",
"(",
"wx",
".",
"DefaultSize",
")",
"# end Philip Semanchuk addition",
"self",
".",
"_mainsizer",
"=",
"wx",
".",
"BoxSizer",
"(",
"wx",
".",
"HORIZONTAL",
")",
"# Ensure the spin button is shown, and the text widget takes",
"# all remaining free space",
"self",
".",
"_mainsizer",
".",
"Add",
"(",
"self",
".",
"_textctrl",
",",
"1",
")",
"self",
".",
"_mainsizer",
".",
"Add",
"(",
"self",
".",
"_spinbutton",
",",
"0",
")",
"self",
".",
"SetSizer",
"(",
"self",
".",
"_mainsizer",
")",
"self",
".",
"_mainsizer",
".",
"Layout",
"(",
")",
"self",
".",
"SetFormat",
"(",
")",
"self",
".",
"SetDigits",
"(",
"digits",
")",
"# set the value here without generating an event",
"decimal",
"=",
"locale",
".",
"localeconv",
"(",
")",
"[",
"\"decimal_point\"",
"]",
"strs",
"=",
"(",
"\"%100.\"",
"+",
"str",
"(",
"self",
".",
"_digits",
")",
"+",
"self",
".",
"_textformat",
"[",
"1",
"]",
")",
"%",
"self",
".",
"_value",
"strs",
"=",
"strs",
".",
"replace",
"(",
"\".\"",
",",
"decimal",
")",
"strs",
"=",
"strs",
".",
"strip",
"(",
")",
"strs",
"=",
"self",
".",
"ReplaceDoubleZero",
"(",
"strs",
")",
"self",
".",
"_textctrl",
".",
"SetValue",
"(",
"strs",
")",
"if",
"not",
"(",
"agwStyle",
"&",
"FS_READONLY",
")",
":",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_SPIN_UP",
",",
"self",
".",
"OnSpinUp",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_SPIN_DOWN",
",",
"self",
".",
"OnSpinDown",
")",
"self",
".",
"_spinbutton",
".",
"Bind",
"(",
"wx",
".",
"EVT_LEFT_DOWN",
",",
"self",
".",
"OnSpinMouseDown",
")",
"self",
".",
"_textctrl",
".",
"Bind",
"(",
"wx",
".",
"EVT_TEXT_ENTER",
",",
"self",
".",
"OnTextEnter",
")",
"self",
".",
"_textctrl",
".",
"Bind",
"(",
"wx",
".",
"EVT_MOUSEWHEEL",
",",
"self",
".",
"OnMouseWheel",
")",
"self",
".",
"_spinbutton",
".",
"Bind",
"(",
"wx",
".",
"EVT_MOUSEWHEEL",
",",
"self",
".",
"OnMouseWheel",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_SET_FOCUS",
",",
"self",
".",
"OnFocus",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_KILL_FOCUS",
",",
"self",
".",
"OnKillFocus",
")",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_SIZE",
",",
"self",
".",
"OnSize",
")",
"# start Philip Semanchuk move",
"self",
".",
"SetBestSize",
"(",
"(",
"width",
",",
"height",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/floatspin.py#L332-L495 |
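A minimal sketch of constructing the control documented above inside a running wx application; it assumes wxPython with the AGW package is available and mirrors the constructor's documented parameters.

```python
import wx
import wx.lib.agw.floatspin as FS

app = wx.App(False)
frame = wx.Frame(None, title="FloatSpin demo")
spin = FS.FloatSpin(frame, -1, min_val=0, max_val=10,
                    increment=0.25, value=1.0, agwStyle=FS.FS_LEFT)
spin.SetFormat("%f")
spin.SetDigits(2)  # show two decimal places
frame.Show()
app.MainLoop()
```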
||
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/ops/tensor_array_grad.py | python | _TensorArrayReadGrad | (op, grad) | return [None, None, w_g.flow] | Gradient for TensorArrayRead.
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`. | Gradient for TensorArrayRead. | [
"Gradient",
"for",
"TensorArrayRead",
"."
] | def _TensorArrayReadGrad(op, grad):
"""Gradient for TensorArrayRead.
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
index = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle).grad(
source=grad_source, flow=flow)
w_g = g.write(index, grad)
return [None, None, w_g.flow] | [
"def",
"_TensorArrayReadGrad",
"(",
"op",
",",
"grad",
")",
":",
"# Note: the forward flow dependency in the call to grad() is necessary for",
"# the case of dynamic sized TensorArrays. When creating the gradient",
"# TensorArray, the final size of the forward array must be known.",
"# For this we need to wait until it has been created by depending on",
"# the input flow of the original op.",
"handle",
"=",
"op",
".",
"inputs",
"[",
"0",
"]",
"index",
"=",
"op",
".",
"inputs",
"[",
"1",
"]",
"flow",
"=",
"op",
".",
"inputs",
"[",
"2",
"]",
"dtype",
"=",
"op",
".",
"get_attr",
"(",
"\"dtype\"",
")",
"grad_source",
"=",
"_GetGradSource",
"(",
"grad",
")",
"g",
"=",
"tensor_array_ops",
".",
"TensorArray",
"(",
"dtype",
"=",
"dtype",
",",
"handle",
"=",
"handle",
")",
".",
"grad",
"(",
"source",
"=",
"grad_source",
",",
"flow",
"=",
"flow",
")",
"w_g",
"=",
"g",
".",
"write",
"(",
"index",
",",
"grad",
")",
"return",
"[",
"None",
",",
"None",
",",
"w_g",
".",
"flow",
"]"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/tensor_array_grad.py#L69-L93 |
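A hedged sketch of graph-mode code whose backward pass would exercise the gradient function above; it assumes a TF1-era TensorFlow where `tf.gradients` and graph-mode `TensorArray` are available.

```python
import tensorflow as tf  # assumes a 1.x-era TensorFlow

ta = tf.TensorArray(dtype=tf.float32, size=2)
x = tf.constant(3.0)
ta = ta.write(0, x)                  # forward write
y = ta.read(0) * ta.read(0)          # TensorArrayRead ops in the forward graph
grads = tf.gradients(y, [x])         # triggers _TensorArrayReadGrad, which writes
                                     # grad into a gradient TensorArray
```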
|
stan-dev/math | 5fd79f89933269a4ca4d8dd1fde2a36d53d4768c | runChecks.py | python | grep_patterns | (type, folder, patterns_and_messages, exclude_filters=[]) | return errors | Checks the files in the provided folder for matches
with any of the patterns. It returns an array of
messages and the provided type with
the line number. This check ignores comments.
@param type: type or group of the check, listed with the error
@param folder: folder in which to check for the pattern
@param patterns_and_messages: a list of patterns and messages that
are printed if the pattern is matched
@param exclude_filters: a list of files or folders that are excluded from
the check | Checks the files in the provided folder for matches
with any of the patterns. It returns an array of
messages and the provided type with
the line number. This check ignores comments. | [
"Checks",
"the",
"files",
"in",
"the",
"provided",
"folder",
"for",
"matches",
"with",
"any",
"of",
"the",
"patterns",
".",
"It",
"returns",
"an",
"array",
"of",
"messages",
"and",
"the",
"provided",
"type",
"with",
"the",
"line",
"number",
".",
"This",
"check",
"ignores",
"comments",
"."
] | def grep_patterns(type, folder, patterns_and_messages, exclude_filters=[]):
"""Checks the files in the provided folder for matches
with any of the patterns. It returns an array of
messages and the provided type with
the line number. This check ignores comments.
@param type: type or group of the check, listed with the error
@param folder: folder in which to check for the pattern
@param patterns_and_messages: a list of patterns and messages that
are printed if the pattern is matched
@param exclude_filters: a list of files or folders that are excluded from
the check
"""
errors = []
folder.replace("/", os.sep)
exclude_files = []
for excl in exclude_filters:
exclude_files.extend(files_in_folder(excl))
files = files_in_folder(folder + os.sep + "**")
files = [x for x in files if x not in exclude_files]
for filepath in files:
if os.path.isfile(filepath):
line_num = 0
multi_line_comment = False
old_state_multi_line_comment = False
with open(filepath, "r") as f:
for line in f:
line_num += 1
# exclude multi line comments
if multi_line_comment:
if re.search("\*/", line):
multi_line_comment = False
else:
if re.search("/\*", line):
multi_line_comment = True
# parse the first line in a multi line comment for rare and weird case of
# "pattern /*""
if not multi_line_comment or (
multi_line_comment and not old_state_multi_line_comment
):
for p in patterns_and_messages:
# cover the edge cases where matched patterns
# are behind "//", "/*" or before "*/"
if (
not re.search(".*" + p["pattern"] + ".*\*/.*", line)
and not re.search(".*/\*.*" + p["pattern"], line)
and not re.search(".*//.*" + p["pattern"], line)
and re.search(p["pattern"], line)
):
errors.append(
filepath
+ " at line "
+ str(line_num)
+ ":\n\t"
+ "["
+ type
+ "] "
+ p["message"]
)
old_state_multi_line_comment = multi_line_comment
return errors | [
"def",
"grep_patterns",
"(",
"type",
",",
"folder",
",",
"patterns_and_messages",
",",
"exclude_filters",
"=",
"[",
"]",
")",
":",
"errors",
"=",
"[",
"]",
"folder",
".",
"replace",
"(",
"\"/\"",
",",
"os",
".",
"sep",
")",
"exclude_files",
"=",
"[",
"]",
"for",
"excl",
"in",
"exclude_filters",
":",
"exclude_files",
".",
"extend",
"(",
"files_in_folder",
"(",
"excl",
")",
")",
"files",
"=",
"files_in_folder",
"(",
"folder",
"+",
"os",
".",
"sep",
"+",
"\"**\"",
")",
"files",
"=",
"[",
"x",
"for",
"x",
"in",
"files",
"if",
"x",
"not",
"in",
"exclude_files",
"]",
"for",
"filepath",
"in",
"files",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"line_num",
"=",
"0",
"multi_line_comment",
"=",
"False",
"old_state_multi_line_comment",
"=",
"False",
"with",
"open",
"(",
"filepath",
",",
"\"r\"",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"line_num",
"+=",
"1",
"# exclude multi line comments",
"if",
"multi_line_comment",
":",
"if",
"re",
".",
"search",
"(",
"\"\\*/\"",
",",
"line",
")",
":",
"multi_line_comment",
"=",
"False",
"else",
":",
"if",
"re",
".",
"search",
"(",
"\"/\\*\"",
",",
"line",
")",
":",
"multi_line_comment",
"=",
"True",
"# parse the first line in a multi line comment for rare and weird case of",
"# \"pattern /*\"\"",
"if",
"not",
"multi_line_comment",
"or",
"(",
"multi_line_comment",
"and",
"not",
"old_state_multi_line_comment",
")",
":",
"for",
"p",
"in",
"patterns_and_messages",
":",
"# cover the edge cases where matched patterns",
"# are behind \"//\", \"/*\" or before \"*/\"",
"if",
"(",
"not",
"re",
".",
"search",
"(",
"\".*\"",
"+",
"p",
"[",
"\"pattern\"",
"]",
"+",
"\".*\\*/.*\"",
",",
"line",
")",
"and",
"not",
"re",
".",
"search",
"(",
"\".*/\\*.*\"",
"+",
"p",
"[",
"\"pattern\"",
"]",
",",
"line",
")",
"and",
"not",
"re",
".",
"search",
"(",
"\".*//.*\"",
"+",
"p",
"[",
"\"pattern\"",
"]",
",",
"line",
")",
"and",
"re",
".",
"search",
"(",
"p",
"[",
"\"pattern\"",
"]",
",",
"line",
")",
")",
":",
"errors",
".",
"append",
"(",
"filepath",
"+",
"\" at line \"",
"+",
"str",
"(",
"line_num",
")",
"+",
"\":\\n\\t\"",
"+",
"\"[\"",
"+",
"type",
"+",
"\"] \"",
"+",
"p",
"[",
"\"message\"",
"]",
")",
"old_state_multi_line_comment",
"=",
"multi_line_comment",
"return",
"errors"
] | https://github.com/stan-dev/math/blob/5fd79f89933269a4ca4d8dd1fde2a36d53d4768c/runChecks.py#L66-L125 |
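A hypothetical invocation of the checker above; the folder, pattern, and message are illustrative, not taken from the Stan repository.

```python
patterns = [
    {"pattern": r"std::cout", "message": "use the test framework's logger instead"},
]
errors = grep_patterns("style", "stan/math", patterns,
                       exclude_filters=["stan/math/opencl"])
for err in errors:
    print(err)
```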
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/recfunctions.py | python | get_names_flat | (adtype) | return tuple(listnames) | Returns the field names of the input datatype as a tuple. Nested structure
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb') | Returns the field names of the input datatype as a tuple. Nested structure
are flattened beforehand. | [
"Returns",
"the",
"field",
"names",
"of",
"the",
"input",
"datatype",
"as",
"a",
"tuple",
".",
"Nested",
"structure",
"are",
"flattened",
"beforehand",
"."
] | def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structure
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
Traceback (most recent call last):
...
AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(get_names_flat(current))
return tuple(listnames) | [
"def",
"get_names_flat",
"(",
"adtype",
")",
":",
"listnames",
"=",
"[",
"]",
"names",
"=",
"adtype",
".",
"names",
"for",
"name",
"in",
"names",
":",
"listnames",
".",
"append",
"(",
"name",
")",
"current",
"=",
"adtype",
"[",
"name",
"]",
"if",
"current",
".",
"names",
"is",
"not",
"None",
":",
"listnames",
".",
"extend",
"(",
"get_names_flat",
"(",
"current",
")",
")",
"return",
"tuple",
"(",
"listnames",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/recfunctions.py#L149-L181 |
|
RobotLocomotion/drake | 0e18a34604c45ed65bc9018a54f7610f91cdad5b | tools/lint/find_data.py | python | find_data | (relpath) | Given a relpath like drake/pkg/res.txt or external/repo/pkg/res.txt,
find the data file and return its path | Given a relpath like drake/pkg/res.txt or external/repo/pkg/res.txt,
find the data file and return its path | [
"Given",
"a",
"relpath",
"like",
"drake",
"/",
"pkg",
"/",
"res",
".",
"txt",
"or",
"external",
"/",
"repo",
"/",
"pkg",
"/",
"res",
".",
"txt",
"find",
"the",
"data",
"file",
"and",
"return",
"its",
"path"
] | def find_data(relpath):
"""Given a relpath like drake/pkg/res.txt or external/repo/pkg/res.txt,
find the data file and return its path"""
# Because we are in a py_binary, Bazel's wrapper script sets up our
# $PYTHONPATH to have our resources somewhere on a sys.path entry.
for one_path in sys.path:
possible = os.path.join(one_path, relpath)
if os.path.exists(possible):
return possible
raise IOError(
errno.ENOENT,
"Could not find data {}".format(relpath),
relpath) | [
"def",
"find_data",
"(",
"relpath",
")",
":",
"# Because we are in a py_binary, Bazel's wrapper script sets up our",
"# $PYTHONPATH to have our resources somewhere on a sys.path entry.",
"for",
"one_path",
"in",
"sys",
".",
"path",
":",
"possible",
"=",
"os",
".",
"path",
".",
"join",
"(",
"one_path",
",",
"relpath",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"possible",
")",
":",
"return",
"possible",
"raise",
"IOError",
"(",
"errno",
".",
"ENOENT",
",",
"\"Could not find data {}\"",
".",
"format",
"(",
"relpath",
")",
",",
"relpath",
")"
] | https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/tools/lint/find_data.py#L12-L24 |
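A hypothetical call to the helper above; the resource path is illustrative.

```python
path = find_data("drake/tools/lint/test/some_resource.txt")
with open(path) as f:
    contents = f.read()
```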
||
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | caffe2/python/onnx/backend.py | python | Caffe2Backend.prepare | (cls, model, device='CPU', raw_values_dict=None, **kwargs) | return retval | For Onnx Caffe2Backend, we require that init_graph don't initialize the actual input of the predict_graph,
for example, if "img" is the input blob for the predict_net, we require that in init_graph and in
initializer of the predict_graph, "img" is not initialized. We don't have a check for this, since
there is no way we can know which blob is the input of the predict_graph. | For Onnx Caffe2Backend, we require that init_graph don't initialize the actual input of the predict_graph, | [
"For",
"Onnx",
"Caffe2Backend",
"we",
"require",
"that",
"init_graph",
"don",
"t",
"initialize",
"the",
"actual",
"input",
"of",
"the",
"predict_graph"
] | def prepare(cls, model, device='CPU', raw_values_dict=None, **kwargs):
'''
For Onnx Caffe2Backend, we require that init_graph don't initialize the actual input of the predict_graph,
for example, if "img" is the input blob for the predict_net, we require that in init_graph and in
initializer of the predict_graph, "img" is not initialized. We don't have a check for this, since
there is no way we can know which blob is the input of the predict_graph.
'''
if not kwargs.pop('no_check_UNSAFE', False):
super(Caffe2Backend, cls).prepare(model, device, **kwargs)
opset_version = None
for imp in model.opset_import:
if not imp.HasField("domain") or imp.domain == "":
opset_version = imp.version
if imp.version > cls._known_opset_version:
warnings.warn("This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.".format(cls._known_opset_version, imp.version))
else:
warnings.warn("Unrecognized operator set {}".format(imp.domain))
if opset_version is None:
if model.ir_version >= 0x00000003:
raise RuntimeError("Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)")
else:
opset_version = 1
# Prior to the onnx version update to onnx-1.8.0, errors caused by failures
# in the onnx shape inference call were being suppressed. Hence a try-catch block
# is added around the infer_shapes call to avoid these failures and preserve status
try:
model = onnx.shape_inference.infer_shapes(model)
except RuntimeError:
warnings.warn("ShapeInferenceWarning: Inferred shape and existing shape differ in rank")
ws = Workspace()
device_option = get_device_option(Device(device))
init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)
if raw_values_dict:
cls._external_value_resolution_pass(model, raw_values_dict)
# Directly load initializer data into blobs in workspace
cls._direct_initialize_parameters(
model.graph.initializer,
ws,
device_option,
)
initialized = {init.name for init in model.graph.initializer}
cls._direct_initialize_inputs(
model.graph.input,
initialized,
ws,
device_option,
)
uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]
retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
return retval | [
"def",
"prepare",
"(",
"cls",
",",
"model",
",",
"device",
"=",
"'CPU'",
",",
"raw_values_dict",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"kwargs",
".",
"pop",
"(",
"'no_check_UNSAFE'",
",",
"False",
")",
":",
"super",
"(",
"Caffe2Backend",
",",
"cls",
")",
".",
"prepare",
"(",
"model",
",",
"device",
",",
"*",
"*",
"kwargs",
")",
"opset_version",
"=",
"None",
"for",
"imp",
"in",
"model",
".",
"opset_import",
":",
"if",
"not",
"imp",
".",
"HasField",
"(",
"\"domain\"",
")",
"or",
"imp",
".",
"domain",
"==",
"\"\"",
":",
"opset_version",
"=",
"imp",
".",
"version",
"if",
"imp",
".",
"version",
">",
"cls",
".",
"_known_opset_version",
":",
"warnings",
".",
"warn",
"(",
"\"This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.\"",
".",
"format",
"(",
"cls",
".",
"_known_opset_version",
",",
"imp",
".",
"version",
")",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"Unrecognized operator set {}\"",
".",
"format",
"(",
"imp",
".",
"domain",
")",
")",
"if",
"opset_version",
"is",
"None",
":",
"if",
"model",
".",
"ir_version",
">=",
"0x00000003",
":",
"raise",
"RuntimeError",
"(",
"\"Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)\"",
")",
"else",
":",
"opset_version",
"=",
"1",
"# Prior to onnx version update to onnx-1.8.0, errors caused by failures in",
"# in the onnx shape inference call were being supressed. Hence a try-catch block",
"# is added around the infer_shapes call to avoid these failures and preserve status",
"try",
":",
"model",
"=",
"onnx",
".",
"shape_inference",
".",
"infer_shapes",
"(",
"model",
")",
"except",
"RuntimeError",
":",
"warnings",
".",
"warn",
"(",
"\"ShapeInferenceWarning: Inferred shape and existing shape differ in rank\"",
")",
"ws",
"=",
"Workspace",
"(",
")",
"device_option",
"=",
"get_device_option",
"(",
"Device",
"(",
"device",
")",
")",
"init_net",
",",
"predict_net",
"=",
"cls",
".",
"_onnx_model_to_caffe2_net",
"(",
"model",
",",
"device",
",",
"opset_version",
",",
"False",
")",
"if",
"raw_values_dict",
":",
"cls",
".",
"_external_value_resolution_pass",
"(",
"model",
",",
"raw_values_dict",
")",
"# Directly load initializer data into blobs in workspace",
"cls",
".",
"_direct_initialize_parameters",
"(",
"model",
".",
"graph",
".",
"initializer",
",",
"ws",
",",
"device_option",
",",
")",
"initialized",
"=",
"{",
"init",
".",
"name",
"for",
"init",
"in",
"model",
".",
"graph",
".",
"initializer",
"}",
"cls",
".",
"_direct_initialize_inputs",
"(",
"model",
".",
"graph",
".",
"input",
",",
"initialized",
",",
"ws",
",",
"device_option",
",",
")",
"uninitialized",
"=",
"[",
"value_info",
".",
"name",
"for",
"value_info",
"in",
"model",
".",
"graph",
".",
"input",
"if",
"value_info",
".",
"name",
"not",
"in",
"initialized",
"]",
"retval",
"=",
"Caffe2Rep",
"(",
"init_net",
",",
"predict_net",
",",
"ws",
",",
"uninitialized",
")",
"return",
"retval"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/onnx/backend.py#L672-L731 |
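A minimal sketch of importing an ONNX model through the backend above; it assumes onnx and caffe2 are installed, that "model.onnx" exists, and that the input shape matches the model (the shape below is a placeholder).

```python
import numpy as np
import onnx
from caffe2.python.onnx.backend import Caffe2Backend

model = onnx.load("model.onnx")
rep = Caffe2Backend.prepare(model, device="CPU")
outputs = rep.run([np.random.randn(1, 3, 224, 224).astype(np.float32)])
```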
|
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/entity_object/conversion/swgbcc/genie_unit.py | python | SWGBMonkGroup.is_unique | (self) | return False | Groups are unique if they belong to a specific civ.
:returns: True if the civ id is not Gaia's and no alternative lines
for this unit line exist. | Groups are unique if they belong to a specific civ. | [
"Groups",
"are",
"unique",
"if",
"they",
"belong",
"to",
"a",
"specific",
"civ",
"."
] | def is_unique(self):
"""
Groups are unique if they belong to a specific civ.
:returns: True if the civ id is not Gaia's and no alternative lines
for this unit line exist.
"""
return False | [
"def",
"is_unique",
"(",
"self",
")",
":",
"return",
"False"
] | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/entity_object/conversion/swgbcc/genie_unit.py#L225-L232 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py | python | ResourceManager.resource_listdir | (self, package_or_requirement, resource_name) | return get_provider(package_or_requirement).resource_listdir(
resource_name
) | List the contents of the named resource directory | List the contents of the named resource directory | [
"List",
"the",
"contents",
"of",
"the",
"named",
"resource",
"directory"
] | def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
) | [
"def",
"resource_listdir",
"(",
"self",
",",
"package_or_requirement",
",",
"resource_name",
")",
":",
"return",
"get_provider",
"(",
"package_or_requirement",
")",
".",
"resource_listdir",
"(",
"resource_name",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L1160-L1164 |
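A short usage sketch for the API above, via the module-level binding; the package and directory names are illustrative.

```python
import pkg_resources

for name in pkg_resources.resource_listdir("pip", "_vendor"):
    print(name)
```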
|
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/peacock/ExodusViewer/plugins/BackgroundPlugin.py | python | BackgroundPlugin._callbackBlackPreset | (self, value) | Called when the black preset is toggled. | Called when the black preset is toggled. | [
"Called",
"when",
"the",
"black",
"preset",
"is",
"toggled",
"."
] | def _callbackBlackPreset(self, value):
"""
Called when the black preset is toggled.
"""
self.BlackPreset.setChecked(value)
if value:
self.GradientToggle.blockSignals(True)
self.GradientToggle.setChecked(False)
self.GradientToggle.blockSignals(False)
self.WhitePreset.blockSignals(True)
self.WhitePreset.setChecked(False)
self.WhitePreset.blockSignals(False)
self.ColorbarBlackFontToggle.blockSignals(True)
self.ColorbarBlackFontToggle.setChecked(False)
self.ColorbarBlackFontToggle.blockSignals(False)
else:
self.GradientToggle.blockSignals(True)
self.GradientToggle.setChecked(self._gradient_state)
self.GradientToggle.blockSignals(False)
self.ColorbarBlackFontToggle.blockSignals(True)
self.ColorbarBlackFontToggle.setChecked(self._black_font_state)
self.ColorbarBlackFontToggle.blockSignals(False)
self.updateOptions()
self.windowRequiresUpdate.emit() | [
"def",
"_callbackBlackPreset",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"BlackPreset",
".",
"setChecked",
"(",
"value",
")",
"if",
"value",
":",
"self",
".",
"GradientToggle",
".",
"blockSignals",
"(",
"True",
")",
"self",
".",
"GradientToggle",
".",
"setChecked",
"(",
"False",
")",
"self",
".",
"GradientToggle",
".",
"blockSignals",
"(",
"False",
")",
"self",
".",
"WhitePreset",
".",
"blockSignals",
"(",
"True",
")",
"self",
".",
"WhitePreset",
".",
"setChecked",
"(",
"False",
")",
"self",
".",
"WhitePreset",
".",
"blockSignals",
"(",
"False",
")",
"self",
".",
"ColorbarBlackFontToggle",
".",
"blockSignals",
"(",
"True",
")",
"self",
".",
"ColorbarBlackFontToggle",
".",
"setChecked",
"(",
"False",
")",
"self",
".",
"ColorbarBlackFontToggle",
".",
"blockSignals",
"(",
"False",
")",
"else",
":",
"self",
".",
"GradientToggle",
".",
"blockSignals",
"(",
"True",
")",
"self",
".",
"GradientToggle",
".",
"setChecked",
"(",
"self",
".",
"_gradient_state",
")",
"self",
".",
"GradientToggle",
".",
"blockSignals",
"(",
"False",
")",
"self",
".",
"ColorbarBlackFontToggle",
".",
"blockSignals",
"(",
"True",
")",
"self",
".",
"ColorbarBlackFontToggle",
".",
"setChecked",
"(",
"self",
".",
"_black_font_state",
")",
"self",
".",
"ColorbarBlackFontToggle",
".",
"blockSignals",
"(",
"False",
")",
"self",
".",
"updateOptions",
"(",
")",
"self",
".",
"windowRequiresUpdate",
".",
"emit",
"(",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/peacock/ExodusViewer/plugins/BackgroundPlugin.py#L299-L327 |
||
qt/qtbase | 81b9ee66b8e40ed145185fe46b7c91929688cafd | util/cmake/qmake_parser.py | python | flatten_list | (input_list) | Flattens an irregular nested list into a simple list. | Flattens an irregular nested list into a simple list. | [
"Flattens",
"an",
"irregular",
"nested",
"list",
"into",
"a",
"simple",
"list",
"."
] | def flatten_list(input_list):
""" Flattens an irregular nested list into a simple list."""
for el in input_list:
if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):
yield from flatten_list(el)
else:
yield el | [
"def",
"flatten_list",
"(",
"input_list",
")",
":",
"for",
"el",
"in",
"input_list",
":",
"if",
"isinstance",
"(",
"el",
",",
"collections",
".",
"abc",
".",
"Iterable",
")",
"and",
"not",
"isinstance",
"(",
"el",
",",
"(",
"str",
",",
"bytes",
")",
")",
":",
"yield",
"from",
"flatten_list",
"(",
"el",
")",
"else",
":",
"yield",
"el"
] | https://github.com/qt/qtbase/blob/81b9ee66b8e40ed145185fe46b7c91929688cafd/util/cmake/qmake_parser.py#L75-L81 |
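The helper above is a generator, so callers wrap it in `list()`; note that strings and bytes are deliberately kept whole rather than split into characters.

```python
nested = [1, [2, (3, 4)], "ab", [[5]]]
print(list(flatten_list(nested)))  # -> [1, 2, 3, 4, 'ab', 5]
```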
||
larroy/clearskies_core | 3574ddf0edc8555454c7044126e786a6c29444dc | tools/gyp/pylib/gyp/generator/android.py | python | AndroidMkWriter.ComputeDeps | (self, spec) | return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) | Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps). | Compute the dependencies of a gyp spec. | [
"Compute",
"the",
"dependencies",
"of",
"a",
"gyp",
"spec",
"."
] | def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) | [
"def",
"ComputeDeps",
"(",
"self",
",",
"spec",
")",
":",
"deps",
"=",
"[",
"]",
"link_deps",
"=",
"[",
"]",
"if",
"'dependencies'",
"in",
"spec",
":",
"deps",
".",
"extend",
"(",
"[",
"target_outputs",
"[",
"dep",
"]",
"for",
"dep",
"in",
"spec",
"[",
"'dependencies'",
"]",
"if",
"target_outputs",
"[",
"dep",
"]",
"]",
")",
"for",
"dep",
"in",
"spec",
"[",
"'dependencies'",
"]",
":",
"if",
"dep",
"in",
"target_link_deps",
":",
"link_deps",
".",
"append",
"(",
"target_link_deps",
"[",
"dep",
"]",
")",
"deps",
".",
"extend",
"(",
"link_deps",
")",
"return",
"(",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"deps",
")",
",",
"gyp",
".",
"common",
".",
"uniquer",
"(",
"link_deps",
")",
")"
] | https://github.com/larroy/clearskies_core/blob/3574ddf0edc8555454c7044126e786a6c29444dc/tools/gyp/pylib/gyp/generator/android.py#L773-L789 |
|
apache/impala | 8ddac48f3428c86f2cbd037ced89cfb903298b12 | shell/impala_shell.py | python | ImpalaShell.print_runtime_profile | (self, profile, failed_profile,
profile_display_mode=QueryAttemptDisplayModes.LATEST, status=False) | Prints the given runtime profiles to the console. Optionally prints the failed
profile if the query was retried. The format the profiles are printed is controlled
by the option profile_display_mode, see QueryProfileDisplayModes docs above. | Prints the given runtime profiles to the console. Optionally prints the failed
profile if the query was retried. The format the profiles are printed is controlled
by the option profile_display_mode, see QueryProfileDisplayModes docs above. | [
"Prints",
"the",
"given",
"runtime",
"profiles",
"to",
"the",
"console",
".",
"Optionally",
"prints",
"the",
"failed",
"profile",
"if",
"the",
"query",
"was",
"retried",
".",
"The",
"format",
"the",
"profiles",
"are",
"printed",
"is",
"controlled",
"by",
"the",
"option",
"profile_display_mode",
"see",
"QueryProfileDisplayModes",
"docs",
"above",
"."
] | def print_runtime_profile(self, profile, failed_profile,
profile_display_mode=QueryAttemptDisplayModes.LATEST, status=False):
"""Prints the given runtime profiles to the console. Optionally prints the failed
profile if the query was retried. The format the profiles are printed is controlled
by the option profile_display_mode, see QueryProfileDisplayModes docs above.
"""
if self.show_profiles or status:
if profile:
query_profile_prefix = "Query Runtime Profile:\n"
if profile_display_mode == QueryAttemptDisplayModes.ALL:
print(query_profile_prefix + profile)
if failed_profile:
print("Failed Query Runtime Profile(s):\n" + failed_profile)
elif profile_display_mode == QueryAttemptDisplayModes.LATEST:
print(query_profile_prefix + profile)
elif profile_display_mode == QueryAttemptDisplayModes.ORIGINAL:
print(query_profile_prefix + failed_profile if failed_profile else profile)
else:
raise FatalShellException("Invalid value for query profile display mode") | [
"def",
"print_runtime_profile",
"(",
"self",
",",
"profile",
",",
"failed_profile",
",",
"profile_display_mode",
"=",
"QueryAttemptDisplayModes",
".",
"LATEST",
",",
"status",
"=",
"False",
")",
":",
"if",
"self",
".",
"show_profiles",
"or",
"status",
":",
"if",
"profile",
":",
"query_profile_prefix",
"=",
"\"Query Runtime Profile:\\n\"",
"if",
"profile_display_mode",
"==",
"QueryAttemptDisplayModes",
".",
"ALL",
":",
"print",
"(",
"query_profile_prefix",
"+",
"profile",
")",
"if",
"failed_profile",
":",
"print",
"(",
"\"Failed Query Runtime Profile(s):\\n\"",
"+",
"failed_profile",
")",
"elif",
"profile_display_mode",
"==",
"QueryAttemptDisplayModes",
".",
"LATEST",
":",
"print",
"(",
"query_profile_prefix",
"+",
"profile",
")",
"elif",
"profile_display_mode",
"==",
"QueryAttemptDisplayModes",
".",
"ORIGINAL",
":",
"print",
"(",
"query_profile_prefix",
"+",
"failed_profile",
"if",
"failed_profile",
"else",
"profile",
")",
"else",
":",
"raise",
"FatalShellException",
"(",
"\"Invalid value for query profile display mode\"",
")"
] | https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/shell/impala_shell.py#L1086-L1104 |
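A hedged sketch of driving the printer above; `shell` stands for an ImpalaShell instance and the two profile strings are placeholders for what the server returned.

```python
shell.show_profiles = True
shell.print_runtime_profile(
    latest_profile, failed_profile,
    profile_display_mode=QueryAttemptDisplayModes.ALL)
```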
||
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | chrome/common/extensions/docs/examples/apps/hello-python/oauth2/__init__.py | python | SignatureMethod.signing_base | (self, request, consumer, token) | Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software. | Calculates the string that needs to be signed. | [
"Calculates",
"the",
"string",
"that",
"needs",
"to",
"be",
"signed",
"."
] | def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError | [
"def",
"signing_base",
"(",
"self",
",",
"request",
",",
"consumer",
",",
"token",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/chrome/common/extensions/docs/examples/apps/hello-python/oauth2/__init__.py#L682-L690 |
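A hedged sketch of a concrete subclass of the abstract method above, modelled on the PLAINTEXT signature method but with escaping and encoding details simplified for illustration.

```python
class SignatureMethod_PLAINTEXT(SignatureMethod):
    name = 'PLAINTEXT'

    def signing_base(self, request, consumer, token):
        # Key is "<consumer secret>&<token secret>"; the message equals the key.
        sig = '%s&' % consumer.secret
        if token:
            sig = sig + token.secret
        return sig, sig

    def sign(self, request, consumer, token):
        key, raw = self.signing_base(request, consumer, token)
        return raw
```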
||
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/setuptools/command/sdist.py | python | sdist.read_manifest | (self) | Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution. | Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution. | [
"Read",
"the",
"manifest",
"file",
"(",
"named",
"by",
"self",
".",
"manifest",
")",
"and",
"use",
"it",
"to",
"fill",
"in",
"self",
".",
"filelist",
"the",
"list",
"of",
"files",
"to",
"include",
"in",
"the",
"source",
"distribution",
"."
] | def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest, 'rb')
for line in manifest:
# The manifest must contain UTF-8. See #303.
if six.PY3:
try:
line = line.decode('UTF-8')
except UnicodeDecodeError:
log.warn("%r not UTF-8 decodable -- skipping" % line)
continue
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close() | [
"def",
"read_manifest",
"(",
"self",
")",
":",
"log",
".",
"info",
"(",
"\"reading manifest file '%s'\"",
",",
"self",
".",
"manifest",
")",
"manifest",
"=",
"open",
"(",
"self",
".",
"manifest",
",",
"'rb'",
")",
"for",
"line",
"in",
"manifest",
":",
"# The manifest must contain UTF-8. See #303.",
"if",
"six",
".",
"PY3",
":",
"try",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"'UTF-8'",
")",
"except",
"UnicodeDecodeError",
":",
"log",
".",
"warn",
"(",
"\"%r not UTF-8 decodable -- skipping\"",
"%",
"line",
")",
"continue",
"# ignore comments and blank lines",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
"or",
"not",
"line",
":",
"continue",
"self",
".",
"filelist",
".",
"append",
"(",
"line",
")",
"manifest",
".",
"close",
"(",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/setuptools/command/sdist.py#L180-L200 |
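A small sketch of the manifest format the method above consumes: UTF-8 text with one path per line, where '#' comments and blank lines are ignored. The file name and paths are illustrative.

```python
manifest_text = """\
# files included in the source distribution
README.rst

setup.py
pkg/__init__.py
"""
with open("MANIFEST", "w", encoding="utf-8") as f:
    f.write(manifest_text)
# After this, cmd.read_manifest() would leave exactly the three paths
# in cmd.filelist.
```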
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/fsspec/core.py | python | OpenFile.open | (self) | return self.__enter__() | Materialise this as a real open file without context
The file should be explicitly closed to avoid enclosed open file
instances persisting | Materialise this as a real open file without context | [
"Materialise",
"this",
"as",
"a",
"real",
"open",
"file",
"without",
"context"
] | def open(self):
"""Materialise this as a real open file without context
The file should be explicitly closed to avoid enclosed open file
instances persisting
"""
return self.__enter__() | [
"def",
"open",
"(",
"self",
")",
":",
"return",
"self",
".",
"__enter__",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/fsspec/core.py#L123-L129 |
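A short sketch of the pattern above: materialise a real file handle from an OpenFile and close it explicitly instead of using a context manager. The path is illustrative.

```python
import fsspec

of = fsspec.open("data.csv", "r")  # returns an OpenFile, not yet opened
f = of.open()                      # materialise a real file-like object
try:
    first = f.readline()
finally:
    f.close()                      # explicit close, as the docstring advises
```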
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | AnyButton.GetBitmapSelected | (*args, **kwargs) | return _controls_.AnyButton_GetBitmapSelected(*args, **kwargs) | GetBitmapSelected(self) -> Bitmap | GetBitmapSelected(self) -> Bitmap | [
"GetBitmapSelected",
"(",
"self",
")",
"-",
">",
"Bitmap"
] | def GetBitmapSelected(*args, **kwargs):
"""GetBitmapSelected(self) -> Bitmap"""
return _controls_.AnyButton_GetBitmapSelected(*args, **kwargs) | [
"def",
"GetBitmapSelected",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"AnyButton_GetBitmapSelected",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L125-L127 |
|
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/vision_opencv/image_geometry/src/image_geometry/cameramodels.py | python | StereoCameraModel.fromCameraInfo | (self, left_msg, right_msg) | :param left_msg: left camera parameters
:type left_msg: sensor_msgs.msg.CameraInfo
:param right_msg: right camera parameters
:type right_msg: sensor_msgs.msg.CameraInfo
Set the camera parameters from the :class:`sensor_msgs.msg.CameraInfo` messages. | :param left_msg: left camera parameters
:type left_msg: sensor_msgs.msg.CameraInfo
:param right_msg: right camera parameters
:type right_msg: sensor_msgs.msg.CameraInfo | [
":",
"param",
"left_msg",
":",
"left",
"camera",
"parameters",
":",
"type",
"left_msg",
":",
"sensor_msgs",
".",
"msg",
".",
"CameraInfo",
":",
"param",
"right_msg",
":",
"right",
"camera",
"parameters",
":",
"type",
"right_msg",
":",
"sensor_msgs",
".",
"msg",
".",
"CameraInfo"
] | def fromCameraInfo(self, left_msg, right_msg):
"""
:param left_msg: left camera parameters
:type left_msg: sensor_msgs.msg.CameraInfo
:param right_msg: right camera parameters
:type right_msg: sensor_msgs.msg.CameraInfo
Set the camera parameters from the :class:`sensor_msgs.msg.CameraInfo` messages.
"""
self.left.fromCameraInfo(left_msg)
self.right.fromCameraInfo(right_msg)
# [ Fx, 0, Cx, Fx*-Tx ]
# [ 0, Fy, Cy, 0 ]
# [ 0, 0, 1, 0 ]
fx = self.right.P[0, 0]
fy = self.right.P[1, 1]
cx = self.right.P[0, 2]
cy = self.right.P[1, 2]
tx = -self.right.P[0, 3] / fx
# Q is:
# [ 1, 0, 0, -Clx ]
# [ 0, 1, 0, -Cy ]
# [ 0, 0, 0, Fx ]
# [ 0, 0, 1 / Tx, (Crx-Clx)/Tx ]
self.Q = numpy.zeros((4, 4), dtype='float64')
self.Q[0, 0] = 1.0
self.Q[0, 3] = -cx
self.Q[1, 1] = 1.0
self.Q[1, 3] = -cy
self.Q[2, 3] = fx
self.Q[3, 2] = 1 / tx | [
"def",
"fromCameraInfo",
"(",
"self",
",",
"left_msg",
",",
"right_msg",
")",
":",
"self",
".",
"left",
".",
"fromCameraInfo",
"(",
"left_msg",
")",
"self",
".",
"right",
".",
"fromCameraInfo",
"(",
"right_msg",
")",
"# [ Fx, 0, Cx, Fx*-Tx ]",
"# [ 0, Fy, Cy, 0 ]",
"# [ 0, 0, 1, 0 ]",
"fx",
"=",
"self",
".",
"right",
".",
"P",
"[",
"0",
",",
"0",
"]",
"fy",
"=",
"self",
".",
"right",
".",
"P",
"[",
"1",
",",
"1",
"]",
"cx",
"=",
"self",
".",
"right",
".",
"P",
"[",
"0",
",",
"2",
"]",
"cy",
"=",
"self",
".",
"right",
".",
"P",
"[",
"1",
",",
"2",
"]",
"tx",
"=",
"-",
"self",
".",
"right",
".",
"P",
"[",
"0",
",",
"3",
"]",
"/",
"fx",
"# Q is:",
"# [ 1, 0, 0, -Clx ]",
"# [ 0, 1, 0, -Cy ]",
"# [ 0, 0, 0, Fx ]",
"# [ 0, 0, 1 / Tx, (Crx-Clx)/Tx ]",
"self",
".",
"Q",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"4",
",",
"4",
")",
",",
"dtype",
"=",
"'float64'",
")",
"self",
".",
"Q",
"[",
"0",
",",
"0",
"]",
"=",
"1.0",
"self",
".",
"Q",
"[",
"0",
",",
"3",
"]",
"=",
"-",
"cx",
"self",
".",
"Q",
"[",
"1",
",",
"1",
"]",
"=",
"1.0",
"self",
".",
"Q",
"[",
"1",
",",
"3",
"]",
"=",
"-",
"cy",
"self",
".",
"Q",
"[",
"2",
",",
"3",
"]",
"=",
"fx",
"self",
".",
"Q",
"[",
"3",
",",
"2",
"]",
"=",
"1",
"/",
"tx"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/vision_opencv/image_geometry/src/image_geometry/cameramodels.py#L265-L299 |
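A hedged sketch of feeding the model above from a pair of CameraInfo messages, e.g. inside a synchronized subscriber callback; the message plumbing is elided.

```python
from image_geometry import StereoCameraModel

model = StereoCameraModel()

def callback(left_info, right_info):  # a sensor_msgs/CameraInfo pair
    model.fromCameraInfo(left_info, right_info)
    # model.Q is now the 4x4 reprojection matrix that maps
    # (u, v, disparity, 1) to homogeneous 3D points.
    print(model.Q)
```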
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/utilities/table_utils.py | python | ValidatedTableItem._modify_setData | (self) | Modify the setData method. | Modify the setData method. | [
"Modify",
"the",
"setData",
"method",
"."
] | def _modify_setData(self):
"""
Modify the setData method.
"""
setattr(self, "setData", self.validator_before_set(self.setData, self.validator)) | [
"def",
"_modify_setData",
"(",
"self",
")",
":",
"setattr",
"(",
"self",
",",
"\"setData\"",
",",
"self",
".",
"validator_before_set",
"(",
"self",
".",
"setData",
",",
"self",
".",
"validator",
")",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/utilities/table_utils.py#L75-L79 |
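A generic sketch of the wrap-before-set pattern used above; this is not the Mantid implementation, just the shape of a `validator_before_set` decorator that only forwards to the original setter when the value passes validation.

```python
def validator_before_set(setter, validator):
    def wrapped(role, value):
        if validator(value):
            setter(role, value)  # only set data that validates
    return wrapped
```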
||
MythTV/mythtv | d282a209cb8be85d036f85a62a8ec971b67d45f4 | mythtv/programs/scripts/internetcontent/nv_python_libs/vimeo/oauth/oauth_api.py | python | OAuthServer.build_authenticate_header | (self, realm='') | return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} | Optional support for the authenticate header. | Optional support for the authenticate header. | [
"Optional",
"support",
"for",
"the",
"authenticate",
"header",
"."
] | def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} | [
"def",
"build_authenticate_header",
"(",
"self",
",",
"realm",
"=",
"''",
")",
":",
"return",
"{",
"'WWW-Authenticate'",
":",
"'OAuth realm=\"%s\"'",
"%",
"realm",
"}"
] | https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/programs/scripts/internetcontent/nv_python_libs/vimeo/oauth/oauth_api.py#L444-L446 |
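The mapping produced by the method above, shown for a hypothetical realm (construction of the server itself is elided):

```python
header = server.build_authenticate_header('http://photos.example.net/')
# -> {'WWW-Authenticate': 'OAuth realm="http://photos.example.net/"'}
```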
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py | python | ClosurizedNamespacesInfo._AddUsedNamespace | (self, state_tracker, identifier, token,
is_alias_definition=False) | Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
token: The token in which the namespace is used.
is_alias_definition: If the used namespace is part of an alias_definition.
Aliased symbols need their parent namespace to be available; if it is
not yet required through another symbol, an error will be thrown. | Adds the namespace of an identifier to the list of used namespaces. | [
"Adds",
"the",
"namespace",
"of",
"an",
"identifier",
"to",
"the",
"list",
"of",
"used",
"namespaces",
"."
] | def _AddUsedNamespace(self, state_tracker, identifier, token,
is_alias_definition=False):
"""Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
token: The token in which the namespace is used.
is_alias_definition: If the used namespace is part of an alias_definition.
Aliased symbols need their parent namespace to be available; if it is
not yet required through another symbol, an error will be thrown.
"""
if self._HasSuppression(state_tracker, 'missingRequire'):
return
identifier = self._GetUsedIdentifier(identifier)
namespace = self.GetClosurizedNamespace(identifier)
# b/5362203 If it's a variable in scope then it's not a required namespace.
if namespace and not state_tracker.IsVariableInScope(namespace):
namespace = UsedNamespace(namespace, identifier, token,
is_alias_definition)
self._used_namespaces.append(namespace) | [
"def",
"_AddUsedNamespace",
"(",
"self",
",",
"state_tracker",
",",
"identifier",
",",
"token",
",",
"is_alias_definition",
"=",
"False",
")",
":",
"if",
"self",
".",
"_HasSuppression",
"(",
"state_tracker",
",",
"'missingRequire'",
")",
":",
"return",
"identifier",
"=",
"self",
".",
"_GetUsedIdentifier",
"(",
"identifier",
")",
"namespace",
"=",
"self",
".",
"GetClosurizedNamespace",
"(",
"identifier",
")",
"# b/5362203 If its a variable in scope then its not a required namespace.",
"if",
"namespace",
"and",
"not",
"state_tracker",
".",
"IsVariableInScope",
"(",
"namespace",
")",
":",
"namespace",
"=",
"UsedNamespace",
"(",
"namespace",
",",
"identifier",
",",
"token",
",",
"is_alias_definition",
")",
"self",
".",
"_used_namespaces",
".",
"append",
"(",
"namespace",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py#L502-L526 |
||
alibaba/weex_js_engine | 2bdf4b6f020c1fc99c63f649718f6faf7e27fdde | jni/v8core/v8/build/gyp/pylib/gyp/MSVSSettings.py | python | ValidateMSVSSettings | (settings, stderr=sys.stderr) | Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages. | Validates that the names of the settings are valid for MSVS. | [
"Validates",
"that",
"the",
"names",
"of",
"the",
"settings",
"are",
"valid",
"for",
"MSVS",
"."
] | def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr) | [
"def",
"ValidateMSVSSettings",
"(",
"settings",
",",
"stderr",
"=",
"sys",
".",
"stderr",
")",
":",
"_ValidateSettings",
"(",
"_msvs_validators",
",",
"settings",
",",
"stderr",
")"
] | https://github.com/alibaba/weex_js_engine/blob/2bdf4b6f020c1fc99c63f649718f6faf7e27fdde/jni/v8core/v8/build/gyp/pylib/gyp/MSVSSettings.py#L442-L450 |
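`ValidateMSVSSettings` simply forwards to a table-driven `_ValidateSettings`. A sketch of that shape with a tiny invented validator table (gyp's real `_msvs_validators` is far larger):

```python
import sys

# Hypothetical stand-in for gyp's validator table: tool -> setting -> checker.
_validators = {
    'VCCLCompilerTool': {
        'Optimization': lambda v: v in ('0', '1', '2', '3'),
        'WarningLevel': lambda v: v.isdigit(),
    },
}

def validate_settings(validators, settings, stderr=sys.stderr):
    """Warn about unknown tools/settings and reject bad values."""
    for tool, tool_settings in settings.items():
        if tool not in validators:
            print(f'Warning: unrecognized tool {tool}', file=stderr)
            continue
        for name, value in tool_settings.items():
            checker = validators[tool].get(name)
            if checker is None:
                print(f'Warning: unrecognized setting {tool}/{name}', file=stderr)
            elif not checker(value):
                print(f'Warning: bad value {value!r} for {tool}/{name}', file=stderr)

validate_settings(_validators, {'VCCLCompilerTool': {'Optimization': '2'}})
```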
||
lemenkov/libyuv | 5b3351bd07e83f9f9a4cb6629561331ecdb7c546 | tools_libyuv/autoroller/roll_deps.py | python | _GetBranches | () | return active, branches | Returns a tuple of active,branches.
The 'active' is the name of the currently active branch and 'branches' is a
list of all branches. | Returns a tuple of active,branches. | [
"Returns",
"a",
"tuple",
"of",
"active",
"branches",
"."
] | def _GetBranches():
"""Returns a tuple of active,branches.
The 'active' is the name of the currently active branch and 'branches' is a
list of all branches.
"""
lines = _RunCommand(['git', 'branch'])[0].split('\n')
branches = []
active = ''
for line in lines:
if '*' in line:
# The assumption is that the first char will always be the '*'.
active = line[1:].strip()
branches.append(active)
else:
branch = line.strip()
if branch:
branches.append(branch)
return active, branches | [
"def",
"_GetBranches",
"(",
")",
":",
"lines",
"=",
"_RunCommand",
"(",
"[",
"'git'",
",",
"'branch'",
"]",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'\\n'",
")",
"branches",
"=",
"[",
"]",
"active",
"=",
"''",
"for",
"line",
"in",
"lines",
":",
"if",
"'*'",
"in",
"line",
":",
"# The assumption is that the first char will always be the '*'.",
"active",
"=",
"line",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
"branches",
".",
"append",
"(",
"active",
")",
"else",
":",
"branch",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"branch",
":",
"branches",
".",
"append",
"(",
"branch",
")",
"return",
"active",
",",
"branches"
] | https://github.com/lemenkov/libyuv/blob/5b3351bd07e83f9f9a4cb6629561331ecdb7c546/tools_libyuv/autoroller/roll_deps.py#L132-L150 |
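`_GetBranches` relies on the `*` marker in `git branch` output to spot the active branch. The same parse, written directly against `subprocess` (a sketch that assumes it runs inside a git checkout):

```python
import subprocess

def get_branches():
    """Return (active_branch, all_branches) parsed from `git branch`."""
    out = subprocess.run(['git', 'branch'], capture_output=True,
                         text=True, check=True).stdout
    active, branches = '', []
    for line in out.splitlines():
        line = line.strip()
        if line.startswith('*'):
            active = line[1:].strip()  # '* main' -> 'main'
            branches.append(active)
        elif line:
            branches.append(line)
    return active, branches
```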
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/hypertreelist.py | python | TreeListHeaderWindow.XToCol | (self, x) | return wx.NOT_FOUND | Returns the column that corresponds to the logical input `x` coordinate.
:param `x`: the `x` position to evaluate.
:return: The column that corresponds to the logical input `x` coordinate,
or ``wx.NOT_FOUND`` if there is no column at the `x` position. | Returns the column that corresponds to the logical input `x` coordinate. | [
"Returns",
"the",
"column",
"that",
"corresponds",
"to",
"the",
"logical",
"input",
"x",
"coordinate",
"."
] | def XToCol(self, x):
"""
Returns the column that corresponds to the logical input `x` coordinate.
:param `x`: the `x` position to evaluate.
:return: The column that corresponds to the logical input `x` coordinate,
or ``wx.NOT_FOUND`` if there is no column at the `x` position.
"""
colLeft = 0
numColumns = self.GetColumnCount()
for col in xrange(numColumns):
if not self.IsColumnShown(col):
continue
column = self.GetColumn(col)
if x < (colLeft + column.GetWidth()):
return col
colLeft += column.GetWidth()
return wx.NOT_FOUND | [
"def",
"XToCol",
"(",
"self",
",",
"x",
")",
":",
"colLeft",
"=",
"0",
"numColumns",
"=",
"self",
".",
"GetColumnCount",
"(",
")",
"for",
"col",
"in",
"xrange",
"(",
"numColumns",
")",
":",
"if",
"not",
"self",
".",
"IsColumnShown",
"(",
"col",
")",
":",
"continue",
"column",
"=",
"self",
".",
"GetColumn",
"(",
"col",
")",
"if",
"x",
"<",
"(",
"colLeft",
"+",
"column",
".",
"GetWidth",
"(",
")",
")",
":",
"return",
"col",
"colLeft",
"+=",
"column",
".",
"GetWidth",
"(",
")",
"return",
"wx",
".",
"NOT_FOUND"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/hypertreelist.py#L928-L952 |
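`XToCol` is a cumulative-width hit test that skips hidden columns. The same scan expressed over plain `(width, is_shown)` records, without wx:

```python
NOT_FOUND = -1

def x_to_col(columns, x):
    """Map a logical x coordinate to a column index, skipping hidden columns.

    `columns` is a list of (width, is_shown) pairs."""
    col_left = 0
    for idx, (width, shown) in enumerate(columns):
        if not shown:
            continue
        if x < col_left + width:
            return idx
        col_left += width
    return NOT_FOUND

cols = [(80, True), (120, False), (60, True)]
assert x_to_col(cols, 79) == 0
assert x_to_col(cols, 100) == 2   # the hidden column contributes no width
assert x_to_col(cols, 200) == NOT_FOUND
```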
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/keras/python/keras/layers/serialization.py | python | deserialize | (config, custom_objects=None) | return deserialize_keras_object(
config,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='layer') | Instantiates a layer from a config dictionary.
Arguments:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names)
of custom (non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Layer...) | Instantiates a layer from a config dictionary. | [
"Instantiates",
"a",
"layer",
"from",
"a",
"config",
"dictionary",
"."
] | def deserialize(config, custom_objects=None):
"""Instantiates a layer from a config dictionary.
Arguments:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names)
of custom (non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Layer...)
"""
from tensorflow.contrib.keras.python.keras import models # pylint: disable=g-import-not-at-top
globs = globals() # All layers.
globs['Model'] = models.Model
globs['Sequential'] = models.Sequential
return deserialize_keras_object(
config,
module_objects=globs,
custom_objects=custom_objects,
printable_module_name='layer') | [
"def",
"deserialize",
"(",
"config",
",",
"custom_objects",
"=",
"None",
")",
":",
"from",
"tensorflow",
".",
"contrib",
".",
"keras",
".",
"python",
".",
"keras",
"import",
"models",
"# pylint: disable=g-import-not-at-top",
"globs",
"=",
"globals",
"(",
")",
"# All layers.",
"globs",
"[",
"'Model'",
"]",
"=",
"models",
".",
"Model",
"globs",
"[",
"'Sequential'",
"]",
"=",
"models",
".",
"Sequential",
"return",
"deserialize_keras_object",
"(",
"config",
",",
"module_objects",
"=",
"globs",
",",
"custom_objects",
"=",
"custom_objects",
",",
"printable_module_name",
"=",
"'layer'",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/keras/python/keras/layers/serialization.py#L44-L63 |
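`deserialize` resolves a `{'class_name': ..., 'config': ...}` dict against a table of known classes plus caller-supplied `custom_objects`. A minimal registry sketch of that mechanism (the toy `Dense` class is illustrative, not the Keras implementation):

```python
class Dense:
    def __init__(self, units):
        self.units = units

    @classmethod
    def from_config(cls, config):
        return cls(**config)

REGISTRY = {'Dense': Dense}

def deserialize(spec, custom_objects=None):
    """Instantiate an object from {'class_name': str, 'config': dict}."""
    table = dict(REGISTRY)
    table.update(custom_objects or {})
    cls = table.get(spec['class_name'])
    if cls is None:
        raise ValueError(f"Unknown class_name: {spec['class_name']}")
    return cls.from_config(spec['config'])

layer = deserialize({'class_name': 'Dense', 'config': {'units': 32}})
assert layer.units == 32
```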
|
facebookarchive/LogDevice | ce7726050edc49a1e15d9160e81c890736b779e2 | build/fbcode_builder/getdeps/builder.py | python | CargoBuilder._resolve_config | (self) | return "\n".join(config) | Returns a configuration to be put inside root Cargo.toml file which
patches the dependencies git code with local getdeps versions.
See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section | Returns a configuration to be put inside root Cargo.toml file which
patches the dependencies git code with local getdeps versions.
See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section | [
"Returns",
"a",
"configuration",
"to",
"be",
"put",
"inside",
"root",
"Cargo",
".",
"toml",
"file",
"which",
"patches",
"the",
"dependencies",
"git",
"code",
"with",
"local",
"getdeps",
"versions",
".",
"See",
"https",
":",
"//",
"doc",
".",
"rust",
"-",
"lang",
".",
"org",
"/",
"cargo",
"/",
"reference",
"/",
"manifest",
".",
"html#the",
"-",
"patch",
"-",
"section"
] | def _resolve_config(self):
"""
Returns a configuration to be put inside root Cargo.toml file which
patches the dependencies git code with local getdeps versions.
See https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section
"""
dep_to_git = self._resolve_dep_to_git()
dep_to_crates = CargoBuilder._resolve_dep_to_crates(
self.build_source_dir(), dep_to_git
)
config = []
for name in sorted(dep_to_git.keys()):
git_conf = dep_to_git[name]
crates = sorted(dep_to_crates.get(name, []))
if not crates:
continue # nothing to patch, move along
crates_patches = [
'{} = {{ path = "{}" }}'.format(
crate,
CargoBuilder._resolve_crate_to_path(crate, git_conf).replace(
"\\", "\\\\"
),
)
for crate in crates
]
config.append(
'[patch."{0}"]\n'.format(git_conf["repo_url"])
+ "\n".join(crates_patches)
)
return "\n".join(config) | [
"def",
"_resolve_config",
"(",
"self",
")",
":",
"dep_to_git",
"=",
"self",
".",
"_resolve_dep_to_git",
"(",
")",
"dep_to_crates",
"=",
"CargoBuilder",
".",
"_resolve_dep_to_crates",
"(",
"self",
".",
"build_source_dir",
"(",
")",
",",
"dep_to_git",
")",
"config",
"=",
"[",
"]",
"for",
"name",
"in",
"sorted",
"(",
"dep_to_git",
".",
"keys",
"(",
")",
")",
":",
"git_conf",
"=",
"dep_to_git",
"[",
"name",
"]",
"crates",
"=",
"sorted",
"(",
"dep_to_crates",
".",
"get",
"(",
"name",
",",
"[",
"]",
")",
")",
"if",
"not",
"crates",
":",
"continue",
"# nothing to patch, move along",
"crates_patches",
"=",
"[",
"'{} = {{ path = \"{}\" }}'",
".",
"format",
"(",
"crate",
",",
"CargoBuilder",
".",
"_resolve_crate_to_path",
"(",
"crate",
",",
"git_conf",
")",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")",
",",
")",
"for",
"crate",
"in",
"crates",
"]",
"config",
".",
"append",
"(",
"'[patch.\"{0}\"]\\n'",
".",
"format",
"(",
"git_conf",
"[",
"\"repo_url\"",
"]",
")",
"+",
"\"\\n\"",
".",
"join",
"(",
"crates_patches",
")",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"config",
")"
] | https://github.com/facebookarchive/LogDevice/blob/ce7726050edc49a1e15d9160e81c890736b779e2/build/fbcode_builder/getdeps/builder.py#L1182-L1213 |
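`_resolve_config` renders one Cargo `[patch."<repo_url>"]` section per dependency, pointing each crate at a local checkout. A sketch that formats the same TOML shape from plain dicts (the URLs and paths below are invented):

```python
def build_patch_config(dep_to_git, dep_to_crates, crate_paths):
    """Render Cargo `[patch]` sections mapping crates to local checkouts."""
    sections = []
    for name in sorted(dep_to_git):
        crates = sorted(dep_to_crates.get(name, ()))
        if not crates:
            continue  # nothing to patch for this dependency
        lines = [f'[patch."{dep_to_git[name]}"]']
        for crate in crates:
            path = crate_paths[crate].replace("\\", "\\\\")  # keep TOML-safe on Windows
            lines.append(f'{crate} = {{ path = "{path}" }}')
        sections.append("\n".join(lines))
    return "\n\n".join(sections)

print(build_patch_config(
    {"tokio": "https://github.com/tokio-rs/tokio.git"},
    {"tokio": ["tokio", "tokio-util"]},
    {"tokio": "/deps/tokio/tokio", "tokio-util": "/deps/tokio/tokio-util"},
))
```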
|
FreeCAD/FreeCAD | ba42231b9c6889b89e064d6d563448ed81e376ec | src/Mod/Arch/ArchEquipment.py | python | _Equipment.executeSketchArchFeatures | (self, obj, linkObj=None, index=None, linkElement=None) | To execute features in the SketchArch External Add-on (https://github.com/paullee0/FreeCAD_SketchArch)
- import ArchSketchObject module, and
- execute features that are common to ArchObjects (including Links) and ArchSketch
To install SketchArch External Add-on, see https://github.com/paullee0/FreeCAD_SketchArch#iv-install | To execute features in the SketchArch External Add-on (https://github.com/paullee0/FreeCAD_SketchArch)
- import ArchSketchObject module, and
- execute features that are common to ArchObjects (including Links) and ArchSketch | [
"To",
"execute",
"features",
"in",
"the",
"SketchArch",
"External",
"Add",
"-",
"on",
"(",
"https",
":",
"//",
"github",
".",
"com",
"/",
"paullee0",
"/",
"FreeCAD_SketchArch",
")",
"-",
"import",
"ArchSketchObject",
"module",
"and",
"-",
"execute",
"features",
"that",
"are",
"common",
"to",
"ArchObjects",
"(",
"including",
"Links",
")",
"and",
"ArchSketch"
] | def executeSketchArchFeatures(self, obj, linkObj=None, index=None, linkElement=None):
'''
To execute features in the SketchArch External Add-on (https://github.com/paullee0/FreeCAD_SketchArch)
- import ArchSketchObject module, and
- execute features that are common to ArchObjects (including Links) and ArchSketch
To install SketchArch External Add-on, see https://github.com/paullee0/FreeCAD_SketchArch#iv-install
'''
# To execute features in SketchArch External Add-on, if present
try:
import ArchSketchObject
# Execute SketchArch Feature - Intuitive Automatic Placement for Arch Windows/Doors, Equipment etc.
# see https://forum.freecadweb.org/viewtopic.php?f=23&t=50802
ArchSketchObject.updateAttachmentOffset(obj, linkObj)
except:
pass | [
"def",
"executeSketchArchFeatures",
"(",
"self",
",",
"obj",
",",
"linkObj",
"=",
"None",
",",
"index",
"=",
"None",
",",
"linkElement",
"=",
"None",
")",
":",
"# To execute features in SketchArch External Add-on, if present",
"try",
":",
"import",
"ArchSketchObject",
"# Execute SketchArch Feature - Intuitive Automatic Placement for Arch Windows/Doors, Equipment etc.",
"# see https://forum.freecadweb.org/viewtopic.php?f=23&t=50802",
"ArchSketchObject",
".",
"updateAttachmentOffset",
"(",
"obj",
",",
"linkObj",
")",
"except",
":",
"pass"
] | https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Arch/ArchEquipment.py#L350-L366 |
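`executeSketchArchFeatures` is the optional-dependency pattern: attempt the import, silently no-op when the add-on is missing. A compact variant that narrows the bare `except` to `ImportError`, so genuine bugs inside the add-on still surface:

```python
def run_optional_feature(obj, link_obj=None):
    """Invoke an optional add-on feature only if the add-on is installed."""
    try:
        import ArchSketchObject  # external add-on; may not be installed
    except ImportError:
        return  # feature silently unavailable
    ArchSketchObject.updateAttachmentOffset(obj, link_obj)
```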
||
bristolcrypto/SPDZ-2 | 721abfae849625a02ea49aabc534f9cf41ca643f | Compiler/comparison.py | python | PreMulC_with_inverses_and_vectors | (p, a) | p[i] = prod_{j=0}^{i-1} a[i]
Variant for vector registers using preprocessed inverses. | p[i] = prod_{j=0}^{i-1} a[i] | [
"p",
"[",
"i",
"]",
"=",
"prod_",
"{",
"j",
"=",
"0",
"}",
"^",
"{",
"i",
"-",
"1",
"}",
"a",
"[",
"i",
"]"
] | def PreMulC_with_inverses_and_vectors(p, a):
"""
p[i] = prod_{j=0}^{i-1} a[i]
Variant for vector registers using preprocessed inverses.
"""
k = len(p)
a_vec = program.curr_block.new_reg('s', size=k)
r = program.curr_block.new_reg('s', size=k)
w = program.curr_block.new_reg('s', size=k)
w_tmp = program.curr_block.new_reg('s', size=k)
z = program.curr_block.new_reg('s', size=k)
m = program.curr_block.new_reg('c', size=k)
t = [program.curr_block.new_reg('s', size=k) for i in range(1)]
c = [program.curr_block.new_reg('c') for i in range(k)]
# warning: computer scientists count from 0
if do_precomp:
vinverse(k, r, z)
else:
vprep(k, 'PreMulC', r, z, w_tmp)
for i in range(1,k):
if do_precomp:
muls(w[i], r[i], z[i-1])
else:
movs(w[i], w_tmp[i])
movs(a_vec[i], a[i])
movs(w[0], r[0])
movs(a_vec[0], a[0])
vmuls(k, t[0], w, a_vec)
vstartopen(k, t[0])
vstopopen(k, m)
PreMulC_end(p, a, c, m, z) | [
"def",
"PreMulC_with_inverses_and_vectors",
"(",
"p",
",",
"a",
")",
":",
"k",
"=",
"len",
"(",
"p",
")",
"a_vec",
"=",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'s'",
",",
"size",
"=",
"k",
")",
"r",
"=",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'s'",
",",
"size",
"=",
"k",
")",
"w",
"=",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'s'",
",",
"size",
"=",
"k",
")",
"w_tmp",
"=",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'s'",
",",
"size",
"=",
"k",
")",
"z",
"=",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'s'",
",",
"size",
"=",
"k",
")",
"m",
"=",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'c'",
",",
"size",
"=",
"k",
")",
"t",
"=",
"[",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'s'",
",",
"size",
"=",
"k",
")",
"for",
"i",
"in",
"range",
"(",
"1",
")",
"]",
"c",
"=",
"[",
"program",
".",
"curr_block",
".",
"new_reg",
"(",
"'c'",
")",
"for",
"i",
"in",
"range",
"(",
"k",
")",
"]",
"# warning: computer scientists count from 0",
"if",
"do_precomp",
":",
"vinverse",
"(",
"k",
",",
"r",
",",
"z",
")",
"else",
":",
"vprep",
"(",
"k",
",",
"'PreMulC'",
",",
"r",
",",
"z",
",",
"w_tmp",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"k",
")",
":",
"if",
"do_precomp",
":",
"muls",
"(",
"w",
"[",
"i",
"]",
",",
"r",
"[",
"i",
"]",
",",
"z",
"[",
"i",
"-",
"1",
"]",
")",
"else",
":",
"movs",
"(",
"w",
"[",
"i",
"]",
",",
"w_tmp",
"[",
"i",
"]",
")",
"movs",
"(",
"a_vec",
"[",
"i",
"]",
",",
"a",
"[",
"i",
"]",
")",
"movs",
"(",
"w",
"[",
"0",
"]",
",",
"r",
"[",
"0",
"]",
")",
"movs",
"(",
"a_vec",
"[",
"0",
"]",
",",
"a",
"[",
"0",
"]",
")",
"vmuls",
"(",
"k",
",",
"t",
"[",
"0",
"]",
",",
"w",
",",
"a_vec",
")",
"vstartopen",
"(",
"k",
",",
"t",
"[",
"0",
"]",
")",
"vstopopen",
"(",
"k",
",",
"m",
")",
"PreMulC_end",
"(",
"p",
",",
"a",
",",
"c",
",",
"m",
",",
"z",
")"
] | https://github.com/bristolcrypto/SPDZ-2/blob/721abfae849625a02ea49aabc534f9cf41ca643f/Compiler/comparison.py#L370-L401 |
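`PreMulC_with_inverses_and_vectors` obtains prefix products from preprocessed inverse pairs: with z[i] = r[i]^-1, the opened values m[i] = w[i]*a[i] telescope so that m[0]*...*m[i] = r[i]*a[0]*...*a[i], and multiplying by z[i] unmasks p[i]. A clear-text sketch of just that algebra over a prime field (no secret sharing, so this demonstrates the identity, not the protocol):

```python
import random

P = 2**61 - 1  # a prime field; the real protocol works on shares mod p

def prefix_products(a):
    """Clear-text analogue of PreMulC: p[i] = a[0]*...*a[i] mod P."""
    k = len(a)
    r = [random.randrange(1, P) for _ in range(k)]   # random masks
    z = [pow(ri, P - 2, P) for ri in r]              # z[i] = r[i]^-1 (Fermat)
    # w[0] = r[0]; w[i] = r[i] * z[i-1]  -- the preprocessed "inverse pairs"
    w = [r[0]] + [r[i] * z[i - 1] % P for i in range(1, k)]
    m = [w[i] * a[i] % P for i in range(k)]          # the values opened in MPC
    # Telescoping: m[0]*...*m[i] = r[i] * (a[0]*...*a[i]), so unmask with z[i].
    p, acc = [], 1
    for i in range(k):
        acc = acc * m[i] % P
        p.append(acc * z[i] % P)
    return p

a = [3, 5, 7, 11]
assert prefix_products(a) == [3, 15, 105, 1155]
```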
||
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | Wrapping/Python/paraview/coprocessing.py | python | CoProcessor.WriteData | (self, datadescription) | This method will update all writes present in the pipeline, as
needed, to generate the output data files, respecting the
write-frequencies set on the writers. | This method will update all writes present in the pipeline, as
needed, to generate the output data files, respecting the
write-frequencies set on the writers. | [
"This",
"method",
"will",
"update",
"all",
"writes",
"present",
"in",
"the",
"pipeline",
"as",
"needed",
"to",
"generate",
"the",
"output",
"data",
"files",
"respecting",
"the",
"write",
"-",
"frequencies",
"set",
"on",
"the",
"writers",
"."
] | def WriteData(self, datadescription):
"""This method will update all writes present in the pipeline, as
needed, to generate the output data files, respecting the
write-frequencies set on the writers."""
timestep = datadescription.GetTimeStep()
for writer in self.__WritersList:
frequency = writer.parameters.GetProperty(
"WriteFrequency").GetElement(0)
if self.NeedToOutput(datadescription, frequency) or datadescription.GetForceOutput() == True:
fileName = writer.parameters.GetProperty("FileName").GetElement(0)
paddingamount = writer.parameters.GetProperty("PaddingAmount").GetElement(0)
helperName = writer.GetXMLName()
if helperName == "ExodusIIWriter":
ts = "."+str(timestep).rjust(paddingamount, '0')
writer.FileName = fileName + ts
else:
ts = str(timestep).rjust(paddingamount, '0')
writer.FileName = fileName.replace("%t", ts)
if '/' in writer.FileName and createDirectoriesIfNeeded:
oktowrite = [1.]
import vtk
comm = vtk.vtkMultiProcessController.GetGlobalController()
if comm.GetLocalProcessId() == 0:
import os
newDir = writer.FileName[0:writer.FileName.rfind('/')]
try:
os.makedirs(newDir)
except OSError:
if not os.path.isdir(newDir):
print ("ERROR: Cannot make directory for", writer.FileName, ". No data will be written.")
oktowrite[0] = 0.
comm.Broadcast(oktowrite, 1, 0)
if oktowrite[0] == 0:
# we can't make the directory so no reason to update the pipeline
return
writer.UpdatePipeline(datadescription.GetTime())
self.__AppendToCinemaDTable(timestep, "writer_%s" % self.__WritersList.index(writer), writer.FileName)
self.__FinalizeCinemaDTable() | [
"def",
"WriteData",
"(",
"self",
",",
"datadescription",
")",
":",
"timestep",
"=",
"datadescription",
".",
"GetTimeStep",
"(",
")",
"for",
"writer",
"in",
"self",
".",
"__WritersList",
":",
"frequency",
"=",
"writer",
".",
"parameters",
".",
"GetProperty",
"(",
"\"WriteFrequency\"",
")",
".",
"GetElement",
"(",
"0",
")",
"if",
"self",
".",
"NeedToOutput",
"(",
"datadescription",
",",
"frequency",
")",
"or",
"datadescription",
".",
"GetForceOutput",
"(",
")",
"==",
"True",
":",
"fileName",
"=",
"writer",
".",
"parameters",
".",
"GetProperty",
"(",
"\"FileName\"",
")",
".",
"GetElement",
"(",
"0",
")",
"paddingamount",
"=",
"writer",
".",
"parameters",
".",
"GetProperty",
"(",
"\"PaddingAmount\"",
")",
".",
"GetElement",
"(",
"0",
")",
"helperName",
"=",
"writer",
".",
"GetXMLName",
"(",
")",
"if",
"helperName",
"==",
"\"ExodusIIWriter\"",
":",
"ts",
"=",
"\".\"",
"+",
"str",
"(",
"timestep",
")",
".",
"rjust",
"(",
"paddingamount",
",",
"'0'",
")",
"writer",
".",
"FileName",
"=",
"fileName",
"+",
"ts",
"else",
":",
"ts",
"=",
"str",
"(",
"timestep",
")",
".",
"rjust",
"(",
"paddingamount",
",",
"'0'",
")",
"writer",
".",
"FileName",
"=",
"fileName",
".",
"replace",
"(",
"\"%t\"",
",",
"ts",
")",
"if",
"'/'",
"in",
"writer",
".",
"FileName",
"and",
"createDirectoriesIfNeeded",
":",
"oktowrite",
"=",
"[",
"1.",
"]",
"import",
"vtk",
"comm",
"=",
"vtk",
".",
"vtkMultiProcessController",
".",
"GetGlobalController",
"(",
")",
"if",
"comm",
".",
"GetLocalProcessId",
"(",
")",
"==",
"0",
":",
"import",
"os",
"newDir",
"=",
"writer",
".",
"FileName",
"[",
"0",
":",
"writer",
".",
"FileName",
".",
"rfind",
"(",
"'/'",
")",
"]",
"try",
":",
"os",
".",
"makedirs",
"(",
"newDir",
")",
"except",
"OSError",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"newDir",
")",
":",
"print",
"(",
"\"ERROR: Cannot make directory for\"",
",",
"writer",
".",
"FileName",
",",
"\". No data will be written.\"",
")",
"oktowrite",
"[",
"0",
"]",
"=",
"0.",
"comm",
".",
"Broadcast",
"(",
"oktowrite",
",",
"1",
",",
"0",
")",
"if",
"oktowrite",
"[",
"0",
"]",
"==",
"0",
":",
"# we can't make the directory so no reason to update the pipeline",
"return",
"writer",
".",
"UpdatePipeline",
"(",
"datadescription",
".",
"GetTime",
"(",
")",
")",
"self",
".",
"__AppendToCinemaDTable",
"(",
"timestep",
",",
"\"writer_%s\"",
"%",
"self",
".",
"__WritersList",
".",
"index",
"(",
"writer",
")",
",",
"writer",
".",
"FileName",
")",
"self",
".",
"__FinalizeCinemaDTable",
"(",
")"
] | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Wrapping/Python/paraview/coprocessing.py#L228-L265 |
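`WriteData` expands `%t` in each writer's filename to a zero-padded timestep, except for the ExodusII writer, which appends a `.NNNN` suffix instead. The substitution on its own:

```python
def timestep_filename(file_name, timestep, padding, exodus=False):
    """Expand a writer filename for one timestep, mirroring the %t convention."""
    ts = str(timestep).rjust(padding, '0')
    if exodus:
        return f"{file_name}.{ts}"       # ExodusII appends '.NNNN'
    return file_name.replace("%t", ts)   # others substitute the %t token

assert timestep_filename("out_%t.vtk", 7, 4) == "out_0007.vtk"
assert timestep_filename("mesh.e", 7, 4, exodus=True) == "mesh.e.0007"
```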
||
Slicer/SlicerGitSVNArchive | 65e92bb16c2b32ea47a1a66bee71f238891ee1ca | Modules/Scripted/DICOMLib/DICOMUtils.py | python | registerSlicerURLHandler | () | Registers slicer:// protocol with this executable.
For now, only implemented on Windows. | Registers slicer:// protocol with this executable.
For now, only implemented on Windows. | [
"Registers",
"slicer",
":",
"//",
"protocol",
"with",
"this",
"executable",
".",
"For",
"now",
"only",
"implemented",
"on",
"Windows",
"."
] | def registerSlicerURLHandler():
"""
Registers slicer:// protocol with this executable.
For now, only implemented on Windows.
"""
if os.name == 'nt':
slicerLauncherPath = os.path.abspath(slicer.app.launcherExecutableFilePath)
urlHandlerRegFile = r"""Windows Registry Editor Version 5.00
[HKEY_CLASSES_ROOT\Slicer]
@="URL:Slicer Slicer Protocol"
"URL Protocol"=""
[HKEY_CLASSES_ROOT\Slicer\DefaultIcon]
@="Slicer.exe,1"
[HKEY_CLASSES_ROOT\Slicer\shell]
[HKEY_CLASSES_ROOT\Slicer\shell\open]
[HKEY_CLASSES_ROOT\Slicer\shell\open\command]
@="\"{0}\" \"%1\""
""".format(slicerLauncherPath.replace("\\","\\\\"))
urlHandlerRegFilePath = slicer.app.temporaryPath+"registerSlicerUrlHandler.reg"
with open(urlHandlerRegFilePath, "wt") as f:
f.write(urlHandlerRegFile)
slicer.qSlicerApplicationHelper().runAsAdmin("Regedt32.exe", "/s "+urlHandlerRegFilePath)
else:
raise NotImplementedError() | [
"def",
"registerSlicerURLHandler",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"slicerLauncherPath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"slicer",
".",
"app",
".",
"launcherExecutableFilePath",
")",
"urlHandlerRegFile",
"=",
"r\"\"\"Windows Registry Editor Version 5.00\n[HKEY_CLASSES_ROOT\\Slicer]\n@=\"URL:Slicer Slicer Protocol\"\n\"URL Protocol\"=\"\"\n[HKEY_CLASSES_ROOT\\Slicer\\DefaultIcon]\n@=\"Slicer.exe,1\"\n[HKEY_CLASSES_ROOT\\Slicer\\shell]\n[HKEY_CLASSES_ROOT\\Slicer\\shell\\open]\n[HKEY_CLASSES_ROOT\\Slicer\\shell\\open\\command]\n@=\"\\\"{0}\\\" \\\"%1\\\"\"\n\"\"\"",
".",
"format",
"(",
"slicerLauncherPath",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"\\\\\\\\\"",
")",
")",
"urlHandlerRegFilePath",
"=",
"slicer",
".",
"app",
".",
"temporaryPath",
"+",
"\"registerSlicerUrlHandler.reg\"",
"with",
"open",
"(",
"urlHandlerRegFilePath",
",",
"\"wt\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"urlHandlerRegFile",
")",
"slicer",
".",
"qSlicerApplicationHelper",
"(",
")",
".",
"runAsAdmin",
"(",
"\"Regedt32.exe\"",
",",
"\"/s \"",
"+",
"urlHandlerRegFilePath",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Modules/Scripted/DICOMLib/DICOMUtils.py#L739-L762 |
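`registerSlicerURLHandler` registers the `slicer://` scheme by writing a `.reg` file and importing it with admin rights. A sketch doing the equivalent directly via `winreg` under `HKEY_CURRENT_USER\Software\Classes`, which needs no elevation (the executable path is illustrative):

```python
import os
import sys

def register_url_handler(scheme, exe_path):
    """Register scheme:// under HKCU\\Software\\Classes (no admin needed)."""
    if os.name != 'nt':
        raise NotImplementedError("URL protocol registration is Windows-only here")
    import winreg
    root = winreg.CreateKey(winreg.HKEY_CURRENT_USER,
                            rf"Software\Classes\{scheme}")
    winreg.SetValueEx(root, None, 0, winreg.REG_SZ, f"URL:{scheme} Protocol")
    winreg.SetValueEx(root, "URL Protocol", 0, winreg.REG_SZ, "")
    cmd = winreg.CreateKey(root, r"shell\open\command")
    winreg.SetValueEx(cmd, None, 0, winreg.REG_SZ, f'"{exe_path}" "%1"')

if sys.platform == 'win32':
    register_url_handler("slicer", r"C:\Slicer\Slicer.exe")  # path is illustrative
```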
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | TextAttr.GetBackgroundColour | (*args, **kwargs) | return _controls_.TextAttr_GetBackgroundColour(*args, **kwargs) | GetBackgroundColour(self) -> Colour | GetBackgroundColour(self) -> Colour | [
"GetBackgroundColour",
"(",
"self",
")",
"-",
">",
"Colour"
] | def GetBackgroundColour(*args, **kwargs):
"""GetBackgroundColour(self) -> Colour"""
return _controls_.TextAttr_GetBackgroundColour(*args, **kwargs) | [
"def",
"GetBackgroundColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TextAttr_GetBackgroundColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L1643-L1645 |
|
tangzhenyu/Scene-Text-Understanding | 0f7ffc7aea5971a50cdc03d33d0a41075285948b | ctpn_crnn_ocr/CTPN/caffe/scripts/cpp_lint.py | python | _SetCountingStyle | (level) | Sets the module's counting options. | Sets the module's counting options. | [
"Sets",
"the",
"module",
"s",
"counting",
"options",
"."
] | def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level) | [
"def",
"_SetCountingStyle",
"(",
"level",
")",
":",
"_cpplint_state",
".",
"SetCountingStyle",
"(",
"level",
")"
] | https://github.com/tangzhenyu/Scene-Text-Understanding/blob/0f7ffc7aea5971a50cdc03d33d0a41075285948b/ctpn_crnn_ocr/CTPN/caffe/scripts/cpp_lint.py#L787-L789 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/reshape/merge.py | python | merge_asof | (
left: DataFrame | Series,
right: DataFrame | Series,
on: IndexLabel | None = None,
left_on: IndexLabel | None = None,
right_on: IndexLabel | None = None,
left_index: bool = False,
right_index: bool = False,
by=None,
left_by=None,
right_by=None,
suffixes: Suffixes = ("_x", "_y"),
tolerance=None,
allow_exact_matches: bool = True,
direction: str = "backward",
) | return op.get_result() | Perform an asof merge.
This is similar to a left-join except that we match on nearest
key rather than equal keys. Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
Parameters
----------
left : DataFrame or named Series
right : DataFrame or named Series
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : bool
Use the index of the left DataFrame as the join key.
right_index : bool
Use the index of the right DataFrame as the join key.
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
right_by : column name
Field names to match on in the right DataFrame.
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : int or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : bool, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than).
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
Returns
-------
merged : DataFrame
See Also
--------
merge : Merge with a database-style join.
merge_ordered : Merge with optional filling/interpolation.
Examples
--------
>>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on="a")
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on="a", allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on="a", direction="forward")
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on="a", direction="nearest")
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes = pd.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.030"),
... pd.Timestamp("2016-05-25 13:30:00.041"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.049"),
... pd.Timestamp("2016-05-25 13:30:00.072"),
... pd.Timestamp("2016-05-25 13:30:00.075")
... ],
... "ticker": [
... "GOOG",
... "MSFT",
... "MSFT",
... "MSFT",
... "GOOG",
... "AAPL",
... "GOOG",
... "MSFT"
... ],
... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
... }
... )
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades = pd.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.038"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048")
... ],
... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
... "quantity": [75, 155, 100, 100, 100]
... }
... )
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes, on="time", by="ticker")
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(
... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=pd.Timedelta("10ms"),
... allow_exact_matches=False
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN | Perform an asof merge. | [
"Perform",
"an",
"asof",
"merge",
"."
] | def merge_asof(
left: DataFrame | Series,
right: DataFrame | Series,
on: IndexLabel | None = None,
left_on: IndexLabel | None = None,
right_on: IndexLabel | None = None,
left_index: bool = False,
right_index: bool = False,
by=None,
left_by=None,
right_by=None,
suffixes: Suffixes = ("_x", "_y"),
tolerance=None,
allow_exact_matches: bool = True,
direction: str = "backward",
) -> DataFrame:
"""
Perform an asof merge.
This is similar to a left-join except that we match on nearest
key rather than equal keys. Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
Parameters
----------
left : DataFrame or named Series
right : DataFrame or named Series
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore this must be a numeric column,
such as datetimelike, integer, or float. On or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : bool
Use the index of the left DataFrame as the join key.
right_index : bool
Use the index of the right DataFrame as the join key.
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
right_by : column name
Field names to match on in the right DataFrame.
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : int or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : bool, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than).
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
Returns
-------
merged : DataFrame
See Also
--------
merge : Merge with a database-style join.
merge_ordered : Merge with optional filling/interpolation.
Examples
--------
>>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on="a")
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on="a", allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on="a", direction="forward")
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on="a", direction="nearest")
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world times-series example
>>> quotes = pd.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.030"),
... pd.Timestamp("2016-05-25 13:30:00.041"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.049"),
... pd.Timestamp("2016-05-25 13:30:00.072"),
... pd.Timestamp("2016-05-25 13:30:00.075")
... ],
... "ticker": [
... "GOOG",
... "MSFT",
... "MSFT",
... "MSFT",
... "GOOG",
... "AAPL",
... "GOOG",
... "MSFT"
... ],
... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]
... }
... )
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades = pd.DataFrame(
... {
... "time": [
... pd.Timestamp("2016-05-25 13:30:00.023"),
... pd.Timestamp("2016-05-25 13:30:00.038"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048"),
... pd.Timestamp("2016-05-25 13:30:00.048")
... ],
... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
... "price": [51.95, 51.95, 720.77, 720.92, 98.0],
... "quantity": [75, 155, 100, 100, 100]
... }
... )
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes, on="time", by="ticker")
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(
... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(
... trades,
... quotes,
... on="time",
... by="ticker",
... tolerance=pd.Timedelta("10ms"),
... allow_exact_matches=False
... )
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
"""
op = _AsOfMerge(
left,
right,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
by=by,
left_by=left_by,
right_by=right_by,
suffixes=suffixes,
how="asof",
tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction,
)
return op.get_result() | [
"def",
"merge_asof",
"(",
"left",
":",
"DataFrame",
"|",
"Series",
",",
"right",
":",
"DataFrame",
"|",
"Series",
",",
"on",
":",
"IndexLabel",
"|",
"None",
"=",
"None",
",",
"left_on",
":",
"IndexLabel",
"|",
"None",
"=",
"None",
",",
"right_on",
":",
"IndexLabel",
"|",
"None",
"=",
"None",
",",
"left_index",
":",
"bool",
"=",
"False",
",",
"right_index",
":",
"bool",
"=",
"False",
",",
"by",
"=",
"None",
",",
"left_by",
"=",
"None",
",",
"right_by",
"=",
"None",
",",
"suffixes",
":",
"Suffixes",
"=",
"(",
"\"_x\"",
",",
"\"_y\"",
")",
",",
"tolerance",
"=",
"None",
",",
"allow_exact_matches",
":",
"bool",
"=",
"True",
",",
"direction",
":",
"str",
"=",
"\"backward\"",
",",
")",
"->",
"DataFrame",
":",
"op",
"=",
"_AsOfMerge",
"(",
"left",
",",
"right",
",",
"on",
"=",
"on",
",",
"left_on",
"=",
"left_on",
",",
"right_on",
"=",
"right_on",
",",
"left_index",
"=",
"left_index",
",",
"right_index",
"=",
"right_index",
",",
"by",
"=",
"by",
",",
"left_by",
"=",
"left_by",
",",
"right_by",
"=",
"right_by",
",",
"suffixes",
"=",
"suffixes",
",",
"how",
"=",
"\"asof\"",
",",
"tolerance",
"=",
"tolerance",
",",
"allow_exact_matches",
"=",
"allow_exact_matches",
",",
"direction",
"=",
"direction",
",",
")",
"return",
"op",
".",
"get_result",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/reshape/merge.py#L325-L597 |
|
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/_shard/sharded_tensor/__init__.py | python | pre_load_state_dict_hook | (module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) | Pre-load state dict hook to add ShardedTensor to the module. | Pre-load state dict hook to add ShardedTensor to the module. | [
"Pre",
"-",
"load",
"state",
"dict",
"hook",
"to",
"add",
"ShardedTensor",
"to",
"the",
"module",
"."
] | def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
"""
Pre-load state dict hook to add ShardedTensor to the module.
"""
for submodule_name, submodule in module.named_modules():
for attr_name, attr in submodule.__dict__.items():
key = prefix + submodule_name + '.' + attr_name
if key in state_dict:
if isinstance(state_dict[key], ShardedTensor):
setattr(submodule, attr_name, state_dict[key]) | [
"def",
"pre_load_state_dict_hook",
"(",
"module",
",",
"state_dict",
",",
"prefix",
",",
"local_metadata",
",",
"strict",
",",
"missing_keys",
",",
"unexpected_keys",
",",
"error_msgs",
")",
":",
"for",
"submodule_name",
",",
"submodule",
"in",
"module",
".",
"named_modules",
"(",
")",
":",
"for",
"attr_name",
",",
"attr",
"in",
"submodule",
".",
"__dict__",
".",
"items",
"(",
")",
":",
"key",
"=",
"prefix",
"+",
"submodule_name",
"+",
"'.'",
"+",
"attr_name",
"if",
"key",
"in",
"state_dict",
":",
"if",
"isinstance",
"(",
"state_dict",
"[",
"key",
"]",
",",
"ShardedTensor",
")",
":",
"setattr",
"(",
"submodule",
",",
"attr_name",
",",
"state_dict",
"[",
"key",
"]",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/_shard/sharded_tensor/__init__.py#L357-L366 |
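`pre_load_state_dict_hook` walks `named_modules()` and rebinds any attribute whose qualified key appears in the state dict. A torch-free toy reproducing the traversal-and-`setattr` logic:

```python
def preload_hook(module, state_dict, prefix=""):
    """Rebind any attribute whose qualified name appears in state_dict."""
    for sub_name, submodule in module.named_modules():
        for attr_name in list(vars(submodule)):   # list() guards against mutation
            key = prefix + sub_name + "." + attr_name
            if key in state_dict:
                setattr(submodule, attr_name, state_dict[key])

class Leaf:
    def __init__(self):
        self.weight = 0
    def named_modules(self):
        yield "leaf", self

class Root:
    def __init__(self):
        self.leaf = Leaf()
    def named_modules(self):
        yield "", self
        yield "leaf", self.leaf

root = Root()
preload_hook(root, {"leaf.weight": 42})
assert root.leaf.weight == 42
```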
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/versioncontrol.py | python | RevOptions.make_new | (self, rev) | return self.vc_class.make_rev_options(rev, extra_args=self.extra_args) | Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object. | [] | def make_new(self, rev):
# type: (str) -> RevOptions
"""
Make a copy of the current instance, but with a new rev.
Args:
rev: the name of the revision for the new object.
"""
return self.vc_class.make_rev_options(rev, extra_args=self.extra_args) | [
"def",
"make_new",
"(",
"self",
",",
"rev",
")",
":",
"# type: (str) -> RevOptions",
"return",
"self",
".",
"vc_class",
".",
"make_rev_options",
"(",
"rev",
",",
"extra_args",
"=",
"self",
".",
"extra_args",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/versioncontrol.py#L345-L361 |
||
tangzhenyu/Scene-Text-Understanding | 0f7ffc7aea5971a50cdc03d33d0a41075285948b | SynthText_Chinese/synth_utils.py | python | ssc | (v) | return np.array([[ 0, -v[2], v[1]],
[ v[2], 0, -v[0]],
[-v[1], v[0], 0]]) | Returns the skew-symmetric cross-product matrix corresponding to v. | Returns the skew-symmetric cross-product matrix corresponding to v. | [
"Returns",
"the",
"skew",
"-",
"symmetric",
"cross",
"-",
"product",
"matrix",
"corresponding",
"to",
"v",
"."
] | def ssc(v):
"""
Returns the skew-symmetric cross-product matrix corresponding to v.
"""
v /= np.linalg.norm(v)
return np.array([[ 0, -v[2], v[1]],
[ v[2], 0, -v[0]],
[-v[1], v[0], 0]]) | [
"def",
"ssc",
"(",
"v",
")",
":",
"v",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"v",
")",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"v",
"[",
"2",
"]",
",",
"v",
"[",
"1",
"]",
"]",
",",
"[",
"v",
"[",
"2",
"]",
",",
"0",
",",
"-",
"v",
"[",
"0",
"]",
"]",
",",
"[",
"-",
"v",
"[",
"1",
"]",
",",
"v",
"[",
"0",
"]",
",",
"0",
"]",
"]",
")"
] | https://github.com/tangzhenyu/Scene-Text-Understanding/blob/0f7ffc7aea5971a50cdc03d33d0a41075285948b/SynthText_Chinese/synth_utils.py#L232-L239 |
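`ssc` returns the skew-symmetric matrix [v]x with [v]x @ w = v x w; note the row's version also normalizes `v` in place first, so the caller's array is mutated and the product is the cross product with the *unit* vector. A sketch of the matrix itself, plus checks of the defining identities:

```python
import numpy as np

def skew(v):
    """Skew-symmetric cross-product matrix: skew(v) @ w == np.cross(v, w)."""
    return np.array([[0.0,  -v[2],  v[1]],
                     [v[2],  0.0,  -v[0]],
                     [-v[1], v[0],  0.0]])

v = np.array([1.0, 2.0, 3.0])
w = np.array([4.0, 5.0, 6.0])
assert np.allclose(skew(v) @ w, np.cross(v, w))
assert np.allclose(skew(v).T, -skew(v))   # antisymmetry
```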
|
wy1iu/LargeMargin_Softmax_Loss | c3e9f20e4f16e2b4daf7d358a614366b9b39a6ec | scripts/cpp_lint.py | python | UpdateIncludeState | (filename, include_state, io=codecs) | return True | Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise. | Fill up the include_state with new includes found from the file. | [
"Fill",
"up",
"the",
"include_state",
"with",
"new",
"includes",
"found",
"from",
"the",
"file",
"."
] | def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True | [
"def",
"UpdateIncludeState",
"(",
"filename",
",",
"include_state",
",",
"io",
"=",
"codecs",
")",
":",
"headerfile",
"=",
"None",
"try",
":",
"headerfile",
"=",
"io",
".",
"open",
"(",
"filename",
",",
"'r'",
",",
"'utf8'",
",",
"'replace'",
")",
"except",
"IOError",
":",
"return",
"False",
"linenum",
"=",
"0",
"for",
"line",
"in",
"headerfile",
":",
"linenum",
"+=",
"1",
"clean_line",
"=",
"CleanseComments",
"(",
"line",
")",
"match",
"=",
"_RE_PATTERN_INCLUDE",
".",
"search",
"(",
"clean_line",
")",
"if",
"match",
":",
"include",
"=",
"match",
".",
"group",
"(",
"2",
")",
"# The value formatting is cute, but not really used right now.",
"# What matters here is that the key is in include_state.",
"include_state",
".",
"setdefault",
"(",
"include",
",",
"'%s:%d'",
"%",
"(",
"filename",
",",
"linenum",
")",
")",
"return",
"True"
] | https://github.com/wy1iu/LargeMargin_Softmax_Loss/blob/c3e9f20e4f16e2b4daf7d358a614366b9b39a6ec/scripts/cpp_lint.py#L4454-L4480 |
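`UpdateIncludeState` scans a header line by line and records the first occurrence of each `#include`. A standalone sketch with an explicit regex (cpplint's real `_RE_PATTERN_INCLUDE` also captures the `<` vs `"` style; the pattern below is an assumed simplification):

```python
import re

INCLUDE_RE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]+)[>"]')

def scan_includes(path):
    """Map each included header to the 'file:line' where it first appears."""
    includes = {}
    try:
        with open(path, encoding='utf-8', errors='replace') as f:
            for lineno, line in enumerate(f, start=1):
                m = INCLUDE_RE.search(line)
                if m:
                    includes.setdefault(m.group(2), f'{path}:{lineno}')
    except OSError:
        return {}
    return includes
```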
|
oracle/graaljs | 36a56e8e993d45fc40939a3a4d9c0c24990720f1 | graal-nodejs/tools/gyp/pylib/gyp/generator/make.py | python | EscapeMakeVariableExpansion | (s) | return s.replace("$", "$$") | Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally. | Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally. | [
"Make",
"has",
"its",
"own",
"variable",
"expansion",
"syntax",
"using",
"$",
".",
"We",
"must",
"escape",
"it",
"for",
"string",
"to",
"be",
"interpreted",
"literally",
"."
] | def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace("$", "$$") | [
"def",
"EscapeMakeVariableExpansion",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"\"$\"",
",",
"\"$$\"",
")"
] | https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/gyp/pylib/gyp/generator/make.py#L623-L626 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/importlib/_bootstrap_external.py | python | _LoaderBasics.exec_module | (self, module) | Execute the module. | Execute the module. | [
"Execute",
"the",
"module",
"."
] | def exec_module(self, module):
"""Execute the module."""
code = self.get_code(module.__name__)
if code is None:
raise ImportError('cannot load module {!r} when get_code() '
'returns None'.format(module.__name__))
_bootstrap._call_with_frames_removed(exec, code, module.__dict__) | [
"def",
"exec_module",
"(",
"self",
",",
"module",
")",
":",
"code",
"=",
"self",
".",
"get_code",
"(",
"module",
".",
"__name__",
")",
"if",
"code",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"'cannot load module {!r} when get_code() '",
"'returns None'",
".",
"format",
"(",
"module",
".",
"__name__",
")",
")",
"_bootstrap",
".",
"_call_with_frames_removed",
"(",
"exec",
",",
"code",
",",
"module",
".",
"__dict__",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/importlib/_bootstrap_external.py#L722-L728 |
||
Jack-Cherish/Algorithm | ab3e0f05ff15972f282b6122b73dfa0e84b5960b | Sort Algorithms.py | python | MergeSort | (input_list) || return sorted_list | Function: merge sort (ascending)
Website:
http://cuijiahua.com
Parameters:
input_list - the list to sort
Returns:
sorted_list - the list sorted in ascending order | Function: merge sort (ascending)
Website:
http://cuijiahua.com
Parameters:
input_list - the list to sort
Returns:
sorted_list - the list sorted in ascending order | [
"函数说明",
":",
"归并排序(升序)",
"Website",
":",
"http",
":",
"//",
"cuijiahua",
".",
"com",
"Parameters",
":",
"input_list",
"-",
"待排序列表",
"Returns",
":",
"sorted_list",
"-",
"升序排序好的列表"
] | def MergeSort(input_list):
'''
Function: merge sort (ascending)
Website:
http://cuijiahua.com
Parameters:
input_list - the list to sort
Returns:
sorted_list - the list sorted in ascending order
'''
def merge(input_list, left, mid, right, temp):
'''
Function: merge helper
Website:
http://cuijiahua.com
Parameters:
input_list - the list being merged
left - left pointer
right - right pointer
temp - temporary buffer
Returns:
None
'''
i = left
j = mid + 1
k = 0
while i <= mid and j <= right:
if input_list[i] <= input_list[j]:
temp[k] = input_list[i]
i += 1
else:
temp[k] = input_list[j]
j += 1
k += 1
while i <= mid:
temp[k] = input_list[i]
i += 1
k += 1
while j <= right:
temp[k] = input_list[j]
j += 1
k += 1
k = 0
while left <= right:
input_list[left] = temp[k]
left += 1
k += 1
def merge_sort(input_list, left, right, temp):
if left >= right:
return;
mid = (right + left) // 2
merge_sort(input_list, left, mid, temp)
merge_sort(input_list, mid + 1, right, temp)
merge(input_list, left, mid, right, temp)
if len(input_list) == 0:
return []
sorted_list = input_list
temp = [0] * len(sorted_list)
merge_sort(sorted_list, 0, len(sorted_list) - 1, temp)
return sorted_list | [
"def",
"MergeSort",
"(",
"input_list",
")",
":",
"def",
"merge",
"(",
"input_list",
",",
"left",
",",
"mid",
",",
"right",
",",
"temp",
")",
":",
"'''\n\t\t函数说明:合并函数\n\t\tWebsite:\n\t\t\thttp://cuijiahua.com\n\t\tParameters:\n\t\t\tinput_list - 待合并列表\n\t\t\tleft - 左指针\n\t\t\tright - 右指针\n\t\t\ttemp - 临时列表\n\t\tReturns:\n\t\t\t无\n\t\t'''",
"i",
"=",
"left",
"j",
"=",
"mid",
"+",
"1",
"k",
"=",
"0",
"while",
"i",
"<=",
"mid",
"and",
"j",
"<=",
"right",
":",
"if",
"input_list",
"[",
"i",
"]",
"<=",
"input_list",
"[",
"j",
"]",
":",
"temp",
"[",
"k",
"]",
"=",
"input_list",
"[",
"i",
"]",
"i",
"+=",
"1",
"else",
":",
"temp",
"[",
"k",
"]",
"=",
"input_list",
"[",
"j",
"]",
"j",
"+=",
"1",
"k",
"+=",
"1",
"while",
"i",
"<=",
"mid",
":",
"temp",
"[",
"k",
"]",
"=",
"input_list",
"[",
"i",
"]",
"i",
"+=",
"1",
"k",
"+=",
"1",
"while",
"j",
"<=",
"right",
":",
"temp",
"[",
"k",
"]",
"=",
"input_list",
"[",
"j",
"]",
"j",
"+=",
"1",
"k",
"+=",
"1",
"k",
"=",
"0",
"while",
"left",
"<=",
"right",
":",
"input_list",
"[",
"left",
"]",
"=",
"temp",
"[",
"k",
"]",
"left",
"+=",
"1",
"k",
"+=",
"1",
"def",
"merge_sort",
"(",
"input_list",
",",
"left",
",",
"right",
",",
"temp",
")",
":",
"if",
"left",
">=",
"right",
":",
"return",
"mid",
"=",
"(",
"right",
"+",
"left",
")",
"//",
"2",
"merge_sort",
"(",
"input_list",
",",
"left",
",",
"mid",
",",
"temp",
")",
"merge_sort",
"(",
"input_list",
",",
"mid",
"+",
"1",
",",
"right",
",",
"temp",
")",
"merge",
"(",
"input_list",
",",
"left",
",",
"mid",
",",
"right",
",",
"temp",
")",
"if",
"len",
"(",
"input_list",
")",
"==",
"0",
":",
"return",
"[",
"]",
"sorted_list",
"=",
"input_list",
"temp",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"sorted_list",
")",
"merge_sort",
"(",
"sorted_list",
",",
"0",
",",
"len",
"(",
"sorted_list",
")",
"-",
"1",
",",
"temp",
")",
"return",
"sorted_list"
] | https://github.com/Jack-Cherish/Algorithm/blob/ab3e0f05ff15972f282b6122b73dfa0e84b5960b/Sort Algorithms.py#L245-L310 |
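The MergeSort row above merges in place through a shared `temp` buffer. For contrast, a compact top-down merge sort in idiomatic Python 3, allocating per merge but keeping the same O(n log n) behavior and stability:

```python
def merge_sort(seq):
    """Stable top-down merge sort; returns a new sorted list."""
    if len(seq) <= 1:
        return list(seq)
    mid = len(seq) // 2
    left, right = merge_sort(seq[:mid]), merge_sort(seq[mid:])
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:   # '<=' keeps the sort stable
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

assert merge_sort([5, 2, 9, 1, 5, 6]) == [1, 2, 5, 5, 6, 9]
```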
|
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/ros/roslib/src/roslib/rosenv.py | python | get_test_results_dir | (env=None) | return os.path.join(get_ros_home(env), 'test_results') | Get directory to use for writing test result files. There are multiple
possible locations for this. If ROS_HOME is set ROS_HOME/test_results
is used. Otherwise $HOME/.ros/test_results is used.
@param env: environment dictionary (defaults to os.environ)
@type env: dict
@return: path to use use for log file directory
@rtype: str | Get directory to use for writing test result files. There are multiple
possible locations for this. If ROS_HOME is set ROS_HOME/test_results
is used. Otherwise $HOME/.ros/test_results is used. | [
"Get",
"directory",
"to",
"use",
"for",
"writing",
"test",
"result",
"files",
".",
"There",
"are",
"multiple",
"possible",
"locations",
"for",
"this",
".",
"If",
"ROS_HOME",
"is",
"set",
"ROS_HOME",
"/",
"test_results",
"is",
"used",
".",
"Otherwise",
"$HOME",
"/",
".",
"ros",
"/",
"test_results",
"is",
"used",
"."
] | def get_test_results_dir(env=None):
"""
Get directory to use for writing test result files. There are multiple
possible locations for this. If ROS_HOME is set ROS_HOME/test_results
is used. Otherwise $HOME/.ros/test_results is used.
@param env: environment dictionary (defaults to os.environ)
@type env: dict
    @return: path to use for the test results directory
@rtype: str
"""
return os.path.join(get_ros_home(env), 'test_results') | [
"def",
"get_test_results_dir",
"(",
"env",
"=",
"None",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"get_ros_home",
"(",
"env",
")",
",",
"'test_results'",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros/roslib/src/roslib/rosenv.py#L188-L199 |
|
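A small usage sketch for the row above, assuming `roslib.rosenv` is importable and that `get_ros_home` honours a `ROS_HOME` entry in the supplied environment dict, as the docstring states; the paths shown are illustrative:

```python
# Hypothetical usage: with no argument the helper consults os.environ,
# typically yielding ~/.ros/test_results; an explicit env dict with
# ROS_HOME set redirects the result.
from roslib.rosenv import get_test_results_dir

print(get_test_results_dir())                                  # e.g. /home/user/.ros/test_results
print(get_test_results_dir({'ROS_HOME': '/tmp/my_ros_home'}))  # /tmp/my_ros_home/test_results
```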
cvxpy/cvxpy | 5165b4fb750dfd237de8659383ef24b4b2e33aaf | cvxpy/atoms/affine/conj.py | python | conj.is_incr | (self, idx) | return False | Is the composition non-decreasing in argument idx? | Is the composition non-decreasing in argument idx? | [
"Is",
"the",
"composition",
"non",
"-",
"decreasing",
"in",
"argument",
"idx?"
] | def is_incr(self, idx) -> bool:
"""Is the composition non-decreasing in argument idx?
"""
return False | [
"def",
"is_incr",
"(",
"self",
",",
"idx",
")",
"->",
"bool",
":",
"return",
"False"
] | https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/affine/conj.py#L42-L45 |
|
blackberry/Boost | fc90c3fde129c62565c023f091eddc4a7ed9902b | tools/build/v2/tools/common.py | python | get_absolute_tool_path | (command) | Given an invocation command,
return the absolute path to the command. This works even if the command
has no path element and is present in PATH. | Given an invocation command,
return the absolute path to the command. This works even if the command
has no path element and is present in PATH. | [
"Given",
"an",
"invocation",
"command",
"return",
"the",
"absolute",
"path",
"to",
"the",
"command",
".",
"This",
"works",
"even",
"if",
"commnad",
"has",
"not",
"path",
"element",
"and",
"is",
"present",
"in",
"PATH",
"."
] | def get_absolute_tool_path(command):
"""
Given an invocation command,
    return the absolute path to the command. This works even if the command
    has no path element and is present in PATH.
"""
if os.path.dirname(command):
return os.path.dirname(command)
else:
programs = path.programs_path()
m = path.glob(programs, [command, command + '.exe' ])
if not len(m):
if __debug_configuration:
print "Could not find:", command, "in", programs
return None
return os.path.dirname(m[0]) | [
"def",
"get_absolute_tool_path",
"(",
"command",
")",
":",
"if",
"os",
".",
"path",
".",
"dirname",
"(",
"command",
")",
":",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"command",
")",
"else",
":",
"programs",
"=",
"path",
".",
"programs_path",
"(",
")",
"m",
"=",
"path",
".",
"glob",
"(",
"programs",
",",
"[",
"command",
",",
"command",
"+",
"'.exe'",
"]",
")",
"if",
"not",
"len",
"(",
"m",
")",
":",
"if",
"__debug_configuration",
":",
"print",
"\"Could not find:\"",
",",
"command",
",",
"\"in\"",
",",
"programs",
"return",
"None",
"return",
"os",
".",
"path",
".",
"dirname",
"(",
"m",
"[",
"0",
"]",
")"
] | https://github.com/blackberry/Boost/blob/fc90c3fde129c62565c023f091eddc4a7ed9902b/tools/build/v2/tools/common.py#L340-L355 |
||
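The `path.programs_path()` and `path.glob()` helpers above are internal to Boost.Build, so the row is not runnable on its own. A rough standard-library equivalent of the same lookup logic, sketched with Python 3's `shutil.which` rather than the actual b2 API:

```python
import os
import shutil

def get_absolute_tool_path(command):
    # A directory component in the command wins outright.
    if os.path.dirname(command):
        return os.path.dirname(command)
    # Otherwise search PATH; shutil.which also tries the .exe variant on
    # Windows, mirroring the [command, command + '.exe'] glob above.
    found = shutil.which(command)
    return os.path.dirname(found) if found else None
```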
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/py/sliceshell.py | python | SlicesShell.quit | (self) | Quit the application. | Quit the application. | [
"Quit",
"the",
"application",
"."
] | def quit(self):
"""Quit the application."""
# XXX Good enough for now but later we want to send a close event.
# In the close event handler we can make sure they want to
# quit. Other applications, like PythonCard, may choose to
# hide rather than quit so we should just post the event and
# let the surrounding app decide what it wants to do.
self.write('Click on the close button to leave the application.',
type='Output') | [
"def",
"quit",
"(",
"self",
")",
":",
"# XXX Good enough for now but later we want to send a close event.",
"# In the close event handler we can make sure they want to",
"# quit. Other applications, like PythonCard, may choose to",
"# hide rather than quit so we should just post the event and",
"# let the surrounding app decide what it wants to do.",
"self",
".",
"write",
"(",
"'Click on the close button to leave the application.'",
",",
"type",
"=",
"'Output'",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/py/sliceshell.py#L980-L988 |
||
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/ndarray/ndarray.py | python | moveaxis | (tensor, source, destination) | return op.transpose(tensor, order) | Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int or sequence of int
Original position of the axes to move. Can be negative but must be unique.
destination : int or sequence of int
Destination position for each of the original axes. Can be negative but must be unique.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
>>> X = mx.nd.zeros((3, 4, 5))
>>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape
(5, 4, 3) | Moves the `source` axis into the `destination` position
while leaving the other axes in their original order | [
"Moves",
"the",
"source",
"axis",
"into",
"the",
"destination",
"position",
"while",
"leaving",
"the",
"other",
"axes",
"in",
"their",
"original",
"order"
] | def moveaxis(tensor, source, destination):
"""Moves the `source` axis into the `destination` position
while leaving the other axes in their original order
Parameters
----------
tensor : mx.nd.array
The array which axes should be reordered
source : int or sequence of int
Original position of the axes to move. Can be negative but must be unique.
destination : int or sequence of int
Destination position for each of the original axes. Can be negative but must be unique.
Returns
-------
result : mx.nd.array
Array with moved axes.
Examples
--------
>>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
>>> mx.nd.moveaxis(X, 0, 1).shape
(3L, 2L)
>>> X = mx.nd.zeros((3, 4, 5))
>>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape
(5, 4, 3)
"""
try:
source = np.core.numeric.normalize_axis_tuple(source, tensor.ndim)
except IndexError:
        raise ValueError('Source should verify 0 <= source < tensor.ndim. '
'Got %d' % source)
try:
destination = np.core.numeric.normalize_axis_tuple(destination, tensor.ndim)
except IndexError:
raise ValueError('Destination should verify 0 <= destination < tensor.ndim (%d).'
% tensor.ndim, 'Got %d' % destination)
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(tensor.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
return op.transpose(tensor, order) | [
"def",
"moveaxis",
"(",
"tensor",
",",
"source",
",",
"destination",
")",
":",
"try",
":",
"source",
"=",
"np",
".",
"core",
".",
"numeric",
".",
"normalize_axis_tuple",
"(",
"source",
",",
"tensor",
".",
"ndim",
")",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"'Source should verify 0 <= source < tensor.ndim'",
"'Got %d'",
"%",
"source",
")",
"try",
":",
"destination",
"=",
"np",
".",
"core",
".",
"numeric",
".",
"normalize_axis_tuple",
"(",
"destination",
",",
"tensor",
".",
"ndim",
")",
"except",
"IndexError",
":",
"raise",
"ValueError",
"(",
"'Destination should verify 0 <= destination < tensor.ndim (%d).'",
"%",
"tensor",
".",
"ndim",
",",
"'Got %d'",
"%",
"destination",
")",
"if",
"len",
"(",
"source",
")",
"!=",
"len",
"(",
"destination",
")",
":",
"raise",
"ValueError",
"(",
"'`source` and `destination` arguments must have '",
"'the same number of elements'",
")",
"order",
"=",
"[",
"n",
"for",
"n",
"in",
"range",
"(",
"tensor",
".",
"ndim",
")",
"if",
"n",
"not",
"in",
"source",
"]",
"for",
"dest",
",",
"src",
"in",
"sorted",
"(",
"zip",
"(",
"destination",
",",
"source",
")",
")",
":",
"order",
".",
"insert",
"(",
"dest",
",",
"src",
")",
"return",
"op",
".",
"transpose",
"(",
"tensor",
",",
"order",
")"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/ndarray/ndarray.py#L3458-L3506 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/series.py | python | Series.rename | (self, index=None, **kwargs) | return super(Series, self).rename(index=index, **kwargs) | Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : bool, default True
Also copy underlying data
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : Series (new object)
See Also
--------
Series.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64 | Alter Series index labels or name. | [
"Alter",
"Series",
"index",
"labels",
"or",
"name",
"."
] | def rename(self, index=None, **kwargs):
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : bool, default True
Also copy underlying data
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : Series (new object)
See Also
--------
Series.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
'inplace')
non_mapping = is_scalar(index) or (is_list_like(index) and
not is_dict_like(index))
if non_mapping:
return self._set_name(index, inplace=kwargs.get('inplace'))
return super(Series, self).rename(index=index, **kwargs) | [
"def",
"rename",
"(",
"self",
",",
"index",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'inplace'",
"]",
"=",
"validate_bool_kwarg",
"(",
"kwargs",
".",
"get",
"(",
"'inplace'",
",",
"False",
")",
",",
"'inplace'",
")",
"non_mapping",
"=",
"is_scalar",
"(",
"index",
")",
"or",
"(",
"is_list_like",
"(",
"index",
")",
"and",
"not",
"is_dict_like",
"(",
"index",
")",
")",
"if",
"non_mapping",
":",
"return",
"self",
".",
"_set_name",
"(",
"index",
",",
"inplace",
"=",
"kwargs",
".",
"get",
"(",
"'inplace'",
")",
")",
"return",
"super",
"(",
"Series",
",",
"self",
")",
".",
"rename",
"(",
"index",
"=",
"index",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/series.py#L3666-L3733 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/py/py/_path/local.py | python | LocalPath.pyimport | (self, modname=None, ensuresyspath=True) | return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
Special value of ensuresyspath=="importlib" is intended
purely for using in pytest, it is capable only of importing
separate .py files outside packages, e.g. for test suite
without any __init__.py file. It effectively allows having
same-named test modules in different places and offers
mild opt-in via this option. Note that it works only in
recent versions of python. | return path as an imported python module. | [
"return",
"path",
"as",
"an",
"imported",
"python",
"module",
"."
] | def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
Special value of ensuresyspath=="importlib" is intended
purely for using in pytest, it is capable only of importing
separate .py files outside packages, e.g. for test suite
without any __init__.py file. It effectively allows having
same-named test modules in different places and offers
mild opt-in via this option. Note that it works only in
recent versions of python.
"""
if not self.check():
raise py.error.ENOENT(self)
if ensuresyspath == 'importlib':
if modname is None:
modname = self.purebasename
if not ALLOW_IMPORTLIB_MODE:
raise ImportError(
"Can't use importlib due to old version of Python")
spec = importlib.util.spec_from_file_location(
modname, str(self))
if spec is None:
raise ImportError(
"Can't find module %s at location %s" %
(modname, str(self))
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
# be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
ignore = os.getenv('PY_IGNORE_IMPORTMISMATCH')
if ignore != '1':
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
import types
mod = types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod | [
"def",
"pyimport",
"(",
"self",
",",
"modname",
"=",
"None",
",",
"ensuresyspath",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"check",
"(",
")",
":",
"raise",
"py",
".",
"error",
".",
"ENOENT",
"(",
"self",
")",
"if",
"ensuresyspath",
"==",
"'importlib'",
":",
"if",
"modname",
"is",
"None",
":",
"modname",
"=",
"self",
".",
"purebasename",
"if",
"not",
"ALLOW_IMPORTLIB_MODE",
":",
"raise",
"ImportError",
"(",
"\"Can't use importlib due to old version of Python\"",
")",
"spec",
"=",
"importlib",
".",
"util",
".",
"spec_from_file_location",
"(",
"modname",
",",
"str",
"(",
"self",
")",
")",
"if",
"spec",
"is",
"None",
":",
"raise",
"ImportError",
"(",
"\"Can't find module %s at location %s\"",
"%",
"(",
"modname",
",",
"str",
"(",
"self",
")",
")",
")",
"mod",
"=",
"importlib",
".",
"util",
".",
"module_from_spec",
"(",
"spec",
")",
"spec",
".",
"loader",
".",
"exec_module",
"(",
"mod",
")",
"return",
"mod",
"pkgpath",
"=",
"None",
"if",
"modname",
"is",
"None",
":",
"pkgpath",
"=",
"self",
".",
"pypkgpath",
"(",
")",
"if",
"pkgpath",
"is",
"not",
"None",
":",
"pkgroot",
"=",
"pkgpath",
".",
"dirpath",
"(",
")",
"names",
"=",
"self",
".",
"new",
"(",
"ext",
"=",
"\"\"",
")",
".",
"relto",
"(",
"pkgroot",
")",
".",
"split",
"(",
"self",
".",
"sep",
")",
"if",
"names",
"[",
"-",
"1",
"]",
"==",
"\"__init__\"",
":",
"names",
".",
"pop",
"(",
")",
"modname",
"=",
"\".\"",
".",
"join",
"(",
"names",
")",
"else",
":",
"pkgroot",
"=",
"self",
".",
"dirpath",
"(",
")",
"modname",
"=",
"self",
".",
"purebasename",
"self",
".",
"_ensuresyspath",
"(",
"ensuresyspath",
",",
"pkgroot",
")",
"__import__",
"(",
"modname",
")",
"mod",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"if",
"self",
".",
"basename",
"==",
"\"__init__.py\"",
":",
"return",
"mod",
"# we don't check anything as we might",
"# be in a namespace package ... too icky to check",
"modfile",
"=",
"mod",
".",
"__file__",
"if",
"modfile",
"[",
"-",
"4",
":",
"]",
"in",
"(",
"'.pyc'",
",",
"'.pyo'",
")",
":",
"modfile",
"=",
"modfile",
"[",
":",
"-",
"1",
"]",
"elif",
"modfile",
".",
"endswith",
"(",
"'$py.class'",
")",
":",
"modfile",
"=",
"modfile",
"[",
":",
"-",
"9",
"]",
"+",
"'.py'",
"if",
"modfile",
".",
"endswith",
"(",
"os",
".",
"path",
".",
"sep",
"+",
"\"__init__.py\"",
")",
":",
"if",
"self",
".",
"basename",
"!=",
"\"__init__.py\"",
":",
"modfile",
"=",
"modfile",
"[",
":",
"-",
"12",
"]",
"try",
":",
"issame",
"=",
"self",
".",
"samefile",
"(",
"modfile",
")",
"except",
"py",
".",
"error",
".",
"ENOENT",
":",
"issame",
"=",
"False",
"if",
"not",
"issame",
":",
"ignore",
"=",
"os",
".",
"getenv",
"(",
"'PY_IGNORE_IMPORTMISMATCH'",
")",
"if",
"ignore",
"!=",
"'1'",
":",
"raise",
"self",
".",
"ImportMismatchError",
"(",
"modname",
",",
"modfile",
",",
"self",
")",
"return",
"mod",
"else",
":",
"try",
":",
"return",
"sys",
".",
"modules",
"[",
"modname",
"]",
"except",
"KeyError",
":",
"# we have a custom modname, do a pseudo-import",
"import",
"types",
"mod",
"=",
"types",
".",
"ModuleType",
"(",
"modname",
")",
"mod",
".",
"__file__",
"=",
"str",
"(",
"self",
")",
"sys",
".",
"modules",
"[",
"modname",
"]",
"=",
"mod",
"try",
":",
"py",
".",
"builtin",
".",
"execfile",
"(",
"str",
"(",
"self",
")",
",",
"mod",
".",
"__dict__",
")",
"except",
":",
"del",
"sys",
".",
"modules",
"[",
"modname",
"]",
"raise",
"return",
"mod"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/py/py/_path/local.py#L649-L740 |
||
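A short usage sketch for `pyimport` above; the file path and module alias are placeholders, while `py.path.local` is the documented entry point for this class:

```python
import py

p = py.path.local("helpers/mytool.py")
mod = p.pyimport()                        # package-aware name, sys.path adjusted
alias = p.pyimport(modname="tool_alias")  # pseudo-import under a custom name
print(mod.__file__, alias.__name__)
```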
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | ipc/ipdl/ipdl/parser.py | python | p_Trigger | (p) | Trigger : SEND
| RECV
| CALL
| ANSWER | Trigger : SEND
| RECV
| CALL
| ANSWER | [
"Trigger",
":",
"SEND",
"|",
"RECV",
"|",
"CALL",
"|",
"ANSWER"
] | def p_Trigger(p):
"""Trigger : SEND
| RECV
| CALL
| ANSWER"""
p[0] = [ locFromTok(p, 1), Transition.nameToTrigger(p[1]) ] | [
"def",
"p_Trigger",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"[",
"locFromTok",
"(",
"p",
",",
"1",
")",
",",
"Transition",
".",
"nameToTrigger",
"(",
"p",
"[",
"1",
"]",
")",
"]"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/ipc/ipdl/ipdl/parser.py#L596-L601 |
||
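Context for the row above: in PLY (Python Lex-Yacc), the docstring of a `p_*` function *is* the grammar production, and `p[0]`/`p[1]` carry the semantic values. A minimal standalone rule in the same style, with purely illustrative names:

```python
# PLY reads the docstring below as a grammar rule, not as documentation;
# p[1] holds the value of the matched token and p[0] the rule's result.
def p_color(p):
    """Color : RED
             | GREEN
             | BLUE"""
    p[0] = p[1].lower()
```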
MythTV/mythtv | d282a209cb8be85d036f85a62a8ec971b67d45f4 | mythtv/contrib/imports/mirobridge/mirobridge/mirobridge_interpreter_2_0_3.py | python | MiroInterpreter.do_playlist | (self, line) | playlist <name> -- Selects a playlist. | playlist <name> -- Selects a playlist. | [
"playlist",
"<name",
">",
"--",
"Selects",
"a",
"playlist",
"."
] | def do_playlist(self, line):
"""playlist <name> -- Selects a playlist."""
for tab in self.playlistTabs.getView():
if tab.obj.get_title() == line:
self.tab = tab
self.tab_changed()
return
print "Error: %s not found" % line | [
"def",
"do_playlist",
"(",
"self",
",",
"line",
")",
":",
"for",
"tab",
"in",
"self",
".",
"playlistTabs",
".",
"getView",
"(",
")",
":",
"if",
"tab",
".",
"obj",
".",
"get_title",
"(",
")",
"==",
"line",
":",
"self",
".",
"tab",
"=",
"tab",
"self",
".",
"tab_changed",
"(",
")",
"return",
"print",
"\"Error: %s not found\"",
"%",
"line"
] | https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/contrib/imports/mirobridge/mirobridge/mirobridge_interpreter_2_0_3.py#L488-L495 |
||
NVIDIAGameWorks/kaolin | e5148d05e9c1e2ce92a07881ce3593b1c5c3f166 | kaolin/io/usd.py | python | export_mesh | (file_path, scene_path='/World/Meshes/mesh_0', vertices=None, faces=None,
uvs=None, face_uvs_idx=None, face_normals=None, materials_order=None, materials=None,
up_axis='Y', time=None) | return stage | r"""Export a single mesh to USD.
Export a single mesh defined by vertices and faces and save the stage to disk.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
scene_path (str, optional): Absolute path of mesh within the USD file scene. Must be a valid ``Sdf.Path``.
If no path is provided, a default path is used.
vertices (torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``.
faces (torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``.
            Mesh must be homogeneous (consistent number of vertices per face).
uvs (torch.FloatTensor, optional): of shape ``(num_uvs, 2)``.
face_uvs_idx (torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, `uvs` must also
be specified.
face_normals (torch.Tensor, optional): of shape ``(num_vertices, num_faces, 3)``.
materials_order (torch.LongTensor): of shape (N, 2)
showing the order in which materials are used over **face_uvs_idx** and the first indices
in which they start to be used. A material can be used multiple times.
materials (list of Material): a list of materials
up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``
time (convertible to float, optional): Positive integer defining the time at which the supplied parameters
correspond to.
Returns:
(Usd.Stage)
Example:
>>> vertices = torch.rand(3, 3)
>>> faces = torch.tensor([[0, 1, 2]])
>>> stage = export_mesh('./new_stage.usd', vertices=vertices, faces=faces) | r"""Export a single mesh to USD. | [
"r",
"Export",
"a",
"single",
"mesh",
"to",
"USD",
"."
] | def export_mesh(file_path, scene_path='/World/Meshes/mesh_0', vertices=None, faces=None,
uvs=None, face_uvs_idx=None, face_normals=None, materials_order=None, materials=None,
up_axis='Y', time=None):
r"""Export a single mesh to USD.
Export a single mesh defined by vertices and faces and save the stage to disk.
Args:
file_path (str): Path to usd file (\*.usd, \*.usda).
scene_path (str, optional): Absolute path of mesh within the USD file scene. Must be a valid ``Sdf.Path``.
If no path is provided, a default path is used.
vertices (torch.FloatTensor, optional): Vertices with shape ``(num_vertices, 3)``.
faces (torch.LongTensor, optional): Vertex indices for each face with shape ``(num_faces, face_size)``.
            Mesh must be homogeneous (consistent number of vertices per face).
uvs (torch.FloatTensor, optional): of shape ``(num_uvs, 2)``.
face_uvs_idx (torch.LongTensor, optional): of shape ``(num_faces, face_size)``. If provided, `uvs` must also
be specified.
face_normals (torch.Tensor, optional): of shape ``(num_vertices, num_faces, 3)``.
materials_order (torch.LongTensor): of shape (N, 2)
showing the order in which materials are used over **face_uvs_idx** and the first indices
in which they start to be used. A material can be used multiple times.
materials (list of Material): a list of materials
up_axis (str, optional): Specifies the scene's up axis. Choose from ``['Y', 'Z']``
time (convertible to float, optional): Positive integer defining the time at which the supplied parameters
correspond to.
Returns:
(Usd.Stage)
Example:
>>> vertices = torch.rand(3, 3)
>>> faces = torch.tensor([[0, 1, 2]])
>>> stage = export_mesh('./new_stage.usd', vertices=vertices, faces=faces)
"""
assert isinstance(scene_path, str)
if time is None:
time = Usd.TimeCode.Default()
if os.path.exists(file_path):
stage = Usd.Stage.Open(file_path)
UsdGeom.SetStageUpAxis(stage, up_axis)
else:
stage = create_stage(file_path, up_axis)
add_mesh(stage, scene_path, vertices, faces, uvs, face_uvs_idx, face_normals, materials_order, materials, time=time)
stage.Save()
return stage | [
"def",
"export_mesh",
"(",
"file_path",
",",
"scene_path",
"=",
"'/World/Meshes/mesh_0'",
",",
"vertices",
"=",
"None",
",",
"faces",
"=",
"None",
",",
"uvs",
"=",
"None",
",",
"face_uvs_idx",
"=",
"None",
",",
"face_normals",
"=",
"None",
",",
"materials_order",
"=",
"None",
",",
"materials",
"=",
"None",
",",
"up_axis",
"=",
"'Y'",
",",
"time",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"scene_path",
",",
"str",
")",
"if",
"time",
"is",
"None",
":",
"time",
"=",
"Usd",
".",
"TimeCode",
".",
"Default",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"stage",
"=",
"Usd",
".",
"Stage",
".",
"Open",
"(",
"file_path",
")",
"UsdGeom",
".",
"SetStageUpAxis",
"(",
"stage",
",",
"up_axis",
")",
"else",
":",
"stage",
"=",
"create_stage",
"(",
"file_path",
",",
"up_axis",
")",
"add_mesh",
"(",
"stage",
",",
"scene_path",
",",
"vertices",
",",
"faces",
",",
"uvs",
",",
"face_uvs_idx",
",",
"face_normals",
",",
"materials_order",
",",
"materials",
",",
"time",
"=",
"time",
")",
"stage",
".",
"Save",
"(",
")",
"return",
"stage"
] | https://github.com/NVIDIAGameWorks/kaolin/blob/e5148d05e9c1e2ce92a07881ce3593b1c5c3f166/kaolin/io/usd.py#L689-L733 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/urllib3/response.py | python | HTTPResponse.read | (self, amt=None, decode_content=None, cache_content=False) | return data | Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.) | Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``. | [
"Similar",
"to",
":",
"meth",
":",
"http",
".",
"client",
".",
"HTTPResponse",
".",
"read",
"but",
"with",
"two",
"additional",
"parameters",
":",
"decode_content",
"and",
"cache_content",
"."
] | def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
fp_closed = getattr(self._fp, "closed", False)
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read() if not fp_closed else b""
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt) if not fp_closed else b""
if (
amt != 0 and not data
): # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if self.enforce_content_length and self.length_remaining not in (
0,
None,
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data | [
"def",
"read",
"(",
"self",
",",
"amt",
"=",
"None",
",",
"decode_content",
"=",
"None",
",",
"cache_content",
"=",
"False",
")",
":",
"self",
".",
"_init_decoder",
"(",
")",
"if",
"decode_content",
"is",
"None",
":",
"decode_content",
"=",
"self",
".",
"decode_content",
"if",
"self",
".",
"_fp",
"is",
"None",
":",
"return",
"flush_decoder",
"=",
"False",
"fp_closed",
"=",
"getattr",
"(",
"self",
".",
"_fp",
",",
"\"closed\"",
",",
"False",
")",
"with",
"self",
".",
"_error_catcher",
"(",
")",
":",
"if",
"amt",
"is",
"None",
":",
"# cStringIO doesn't like amt=None",
"data",
"=",
"self",
".",
"_fp",
".",
"read",
"(",
")",
"if",
"not",
"fp_closed",
"else",
"b\"\"",
"flush_decoder",
"=",
"True",
"else",
":",
"cache_content",
"=",
"False",
"data",
"=",
"self",
".",
"_fp",
".",
"read",
"(",
"amt",
")",
"if",
"not",
"fp_closed",
"else",
"b\"\"",
"if",
"(",
"amt",
"!=",
"0",
"and",
"not",
"data",
")",
":",
"# Platform-specific: Buggy versions of Python.",
"# Close the connection when no data is returned",
"#",
"# This is redundant to what httplib/http.client _should_",
"# already do. However, versions of python released before",
"# December 15, 2012 (http://bugs.python.org/issue16298) do",
"# not properly close the connection in all cases. There is",
"# no harm in redundantly calling close.",
"self",
".",
"_fp",
".",
"close",
"(",
")",
"flush_decoder",
"=",
"True",
"if",
"self",
".",
"enforce_content_length",
"and",
"self",
".",
"length_remaining",
"not",
"in",
"(",
"0",
",",
"None",
",",
")",
":",
"# This is an edge case that httplib failed to cover due",
"# to concerns of backward compatibility. We're",
"# addressing it here to make sure IncompleteRead is",
"# raised during streaming, so all calls with incorrect",
"# Content-Length are caught.",
"raise",
"IncompleteRead",
"(",
"self",
".",
"_fp_bytes_read",
",",
"self",
".",
"length_remaining",
")",
"if",
"data",
":",
"self",
".",
"_fp_bytes_read",
"+=",
"len",
"(",
"data",
")",
"if",
"self",
".",
"length_remaining",
"is",
"not",
"None",
":",
"self",
".",
"length_remaining",
"-=",
"len",
"(",
"data",
")",
"data",
"=",
"self",
".",
"_decode",
"(",
"data",
",",
"decode_content",
",",
"flush_decoder",
")",
"if",
"cache_content",
":",
"self",
".",
"_body",
"=",
"data",
"return",
"data"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/urllib3/response.py#L481-L553 |
|
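A usage sketch for `read` above: requesting with `preload_content=False` leaves the body unread so that `read(amt)` streams from the socket. The URL is a placeholder:

```python
import urllib3

http = urllib3.PoolManager()
resp = http.request("GET", "https://example.com/big-file",
                    preload_content=False)
chunks = []
while True:
    chunk = resp.read(8192, decode_content=True)  # amt is set, so no caching
    if not chunk:
        break
    chunks.append(chunk)
resp.release_conn()
body = b"".join(chunks)
```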
htcondor/htcondor | 4829724575176d1d6c936e4693dfd78a728569b0 | src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py | python | ISkypeEvents.Reply | (self, Command) | This event is triggered when the API replies to a command object.
@param Command: Command object.
@type Command: L{ICommand} | This event is triggered when the API replies to a command object. | [
"This",
"event",
"is",
"triggered",
"when",
"the",
"API",
"replies",
"to",
"a",
"command",
"object",
"."
] | def Reply(self, Command):
'''This event is triggered when the API replies to a command object.
@param Command: Command object.
@type Command: L{ICommand}
''' | [
"def",
"Reply",
"(",
"self",
",",
"Command",
")",
":"
] | https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/skype.py#L1625-L1630 |
||
crosslife/OpenBird | 9e0198a1a2295f03fa1e8676e216e22c9c7d380b | cocos2d/tools/bindings-generator/clang/cindex.py | python | SourceLocation.file | (self) | return self._get_instantiation()[0] | Get the file represented by this source location. | Get the file represented by this source location. | [
"Get",
"the",
"file",
"represented",
"by",
"this",
"source",
"location",
"."
] | def file(self):
"""Get the file represented by this source location."""
return self._get_instantiation()[0] | [
"def",
"file",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_instantiation",
"(",
")",
"[",
"0",
"]"
] | https://github.com/crosslife/OpenBird/blob/9e0198a1a2295f03fa1e8676e216e22c9c7d380b/cocos2d/tools/bindings-generator/clang/cindex.py#L198-L200 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/boto3/s3/inject.py | python | copy | (self, CopySource, Bucket, Key, ExtraArgs=None, Callback=None,
SourceClient=None, Config=None) | Copy an object from one S3 location to another.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type Bucket: str
:param Bucket: The name of the bucket to copy to
:type Key: str
:param Key: The name of the key to copy to
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
:param SourceClient: The client to be used for operation that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy. | Copy an object from one S3 location to another. | [
"Copy",
"an",
"object",
"from",
"one",
"S3",
"location",
"to",
"another",
"."
] | def copy(self, CopySource, Bucket, Key, ExtraArgs=None, Callback=None,
SourceClient=None, Config=None):
"""Copy an object from one S3 location to another.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type Bucket: str
:param Bucket: The name of the bucket to copy to
:type Key: str
:param Key: The name of the key to copy to
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation
:type Callback: function
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
:param SourceClient: The client to be used for operation that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with create_transfer_manager(self, config) as manager:
future = manager.copy(
copy_source=CopySource, bucket=Bucket, key=Key,
extra_args=ExtraArgs, subscribers=subscribers,
source_client=SourceClient)
return future.result() | [
"def",
"copy",
"(",
"self",
",",
"CopySource",
",",
"Bucket",
",",
"Key",
",",
"ExtraArgs",
"=",
"None",
",",
"Callback",
"=",
"None",
",",
"SourceClient",
"=",
"None",
",",
"Config",
"=",
"None",
")",
":",
"subscribers",
"=",
"None",
"if",
"Callback",
"is",
"not",
"None",
":",
"subscribers",
"=",
"[",
"ProgressCallbackInvoker",
"(",
"Callback",
")",
"]",
"config",
"=",
"Config",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"TransferConfig",
"(",
")",
"with",
"create_transfer_manager",
"(",
"self",
",",
"config",
")",
"as",
"manager",
":",
"future",
"=",
"manager",
".",
"copy",
"(",
"copy_source",
"=",
"CopySource",
",",
"bucket",
"=",
"Bucket",
",",
"key",
"=",
"Key",
",",
"extra_args",
"=",
"ExtraArgs",
",",
"subscribers",
"=",
"subscribers",
",",
"source_client",
"=",
"SourceClient",
")",
"return",
"future",
".",
"result",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/boto3/s3/inject.py#L317-L379 |
||
dmlc/xgboost | 2775c2a1abd4b5b759ff517617434c8b9aeb4cc0 | python-package/xgboost/core.py | python | Booster.set_attr | (self, **kwargs: Optional[str]) | Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute. | Set the attribute of the Booster. | [
"Set",
"the",
"attribute",
"of",
"the",
"Booster",
"."
] | def set_attr(self, **kwargs: Optional[str]) -> None:
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, STRING_TYPES):
raise ValueError("Set Attr only accepts string values")
value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(
self.handle, c_str(key), value)) | [
"def",
"set_attr",
"(",
"self",
",",
"*",
"*",
"kwargs",
":",
"Optional",
"[",
"str",
"]",
")",
"->",
"None",
":",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"STRING_TYPES",
")",
":",
"raise",
"ValueError",
"(",
"\"Set Attr only accepts string values\"",
")",
"value",
"=",
"c_str",
"(",
"str",
"(",
"value",
")",
")",
"_check_call",
"(",
"_LIB",
".",
"XGBoosterSetAttr",
"(",
"self",
".",
"handle",
",",
"c_str",
"(",
"key",
")",
",",
"value",
")",
")"
] | https://github.com/dmlc/xgboost/blob/2775c2a1abd4b5b759ff517617434c8b9aeb4cc0/python-package/xgboost/core.py#L1583-L1597 |
||
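A usage sketch for `set_attr` above, assuming `bst` is an already-trained `xgboost.Booster`; attributes are free-form string key/value pairs, and passing `None` deletes a key:

```python
bst.set_attr(best_iteration="42", note="tuned on fold 3")
print(bst.attr("best_iteration"))  # '42'
bst.set_attr(note=None)            # removes the 'note' attribute
print(bst.attr("note"))            # None
```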
trailofbits/llvm-sanitizer-tutorial | d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99 | llvm/tools/clang/bindings/python/clang/cindex.py | python | TypeKind.spelling | (self) | return conf.lib.clang_getTypeKindSpelling(self.value) | Retrieve the spelling of this TypeKind. | Retrieve the spelling of this TypeKind. | [
"Retrieve",
"the",
"spelling",
"of",
"this",
"TypeKind",
"."
] | def spelling(self):
"""Retrieve the spelling of this TypeKind."""
return conf.lib.clang_getTypeKindSpelling(self.value) | [
"def",
"spelling",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getTypeKindSpelling",
"(",
"self",
".",
"value",
")"
] | https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/tools/clang/bindings/python/clang/cindex.py#L2020-L2022 |
|
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Source/bindings/scripts/code_generator_v8.py | python | CodeGeneratorBase.generate_code | (self, definitions, definition_name) | return self.generate_code_internal(definitions, definition_name) | Returns .h/.cpp code as ((path, content)...). | Returns .h/.cpp code as ((path, content)...). | [
"Returns",
".",
"h",
"/",
".",
"cpp",
"code",
"as",
"((",
"path",
"content",
")",
"...",
")",
"."
] | def generate_code(self, definitions, definition_name):
"""Returns .h/.cpp code as ((path, content)...)."""
# Set local type info
if not should_generate_code(definitions):
return set()
IdlType.set_callback_functions(definitions.callback_functions.keys())
# Resolve typedefs
self.typedef_resolver.resolve(definitions, definition_name)
return self.generate_code_internal(definitions, definition_name) | [
"def",
"generate_code",
"(",
"self",
",",
"definitions",
",",
"definition_name",
")",
":",
"# Set local type info",
"if",
"not",
"should_generate_code",
"(",
"definitions",
")",
":",
"return",
"set",
"(",
")",
"IdlType",
".",
"set_callback_functions",
"(",
"definitions",
".",
"callback_functions",
".",
"keys",
"(",
")",
")",
"# Resolve typedefs",
"self",
".",
"typedef_resolver",
".",
"resolve",
"(",
"definitions",
",",
"definition_name",
")",
"return",
"self",
".",
"generate_code_internal",
"(",
"definitions",
",",
"definition_name",
")"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Source/bindings/scripts/code_generator_v8.py#L185-L194 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | demo/DelayedResult.py | python | FrameSimpleDelayed.handleClose | (self, event) | Only needed because in demo, closing the window does not kill the
app, so worker thread continues and sends result to dead frame; normally
your app would exit so this would not happen. | Only needed because in demo, closing the window does not kill the
app, so worker thread continues and sends result to dead frame; normally
your app would exit so this would not happen. | [
"Only",
"needed",
"because",
"in",
"demo",
"closing",
"the",
"window",
"does",
"not",
"kill",
"the",
"app",
"so",
"worker",
"thread",
"continues",
"and",
"sends",
"result",
"to",
"dead",
"frame",
";",
"normally",
"your",
"app",
"would",
"exit",
"so",
"this",
"would",
"not",
"happen",
"."
] | def handleClose(self, event):
"""Only needed because in demo, closing the window does not kill the
app, so worker thread continues and sends result to dead frame; normally
your app would exit so this would not happen."""
if self.buttonAbort.IsEnabled():
self.log( "Exiting: Aborting job %s" % self.jobID )
self.abortEvent.set()
self.Destroy() | [
"def",
"handleClose",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"buttonAbort",
".",
"IsEnabled",
"(",
")",
":",
"self",
".",
"log",
"(",
"\"Exiting: Aborting job %s\"",
"%",
"self",
".",
"jobID",
")",
"self",
".",
"abortEvent",
".",
"set",
"(",
")",
"self",
".",
"Destroy",
"(",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/demo/DelayedResult.py#L66-L73 |
||
ouster-lidar/ouster_example | 13ea8e8b8a4951fb630dbc9108666995c8443bf6 | python/src/ouster/client/data.py | python | LidarPacket.header | (self, header: ColHeader) | return res | Create a view of the specified column header.
This method is deprecated. Use the ``timestamp``, ``measurement_id`` or
``status`` properties instead.
Args:
header: The column header to parse
Returns:
A numpy array containing a copy of the specified header values | Create a view of the specified column header. | [
"Create",
"a",
"view",
"of",
"the",
"specified",
"column",
"header",
"."
] | def header(self, header: ColHeader) -> np.ndarray:
"""Create a view of the specified column header.
This method is deprecated. Use the ``timestamp``, ``measurement_id`` or
``status`` properties instead.
Args:
header: The column header to parse
Returns:
A numpy array containing a copy of the specified header values
"""
warnings.warn("LidarPacket.header is deprecated", DeprecationWarning)
res = self._pf.packet_header(header, self._data)
res.flags.writeable = False
return res | [
"def",
"header",
"(",
"self",
",",
"header",
":",
"ColHeader",
")",
"->",
"np",
".",
"ndarray",
":",
"warnings",
".",
"warn",
"(",
"\"LidarPacket.header is deprecated\"",
",",
"DeprecationWarning",
")",
"res",
"=",
"self",
".",
"_pf",
".",
"packet_header",
"(",
"header",
",",
"self",
".",
"_data",
")",
"res",
".",
"flags",
".",
"writeable",
"=",
"False",
"return",
"res"
] | https://github.com/ouster-lidar/ouster_example/blob/13ea8e8b8a4951fb630dbc9108666995c8443bf6/python/src/ouster/client/data.py#L196-L212 |
|
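A migration sketch for the deprecated accessor above, assuming `packet` is an `ouster.client.LidarPacket` and that `ColHeader.TIMESTAMP` is among the enum members; both forms should expose the same per-column values:

```python
from ouster.client import ColHeader

ts_old = packet.header(ColHeader.TIMESTAMP)  # deprecated, emits a warning
ts_new = packet.timestamp                    # preferred property
assert (ts_old == ts_new).all()
```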
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/tensor_array_ops.py | python | TensorArray.__init__ | (self,
dtype,
size=None,
dynamic_size=None,
clear_after_read=None,
tensor_array_name=None,
handle=None,
flow=None,
infer_shape=True,
element_shape=None,
colocate_with_first_write_call=True,
name=None) | Construct a new TensorArray or wrap an existing TensorArray handle.
A note about the parameter `name`:
The name of the `TensorArray` (even if passed in) is uniquified: each time
a new `TensorArray` is created at runtime it is assigned its own name for
the duration of the run. This avoids name collisions if a `TensorArray`
is created within a `while_loop`.
Args:
dtype: (required) data type of the TensorArray.
size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
clear_after_read: Boolean (optional, default: True). If True, clear
TensorArray values after reading them. This disables read-many
semantics, but allows early release of memory.
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None.
flow: (optional) A float `Tensor` scalar coming from an existing
`TensorArray.flow`.
infer_shape: (optional, default: True) If True, shape inference
is enabled. In this case, all elements must have the same shape.
element_shape: (optional, default: None) A `TensorShape` object specifying
the shape constraints of each of the elements of the TensorArray.
Need not be fully defined.
colocate_with_first_write_call: If `True`, the TensorArray will be
colocated on the same device as the Tensor used on its first write
(write operations include `write`, `unstack`, and `split`). If `False`,
the TensorArray will be placed on the device determined by the
device context available during its initialization.
name: A name for the operation (optional).
Raises:
ValueError: if both handle and tensor_array_name are provided.
TypeError: if handle is provided but is not a Tensor. | Construct a new TensorArray or wrap an existing TensorArray handle. | [
"Construct",
"a",
"new",
"TensorArray",
"or",
"wrap",
"an",
"existing",
"TensorArray",
"handle",
"."
] | def __init__(self,
dtype,
size=None,
dynamic_size=None,
clear_after_read=None,
tensor_array_name=None,
handle=None,
flow=None,
infer_shape=True,
element_shape=None,
colocate_with_first_write_call=True,
name=None):
"""Construct a new TensorArray or wrap an existing TensorArray handle.
A note about the parameter `name`:
The name of the `TensorArray` (even if passed in) is uniquified: each time
a new `TensorArray` is created at runtime it is assigned its own name for
the duration of the run. This avoids name collisions if a `TensorArray`
is created within a `while_loop`.
Args:
dtype: (required) data type of the TensorArray.
size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
clear_after_read: Boolean (optional, default: True). If True, clear
TensorArray values after reading them. This disables read-many
semantics, but allows early release of memory.
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None.
flow: (optional) A float `Tensor` scalar coming from an existing
`TensorArray.flow`.
infer_shape: (optional, default: True) If True, shape inference
is enabled. In this case, all elements must have the same shape.
element_shape: (optional, default: None) A `TensorShape` object specifying
the shape constraints of each of the elements of the TensorArray.
Need not be fully defined.
colocate_with_first_write_call: If `True`, the TensorArray will be
colocated on the same device as the Tensor used on its first write
(write operations include `write`, `unstack`, and `split`). If `False`,
the TensorArray will be placed on the device determined by the
device context available during its initialization.
name: A name for the operation (optional).
Raises:
ValueError: if both handle and tensor_array_name are provided.
TypeError: if handle is provided but is not a Tensor.
"""
if handle is not None and tensor_array_name:
raise ValueError(
"Cannot construct with both handle and tensor_array_name")
if handle is not None and not isinstance(handle, ops.Tensor):
raise TypeError("Handle must be a Tensor")
if handle is None and size is None:
raise ValueError("Size must be provided if handle is not provided")
if handle is not None and size is not None:
raise ValueError("Cannot provide both a handle and size "
"at the same time")
if handle is not None and element_shape is not None:
raise ValueError("Cannot provide both a handle and element_shape "
"at the same time")
if handle is not None and dynamic_size is not None:
raise ValueError("Cannot provide both a handle and dynamic_size "
"at the same time")
if handle is not None and clear_after_read is not None:
raise ValueError("Cannot provide both a handle and clear_after_read "
"at the same time")
if clear_after_read is None:
clear_after_read = True
dynamic_size = dynamic_size or False
self._dtype = dtype
# Used to keep track of what tensors the TensorArray should be
# colocated with. We choose to colocate the TensorArray with the
# first tensor written to it.
self._colocate_with_first_write_call = colocate_with_first_write_call
if colocate_with_first_write_call:
self._colocate_with = []
else:
self._colocate_with = None
# Record the current static shape for the array elements. The element
# shape is defined either by `element_shape` or the shape of the tensor
# of the first write. If `infer_shape` is true, all writes checks for
# shape equality.
if element_shape is None:
self._infer_shape = infer_shape
self._element_shape = []
else:
self._infer_shape = True
self._element_shape = [tensor_shape.TensorShape(element_shape)]
with ops.name_scope(name, "TensorArray", [handle, size, flow]) as scope:
if handle is not None:
self._handle = handle
if flow is None:
raise ValueError("flow must not be None if handle is not None.")
self._flow = flow
else:
# Construct the TensorArray with an empty device. The first
# write into the TensorArray from a Tensor with a set device
# will retroactively set the device value of this op.
def create():
return gen_data_flow_ops._tensor_array_v3(
dtype=dtype,
size=size,
element_shape=element_shape,
dynamic_size=dynamic_size,
clear_after_read=clear_after_read,
tensor_array_name=tensor_array_name,
name=scope)
if colocate_with_first_write_call:
with ops.device(None), ops.colocate_with(None, ignore_existing=True):
self._handle, self._flow = create()
else:
self._handle, self._flow = create() | [
"def",
"__init__",
"(",
"self",
",",
"dtype",
",",
"size",
"=",
"None",
",",
"dynamic_size",
"=",
"None",
",",
"clear_after_read",
"=",
"None",
",",
"tensor_array_name",
"=",
"None",
",",
"handle",
"=",
"None",
",",
"flow",
"=",
"None",
",",
"infer_shape",
"=",
"True",
",",
"element_shape",
"=",
"None",
",",
"colocate_with_first_write_call",
"=",
"True",
",",
"name",
"=",
"None",
")",
":",
"if",
"handle",
"is",
"not",
"None",
"and",
"tensor_array_name",
":",
"raise",
"ValueError",
"(",
"\"Cannot construct with both handle and tensor_array_name\"",
")",
"if",
"handle",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"handle",
",",
"ops",
".",
"Tensor",
")",
":",
"raise",
"TypeError",
"(",
"\"Handle must be a Tensor\"",
")",
"if",
"handle",
"is",
"None",
"and",
"size",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Size must be provided if handle is not provided\"",
")",
"if",
"handle",
"is",
"not",
"None",
"and",
"size",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot provide both a handle and size \"",
"\"at the same time\"",
")",
"if",
"handle",
"is",
"not",
"None",
"and",
"element_shape",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot provide both a handle and element_shape \"",
"\"at the same time\"",
")",
"if",
"handle",
"is",
"not",
"None",
"and",
"dynamic_size",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot provide both a handle and dynamic_size \"",
"\"at the same time\"",
")",
"if",
"handle",
"is",
"not",
"None",
"and",
"clear_after_read",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot provide both a handle and clear_after_read \"",
"\"at the same time\"",
")",
"if",
"clear_after_read",
"is",
"None",
":",
"clear_after_read",
"=",
"True",
"dynamic_size",
"=",
"dynamic_size",
"or",
"False",
"self",
".",
"_dtype",
"=",
"dtype",
"# Used to keep track of what tensors the TensorArray should be",
"# colocated with. We choose to colocate the TensorArray with the",
"# first tensor written to it.",
"self",
".",
"_colocate_with_first_write_call",
"=",
"colocate_with_first_write_call",
"if",
"colocate_with_first_write_call",
":",
"self",
".",
"_colocate_with",
"=",
"[",
"]",
"else",
":",
"self",
".",
"_colocate_with",
"=",
"None",
"# Record the current static shape for the array elements. The element",
"# shape is defined either by `element_shape` or the shape of the tensor",
"# of the first write. If `infer_shape` is true, all writes checks for",
"# shape equality.",
"if",
"element_shape",
"is",
"None",
":",
"self",
".",
"_infer_shape",
"=",
"infer_shape",
"self",
".",
"_element_shape",
"=",
"[",
"]",
"else",
":",
"self",
".",
"_infer_shape",
"=",
"True",
"self",
".",
"_element_shape",
"=",
"[",
"tensor_shape",
".",
"TensorShape",
"(",
"element_shape",
")",
"]",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"TensorArray\"",
",",
"[",
"handle",
",",
"size",
",",
"flow",
"]",
")",
"as",
"scope",
":",
"if",
"handle",
"is",
"not",
"None",
":",
"self",
".",
"_handle",
"=",
"handle",
"if",
"flow",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"flow must not be None if handle is not None.\"",
")",
"self",
".",
"_flow",
"=",
"flow",
"else",
":",
"# Construct the TensorArray with an empty device. The first",
"# write into the TensorArray from a Tensor with a set device",
"# will retroactively set the device value of this op.",
"def",
"create",
"(",
")",
":",
"return",
"gen_data_flow_ops",
".",
"_tensor_array_v3",
"(",
"dtype",
"=",
"dtype",
",",
"size",
"=",
"size",
",",
"element_shape",
"=",
"element_shape",
",",
"dynamic_size",
"=",
"dynamic_size",
",",
"clear_after_read",
"=",
"clear_after_read",
",",
"tensor_array_name",
"=",
"tensor_array_name",
",",
"name",
"=",
"scope",
")",
"if",
"colocate_with_first_write_call",
":",
"with",
"ops",
".",
"device",
"(",
"None",
")",
",",
"ops",
".",
"colocate_with",
"(",
"None",
",",
"ignore_existing",
"=",
"True",
")",
":",
"self",
".",
"_handle",
",",
"self",
".",
"_flow",
"=",
"create",
"(",
")",
"else",
":",
"self",
".",
"_handle",
",",
"self",
".",
"_flow",
"=",
"create",
"(",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/tensor_array_ops.py#L48-L169 |
||
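A minimal usage sketch for the TensorArray constructor recorded above (TF1-era graph-mode API; the tensor values are illustrative, not taken from the record):

import tensorflow as tf

# size is required because no pre-existing handle is passed in
ta = tf.TensorArray(dtype=tf.float32, size=2, dynamic_size=False,
                    clear_after_read=True, infer_shape=True)
# the first write retroactively fixes the colocation/device of the array
ta = ta.write(0, tf.constant([1.0, 2.0]))
first = ta.read(0)
# passing both `handle` and `size` would raise the ValueError shown above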
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/nntplib.py | python | NNTP.quit | (self) | return resp | Process a QUIT command and close the socket. Returns:
- resp: server response if successful | Process a QUIT command and close the socket. Returns:
- resp: server response if successful | [
"Process",
"a",
"QUIT",
"command",
"and",
"close",
"the",
"socket",
".",
"Returns",
":",
"-",
"resp",
":",
"server",
"response",
"if",
"successful"
] | def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp | [
"def",
"quit",
"(",
"self",
")",
":",
"resp",
"=",
"self",
".",
"shortcmd",
"(",
"'QUIT'",
")",
"self",
".",
"file",
".",
"close",
"(",
")",
"self",
".",
"sock",
".",
"close",
"(",
")",
"del",
"self",
".",
"file",
",",
"self",
".",
"sock",
"return",
"resp"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/nntplib.py#L595-L603 |
|
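A hypothetical session showing the quit() round trip (the host name is an assumption; Python 2 nntplib):

import nntplib

s = nntplib.NNTP('news.example.com')  # hypothetical server
resp = s.quit()  # sends QUIT, closes the socket, returns the response line
print(resp)      # e.g. '205 closing connection - goodbye!'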
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/variables.py | python | RefVariable.op | (self) | return self._variable.op | The `Operation` of this variable. | The `Operation` of this variable. | [
"The",
"Operation",
"of",
"this",
"variable",
"."
] | def op(self):
"""The `Operation` of this variable."""
return self._variable.op | [
"def",
"op",
"(",
"self",
")",
":",
"return",
"self",
".",
"_variable",
".",
"op"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/variables.py#L2568-L2570 |
|
apple/swift-clang | d7403439fc6641751840b723e7165fb02f52db95 | tools/scan-build-py/libscanbuild/analyze.py | python | run_analyzer_parallel | (args) | Runs the analyzer against the given compilation database. | Runs the analyzer against the given compilation database. | [
"Runs",
"the",
"analyzer",
"against",
"the",
"given",
"compilation",
"database",
"."
] | def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename):
""" Return true when any excluded directory prefix the filename. """
return any(re.match(r'^' + directory, filename)
for directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug,
'ctu': get_ctu_config_from_args(args)
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(cmd['file']))
# when verbose output requested execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join() | [
"def",
"run_analyzer_parallel",
"(",
"args",
")",
":",
"def",
"exclude",
"(",
"filename",
")",
":",
"\"\"\" Return true when any excluded directory prefix the filename. \"\"\"",
"return",
"any",
"(",
"re",
".",
"match",
"(",
"r'^'",
"+",
"directory",
",",
"filename",
")",
"for",
"directory",
"in",
"args",
".",
"excludes",
")",
"consts",
"=",
"{",
"'clang'",
":",
"args",
".",
"clang",
",",
"'output_dir'",
":",
"args",
".",
"output",
",",
"'output_format'",
":",
"args",
".",
"output_format",
",",
"'output_failures'",
":",
"args",
".",
"output_failures",
",",
"'direct_args'",
":",
"analyzer_params",
"(",
"args",
")",
",",
"'force_debug'",
":",
"args",
".",
"force_debug",
",",
"'ctu'",
":",
"get_ctu_config_from_args",
"(",
"args",
")",
"}",
"logging",
".",
"debug",
"(",
"'run analyzer against compilation database'",
")",
"with",
"open",
"(",
"args",
".",
"cdb",
",",
"'r'",
")",
"as",
"handle",
":",
"generator",
"=",
"(",
"dict",
"(",
"cmd",
",",
"*",
"*",
"consts",
")",
"for",
"cmd",
"in",
"json",
".",
"load",
"(",
"handle",
")",
"if",
"not",
"exclude",
"(",
"cmd",
"[",
"'file'",
"]",
")",
")",
"# when verbose output requested execute sequentially",
"pool",
"=",
"multiprocessing",
".",
"Pool",
"(",
"1",
"if",
"args",
".",
"verbose",
">",
"2",
"else",
"None",
")",
"for",
"current",
"in",
"pool",
".",
"imap_unordered",
"(",
"run",
",",
"generator",
")",
":",
"if",
"current",
"is",
"not",
"None",
":",
"# display error message from the static analyzer",
"for",
"line",
"in",
"current",
"[",
"'error_output'",
"]",
":",
"logging",
".",
"info",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")"
] | https://github.com/apple/swift-clang/blob/d7403439fc6641751840b723e7165fb02f52db95/tools/scan-build-py/libscanbuild/analyze.py#L206-L236 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Util/asn1.py | python | DerSetOf.__init__ | (self, startSet=None, implicit=None) | Initialize the DER object as a SET OF.
:Parameters:
startSet : container
The initial set of integers or DER encoded objects.
implicit : integer
The IMPLICIT tag to use for the encoded object.
It overrides the universal tag for SET OF (17). | Initialize the DER object as a SET OF. | [
"Initialize",
"the",
"DER",
"object",
"as",
"a",
"SET",
"OF",
"."
] | def __init__(self, startSet=None, implicit=None):
"""Initialize the DER object as a SET OF.
:Parameters:
startSet : container
The initial set of integers or DER encoded objects.
implicit : integer
The IMPLICIT tag to use for the encoded object.
It overrides the universal tag for SET OF (17).
"""
DerObject.__init__(self, 0x11, b'', implicit, True)
self._seq = []
# All elements must be of the same type (and therefore have the
# same leading octet)
self._elemOctet = None
if startSet:
for e in startSet:
self.add(e) | [
"def",
"__init__",
"(",
"self",
",",
"startSet",
"=",
"None",
",",
"implicit",
"=",
"None",
")",
":",
"DerObject",
".",
"__init__",
"(",
"self",
",",
"0x11",
",",
"b''",
",",
"implicit",
",",
"True",
")",
"self",
".",
"_seq",
"=",
"[",
"]",
"# All elements must be of the same type (and therefore have the",
"# same leading octet)",
"self",
".",
"_elemOctet",
"=",
"None",
"if",
"startSet",
":",
"for",
"e",
"in",
"startSet",
":",
"self",
".",
"add",
"(",
"e",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Util/asn1.py#L818-L837 |
||
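A small sketch of constructing and encoding the SET OF above (values are illustrative; assumes a pycryptodome-compatible Crypto.Util.asn1):

from Crypto.Util.asn1 import DerSetOf

s = DerSetOf([4, 5, 6])   # homogeneous set of integers
s.add(7)                  # elements must all share one leading octet
der_bytes = s.encode()    # DER encoding, constructed tag 0x31 (SET OF)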
apiaryio/drafter | 4634ebd07f6c6f257cc656598ccd535492fdfb55 | tools/gyp/pylib/gyp/common.py | python | DeepDependencyTargets | (target_dicts, roots) | return list(dependencies - set(roots)) | Returns the recursive list of target dependencies. | Returns the recursive list of target dependencies. | [
"Returns",
"the",
"recursive",
"list",
"of",
"target",
"dependencies",
"."
] | def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots)) | [
"def",
"DeepDependencyTargets",
"(",
"target_dicts",
",",
"roots",
")",
":",
"dependencies",
"=",
"set",
"(",
")",
"pending",
"=",
"set",
"(",
"roots",
")",
"while",
"pending",
":",
"# Pluck out one.",
"r",
"=",
"pending",
".",
"pop",
"(",
")",
"# Skip if visited already.",
"if",
"r",
"in",
"dependencies",
":",
"continue",
"# Add it.",
"dependencies",
".",
"add",
"(",
"r",
")",
"# Add its children.",
"spec",
"=",
"target_dicts",
"[",
"r",
"]",
"pending",
".",
"update",
"(",
"set",
"(",
"spec",
".",
"get",
"(",
"'dependencies'",
",",
"[",
"]",
")",
")",
")",
"pending",
".",
"update",
"(",
"set",
"(",
"spec",
".",
"get",
"(",
"'dependencies_original'",
",",
"[",
"]",
")",
")",
")",
"return",
"list",
"(",
"dependencies",
"-",
"set",
"(",
"roots",
")",
")"
] | https://github.com/apiaryio/drafter/blob/4634ebd07f6c6f257cc656598ccd535492fdfb55/tools/gyp/pylib/gyp/common.py#L296-L312 |
|
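An illustrative call with toy gyp target dicts (the data is made up for the example):

target_dicts = {
    'a': {'dependencies': ['b']},
    'b': {'dependencies': ['c']},
    'c': {},
}
# transitive dependencies of 'a', with the root itself excluded
print(sorted(DeepDependencyTargets(target_dicts, ['a'])))  # ['b', 'c']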
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/threading.py | python | Barrier.abort | (self) | Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised. | Place the barrier into a 'broken' state. | [
"Place",
"the",
"barrier",
"into",
"a",
"broken",
"state",
"."
] | def abort(self):
"""Place the barrier into a 'broken' state.
Useful in case of error. Any currently waiting threads and threads
attempting to 'wait()' will have BrokenBarrierError raised.
"""
with self._cond:
self._break() | [
"def",
"abort",
"(",
"self",
")",
":",
"with",
"self",
".",
"_cond",
":",
"self",
".",
"_break",
"(",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/threading.py#L685-L693 |
||
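A minimal sketch of the documented behaviour (stdlib threading; the two-party barrier is illustrative):

import threading

b = threading.Barrier(parties=2)
b.abort()                        # put the barrier into the broken state
try:
    b.wait()
except threading.BrokenBarrierError:
    print("barrier is broken")   # raised for current and future waiters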
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/ops/rnn.py | python | _dynamic_rnn_loop | (cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None) | return (final_outputs, final_state) | Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs. | Internal implementation of Dynamic RNN. | [
"Internal",
"implementation",
"of",
"Dynamic",
"RNN",
"."
] | def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.pack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unpack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.pack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state) | [
"def",
"_dynamic_rnn_loop",
"(",
"cell",
",",
"inputs",
",",
"initial_state",
",",
"parallel_iterations",
",",
"swap_memory",
",",
"sequence_length",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"state",
"=",
"initial_state",
"assert",
"isinstance",
"(",
"parallel_iterations",
",",
"int",
")",
",",
"\"parallel_iterations must be int\"",
"state_size",
"=",
"cell",
".",
"state_size",
"flat_input",
"=",
"nest",
".",
"flatten",
"(",
"inputs",
")",
"flat_output_size",
"=",
"nest",
".",
"flatten",
"(",
"cell",
".",
"output_size",
")",
"# Construct an initial output",
"input_shape",
"=",
"array_ops",
".",
"shape",
"(",
"flat_input",
"[",
"0",
"]",
")",
"time_steps",
"=",
"input_shape",
"[",
"0",
"]",
"batch_size",
"=",
"input_shape",
"[",
"1",
"]",
"inputs_got_shape",
"=",
"tuple",
"(",
"input_",
".",
"get_shape",
"(",
")",
".",
"with_rank_at_least",
"(",
"3",
")",
"for",
"input_",
"in",
"flat_input",
")",
"const_time_steps",
",",
"const_batch_size",
"=",
"inputs_got_shape",
"[",
"0",
"]",
".",
"as_list",
"(",
")",
"[",
":",
"2",
"]",
"for",
"shape",
"in",
"inputs_got_shape",
":",
"if",
"not",
"shape",
"[",
"2",
":",
"]",
".",
"is_fully_defined",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Input size (depth of inputs) must be accessible via shape inference,\"",
"\" but saw value None.\"",
")",
"got_time_steps",
"=",
"shape",
"[",
"0",
"]",
".",
"value",
"got_batch_size",
"=",
"shape",
"[",
"1",
"]",
".",
"value",
"if",
"const_time_steps",
"!=",
"got_time_steps",
":",
"raise",
"ValueError",
"(",
"\"Time steps is not the same for all the elements in the input in a \"",
"\"batch.\"",
")",
"if",
"const_batch_size",
"!=",
"got_batch_size",
":",
"raise",
"ValueError",
"(",
"\"Batch_size is not the same for all the elements in the input.\"",
")",
"# Prepare dynamic conditional copying of state & output",
"def",
"_create_zero_arrays",
"(",
"size",
")",
":",
"size",
"=",
"_state_size_with_prefix",
"(",
"size",
",",
"prefix",
"=",
"[",
"batch_size",
"]",
")",
"return",
"array_ops",
".",
"zeros",
"(",
"array_ops",
".",
"pack",
"(",
"size",
")",
",",
"_infer_state_dtype",
"(",
"dtype",
",",
"state",
")",
")",
"flat_zero_output",
"=",
"tuple",
"(",
"_create_zero_arrays",
"(",
"output",
")",
"for",
"output",
"in",
"flat_output_size",
")",
"zero_output",
"=",
"nest",
".",
"pack_sequence_as",
"(",
"structure",
"=",
"cell",
".",
"output_size",
",",
"flat_sequence",
"=",
"flat_zero_output",
")",
"if",
"sequence_length",
"is",
"not",
"None",
":",
"min_sequence_length",
"=",
"math_ops",
".",
"reduce_min",
"(",
"sequence_length",
")",
"max_sequence_length",
"=",
"math_ops",
".",
"reduce_max",
"(",
"sequence_length",
")",
"time",
"=",
"array_ops",
".",
"constant",
"(",
"0",
",",
"dtype",
"=",
"dtypes",
".",
"int32",
",",
"name",
"=",
"\"time\"",
")",
"with",
"ops",
".",
"name_scope",
"(",
"\"dynamic_rnn\"",
")",
"as",
"scope",
":",
"base_name",
"=",
"scope",
"def",
"_create_ta",
"(",
"name",
",",
"dtype",
")",
":",
"return",
"tensor_array_ops",
".",
"TensorArray",
"(",
"dtype",
"=",
"dtype",
",",
"size",
"=",
"time_steps",
",",
"tensor_array_name",
"=",
"base_name",
"+",
"name",
")",
"output_ta",
"=",
"tuple",
"(",
"_create_ta",
"(",
"\"output_%d\"",
"%",
"i",
",",
"_infer_state_dtype",
"(",
"dtype",
",",
"state",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"flat_output_size",
")",
")",
")",
"input_ta",
"=",
"tuple",
"(",
"_create_ta",
"(",
"\"input_%d\"",
"%",
"i",
",",
"flat_input",
"[",
"0",
"]",
".",
"dtype",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"flat_input",
")",
")",
")",
"input_ta",
"=",
"tuple",
"(",
"ta",
".",
"unpack",
"(",
"input_",
")",
"for",
"ta",
",",
"input_",
"in",
"zip",
"(",
"input_ta",
",",
"flat_input",
")",
")",
"def",
"_time_step",
"(",
"time",
",",
"output_ta_t",
",",
"state",
")",
":",
"\"\"\"Take a time step of the dynamic RNN.\n\n Args:\n time: int32 scalar Tensor.\n output_ta_t: List of `TensorArray`s that represent the output.\n state: nested tuple of vector tensors that represent the state.\n\n Returns:\n The tuple (time + 1, output_ta_t with updated flow, new_state).\n \"\"\"",
"input_t",
"=",
"tuple",
"(",
"ta",
".",
"read",
"(",
"time",
")",
"for",
"ta",
"in",
"input_ta",
")",
"# Restore some shape information",
"for",
"input_",
",",
"shape",
"in",
"zip",
"(",
"input_t",
",",
"inputs_got_shape",
")",
":",
"input_",
".",
"set_shape",
"(",
"shape",
"[",
"1",
":",
"]",
")",
"input_t",
"=",
"nest",
".",
"pack_sequence_as",
"(",
"structure",
"=",
"inputs",
",",
"flat_sequence",
"=",
"input_t",
")",
"call_cell",
"=",
"lambda",
":",
"cell",
"(",
"input_t",
",",
"state",
")",
"if",
"sequence_length",
"is",
"not",
"None",
":",
"(",
"output",
",",
"new_state",
")",
"=",
"_rnn_step",
"(",
"time",
"=",
"time",
",",
"sequence_length",
"=",
"sequence_length",
",",
"min_sequence_length",
"=",
"min_sequence_length",
",",
"max_sequence_length",
"=",
"max_sequence_length",
",",
"zero_output",
"=",
"zero_output",
",",
"state",
"=",
"state",
",",
"call_cell",
"=",
"call_cell",
",",
"state_size",
"=",
"state_size",
",",
"skip_conditionals",
"=",
"True",
")",
"else",
":",
"(",
"output",
",",
"new_state",
")",
"=",
"call_cell",
"(",
")",
"# Pack state if using state tuples",
"output",
"=",
"nest",
".",
"flatten",
"(",
"output",
")",
"output_ta_t",
"=",
"tuple",
"(",
"ta",
".",
"write",
"(",
"time",
",",
"out",
")",
"for",
"ta",
",",
"out",
"in",
"zip",
"(",
"output_ta_t",
",",
"output",
")",
")",
"return",
"(",
"time",
"+",
"1",
",",
"output_ta_t",
",",
"new_state",
")",
"_",
",",
"output_final_ta",
",",
"final_state",
"=",
"control_flow_ops",
".",
"while_loop",
"(",
"cond",
"=",
"lambda",
"time",
",",
"*",
"_",
":",
"time",
"<",
"time_steps",
",",
"body",
"=",
"_time_step",
",",
"loop_vars",
"=",
"(",
"time",
",",
"output_ta",
",",
"state",
")",
",",
"parallel_iterations",
"=",
"parallel_iterations",
",",
"swap_memory",
"=",
"swap_memory",
")",
"# Unpack final output if not using output tuples.",
"final_outputs",
"=",
"tuple",
"(",
"ta",
".",
"pack",
"(",
")",
"for",
"ta",
"in",
"output_final_ta",
")",
"# Restore some shape information",
"for",
"output",
",",
"output_size",
"in",
"zip",
"(",
"final_outputs",
",",
"flat_output_size",
")",
":",
"shape",
"=",
"_state_size_with_prefix",
"(",
"output_size",
",",
"prefix",
"=",
"[",
"const_time_steps",
",",
"const_batch_size",
"]",
")",
"output",
".",
"set_shape",
"(",
"shape",
")",
"final_outputs",
"=",
"nest",
".",
"pack_sequence_as",
"(",
"structure",
"=",
"cell",
".",
"output_size",
",",
"flat_sequence",
"=",
"final_outputs",
")",
"return",
"(",
"final_outputs",
",",
"final_state",
")"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/ops/rnn.py#L864-L1029 |
|
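The loop above is internal; callers reach it through tf.nn.dynamic_rnn. A hedged TF1-era sketch (shapes, cell size, and placeholders are assumptions):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [8, 10, 32])   # [batch, time, depth]
seq_len = tf.placeholder(tf.int32, [8])
cell = tf.nn.rnn_cell.BasicLSTMCell(64)
outputs, state = tf.nn.dynamic_rnn(cell, inputs, sequence_length=seq_len,
                                   dtype=tf.float32)
# time_major defaults to False; internally the loop runs over [time, batch, depth]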
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/docutils/utils/math/math2html.py | python | LimitsProcessor.checklimits | (self, contents, index) | return self.checkscript(contents, index + 1) | Check if the current position has a limits command. | Check if the current position has a limits command. | [
"Check",
"if",
"the",
"current",
"position",
"has",
"a",
"limits",
"command",
"."
] | def checklimits(self, contents, index):
"Check if the current position has a limits command."
if not DocumentParameters.displaymode:
return False
if self.checkcommand(contents, index + 1, LimitPreviousCommand):
self.limitsahead(contents, index)
return False
if not isinstance(contents[index], LimitCommand):
return False
return self.checkscript(contents, index + 1) | [
"def",
"checklimits",
"(",
"self",
",",
"contents",
",",
"index",
")",
":",
"if",
"not",
"DocumentParameters",
".",
"displaymode",
":",
"return",
"False",
"if",
"self",
".",
"checkcommand",
"(",
"contents",
",",
"index",
"+",
"1",
",",
"LimitPreviousCommand",
")",
":",
"self",
".",
"limitsahead",
"(",
"contents",
",",
"index",
")",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"contents",
"[",
"index",
"]",
",",
"LimitCommand",
")",
":",
"return",
"False",
"return",
"self",
".",
"checkscript",
"(",
"contents",
",",
"index",
"+",
"1",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/utils/math/math2html.py#L4677-L4686 |
|
bh107/bohrium | 5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | bridge/npbackend/bohrium/user_kernel.py | python | gen_function_prototype | (operand_list, operand_name_list=None) | return "%s)\n" % ret[:-2] | Returns the `execute()` definition based on the arrays in `operand_list` | Returns the `execute()` definition based on the arrays in `operand_list` | [
"Returns",
"the",
"execute",
"()",
"definition",
"based",
"on",
"the",
"arrays",
"in",
"operand_list"
] | def gen_function_prototype(operand_list, operand_name_list=None):
""" Returns the `execute() definition based on the arrays in `operand_list` """
dtype_list = [dtype_to_c99(t.dtype) for t in operand_list]
ret = "#include <stdint.h>\n#include <complex.h>\n"
ret += "void execute("
for i in range(len(dtype_list)):
ret += "%s *" % dtype_list[i]
if operand_name_list is None:
ret += "a%d, " % i
else:
ret += "%s, " % operand_name_list[i]
return "%s)\n" % ret[:-2] | [
"def",
"gen_function_prototype",
"(",
"operand_list",
",",
"operand_name_list",
"=",
"None",
")",
":",
"dtype_list",
"=",
"[",
"dtype_to_c99",
"(",
"t",
".",
"dtype",
")",
"for",
"t",
"in",
"operand_list",
"]",
"ret",
"=",
"\"#include <stdint.h>\\n#include <complex.h>\\n\"",
"ret",
"+=",
"\"void execute(\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dtype_list",
")",
")",
":",
"ret",
"+=",
"\"%s *\"",
"%",
"dtype_list",
"[",
"i",
"]",
"if",
"operand_name_list",
"is",
"None",
":",
"ret",
"+=",
"\"a%d, \"",
"%",
"i",
"else",
":",
"ret",
"+=",
"\"%s, \"",
"%",
"operand_name_list",
"[",
"i",
"]",
"return",
"\"%s)\\n\"",
"%",
"ret",
"[",
":",
"-",
"2",
"]"
] | https://github.com/bh107/bohrium/blob/5b83e7117285fefc7779ed0e9acb0f8e74c7e068/bridge/npbackend/bohrium/user_kernel.py#L106-L117 |
|
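An illustrative call with NumPy-backed operands (the exact C names produced by dtype_to_c99 are an assumption):

import numpy as np

a = np.zeros(3, dtype=np.float64)
b = np.zeros(3, dtype=np.int32)
sig = gen_function_prototype([a, b], ["in0", "out0"])
# expected shape of the result, assuming float64 -> double, int32 -> int32_t:
#   ...void execute(double *in0, int32_t *out0)
print(sig)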
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/Jinja2/py2/jinja2/lexer.py | python | describe_token_expr | (expr) | return _describe_token_type(type) | Like `describe_token` but for token expressions. | Like `describe_token` but for token expressions. | [
"Like",
"describe_token",
"but",
"for",
"token",
"expressions",
"."
] | def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ":" in expr:
type, value = expr.split(":", 1)
if type == TOKEN_NAME:
return value
else:
type = expr
return _describe_token_type(type) | [
"def",
"describe_token_expr",
"(",
"expr",
")",
":",
"if",
"\":\"",
"in",
"expr",
":",
"type",
",",
"value",
"=",
"expr",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"if",
"type",
"==",
"TOKEN_NAME",
":",
"return",
"value",
"else",
":",
"type",
"=",
"expr",
"return",
"_describe_token_type",
"(",
"type",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/Jinja2/py2/jinja2/lexer.py#L187-L195 |
|
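Two illustrative calls against Jinja2's lexer module (token names follow the module's TOKEN_* constants):

from jinja2.lexer import describe_token_expr

print(describe_token_expr('name:endfor'))  # 'endfor' (name token with a value)
print(describe_token_expr('integer'))      # falls through to the type description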
GoSSIP-SJTU/Armariris | ad5d868482956b2194a77b39c8d543c7c2318200 | bindings/python/llvm/object.py | python | ObjectFile.get_sections | (self, cache=False) | Obtain the sections in this object file.
This is a generator for llvm.object.Section instances.
Sections are exposed as limited-use objects. See the module's
documentation on iterators for more. | Obtain the sections in this object file. | [
"Obtain",
"the",
"sections",
"in",
"this",
"object",
"file",
"."
] | def get_sections(self, cache=False):
"""Obtain the sections in this object file.
This is a generator for llvm.object.Section instances.
Sections are exposed as limited-use objects. See the module's
documentation on iterators for more.
"""
sections = lib.LLVMGetSections(self)
last = None
while True:
if lib.LLVMIsSectionIteratorAtEnd(self, sections):
break
last = Section(sections)
if cache:
last.cache()
yield last
lib.LLVMMoveToNextSection(sections)
last.expire()
if last is not None:
last.expire()
lib.LLVMDisposeSectionIterator(sections) | [
"def",
"get_sections",
"(",
"self",
",",
"cache",
"=",
"False",
")",
":",
"sections",
"=",
"lib",
".",
"LLVMGetSections",
"(",
"self",
")",
"last",
"=",
"None",
"while",
"True",
":",
"if",
"lib",
".",
"LLVMIsSectionIteratorAtEnd",
"(",
"self",
",",
"sections",
")",
":",
"break",
"last",
"=",
"Section",
"(",
"sections",
")",
"if",
"cache",
":",
"last",
".",
"cache",
"(",
")",
"yield",
"last",
"lib",
".",
"LLVMMoveToNextSection",
"(",
"sections",
")",
"last",
".",
"expire",
"(",
")",
"if",
"last",
"is",
"not",
"None",
":",
"last",
".",
"expire",
"(",
")",
"lib",
".",
"LLVMDisposeSectionIterator",
"(",
"sections",
")"
] | https://github.com/GoSSIP-SJTU/Armariris/blob/ad5d868482956b2194a77b39c8d543c7c2318200/bindings/python/llvm/object.py#L123-L149 |
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/debug/cli/base_ui.py | python | BaseUI.__init__ | (self, on_ui_exit=None, config=None) | Constructor of the base class.
Args:
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations. | Constructor of the base class. | [
"Constructor",
"of",
"the",
"base",
"class",
"."
] | def __init__(self, on_ui_exit=None, config=None):
"""Constructor of the base class.
Args:
on_ui_exit: (`Callable`) the callback to be called when the UI exits.
config: An instance of `cli_config.CLIConfig()` carrying user-facing
configurations.
"""
self._on_ui_exit = on_ui_exit
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._config = config or cli_config.CLIConfig()
self._config_argparser = argparse.ArgumentParser(
description="config command", usage=argparse.SUPPRESS)
subparsers = self._config_argparser.add_subparsers()
set_parser = subparsers.add_parser("set")
set_parser.add_argument("property_name", type=str)
set_parser.add_argument("property_value", type=str)
set_parser = subparsers.add_parser("show")
self.register_command_handler(
"config",
self._config_command_handler,
self._config_argparser.format_help(),
prefix_aliases=["cfg"]) | [
"def",
"__init__",
"(",
"self",
",",
"on_ui_exit",
"=",
"None",
",",
"config",
"=",
"None",
")",
":",
"self",
".",
"_on_ui_exit",
"=",
"on_ui_exit",
"self",
".",
"_command_handler_registry",
"=",
"(",
"debugger_cli_common",
".",
"CommandHandlerRegistry",
"(",
")",
")",
"self",
".",
"_tab_completion_registry",
"=",
"debugger_cli_common",
".",
"TabCompletionRegistry",
"(",
")",
"# Create top-level tab-completion context and register the exit and help",
"# commands.",
"self",
".",
"_tab_completion_registry",
".",
"register_tab_comp_context",
"(",
"[",
"\"\"",
"]",
",",
"self",
".",
"CLI_EXIT_COMMANDS",
"+",
"[",
"debugger_cli_common",
".",
"CommandHandlerRegistry",
".",
"HELP_COMMAND",
"]",
"+",
"debugger_cli_common",
".",
"CommandHandlerRegistry",
".",
"HELP_COMMAND_ALIASES",
")",
"self",
".",
"_config",
"=",
"config",
"or",
"cli_config",
".",
"CLIConfig",
"(",
")",
"self",
".",
"_config_argparser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"config command\"",
",",
"usage",
"=",
"argparse",
".",
"SUPPRESS",
")",
"subparsers",
"=",
"self",
".",
"_config_argparser",
".",
"add_subparsers",
"(",
")",
"set_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"set\"",
")",
"set_parser",
".",
"add_argument",
"(",
"\"property_name\"",
",",
"type",
"=",
"str",
")",
"set_parser",
".",
"add_argument",
"(",
"\"property_value\"",
",",
"type",
"=",
"str",
")",
"set_parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"show\"",
")",
"self",
".",
"register_command_handler",
"(",
"\"config\"",
",",
"self",
".",
"_config_command_handler",
",",
"self",
".",
"_config_argparser",
".",
"format_help",
"(",
")",
",",
"prefix_aliases",
"=",
"[",
"\"cfg\"",
"]",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/debug/cli/base_ui.py#L35-L70 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/psutil/_pswindows.py | python | py2_strencode | (s) | Encode a unicode string to a byte string by using the default fs
encoding + "replace" error handler. | Encode a unicode string to a byte string by using the default fs
encoding + "replace" error handler. | [
"Encode",
"a",
"unicode",
"string",
"to",
"a",
"byte",
"string",
"by",
"using",
"the",
"default",
"fs",
"encoding",
"+",
"replace",
"error",
"handler",
"."
] | def py2_strencode(s):
"""Encode a unicode string to a byte string by using the default fs
encoding + "replace" error handler.
"""
if PY3:
return s
else:
if isinstance(s, str):
return s
else:
return s.encode(ENCODING, ENCODING_ERRS) | [
"def",
"py2_strencode",
"(",
"s",
")",
":",
"if",
"PY3",
":",
"return",
"s",
"else",
":",
"if",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"return",
"s",
"else",
":",
"return",
"s",
".",
"encode",
"(",
"ENCODING",
",",
"ENCODING_ERRS",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/psutil/_pswindows.py#L205-L215 |
||
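A quick sketch of both branches (the accented literal is illustrative):

# On Python 3 the input is returned unchanged; on Python 2 a unicode
# string is encoded with the fs encoding and the 'replace' handler.
print(py2_strencode(u'caf\xe9'))
print(py2_strencode('ascii-only'))  # str passes straight through on both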
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | contrib/gizmos/gtk/gizmos.py | python | TreeListCtrl.GetChildrenCount | (*args, **kwargs) | return _gizmos.TreeListCtrl_GetChildrenCount(*args, **kwargs) | GetChildrenCount(self, TreeItemId item, bool recursively=True) -> size_t | GetChildrenCount(self, TreeItemId item, bool recursively=True) -> size_t | [
"GetChildrenCount",
"(",
"self",
"TreeItemId",
"item",
"bool",
"recursively",
"=",
"True",
")",
"-",
">",
"size_t"
] | def GetChildrenCount(*args, **kwargs):
"""GetChildrenCount(self, TreeItemId item, bool recursively=True) -> size_t"""
return _gizmos.TreeListCtrl_GetChildrenCount(*args, **kwargs) | [
"def",
"GetChildrenCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gizmos",
".",
"TreeListCtrl_GetChildrenCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/gtk/gizmos.py#L742-L744 |
|
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py | python | _allreduce_fut | (
process_group: dist.ProcessGroup, tensor: torch.Tensor
) | return (
dist.all_reduce(tensor, group=group_to_use, async_op=True)
.get_future()
.then(lambda fut: fut.value()[0])
) | Averages the input gradient tensor by allreduce and returns a future. | Averages the input gradient tensor by allreduce and returns a future. | [
"Averages",
"the",
"input",
"gradient",
"tensor",
"by",
"allreduce",
"and",
"returns",
"a",
"future",
"."
] | def _allreduce_fut(
process_group: dist.ProcessGroup, tensor: torch.Tensor
) -> torch.futures.Future[torch.Tensor]:
"Averages the input gradient tensor by allreduce and returns a future."
group_to_use = process_group if process_group is not None else dist.group.WORLD
# Apply the division first to avoid overflow, especially for FP16.
tensor.div_(group_to_use.size())
return (
dist.all_reduce(tensor, group=group_to_use, async_op=True)
.get_future()
.then(lambda fut: fut.value()[0])
) | [
"def",
"_allreduce_fut",
"(",
"process_group",
":",
"dist",
".",
"ProcessGroup",
",",
"tensor",
":",
"torch",
".",
"Tensor",
")",
"->",
"torch",
".",
"futures",
".",
"Future",
"[",
"torch",
".",
"Tensor",
"]",
":",
"group_to_use",
"=",
"process_group",
"if",
"process_group",
"is",
"not",
"None",
"else",
"dist",
".",
"group",
".",
"WORLD",
"# Apply the division first to avoid overflow, especially for FP16.",
"tensor",
".",
"div_",
"(",
"group_to_use",
".",
"size",
"(",
")",
")",
"return",
"(",
"dist",
".",
"all_reduce",
"(",
"tensor",
",",
"group",
"=",
"group_to_use",
",",
"async_op",
"=",
"True",
")",
".",
"get_future",
"(",
")",
".",
"then",
"(",
"lambda",
"fut",
":",
"fut",
".",
"value",
"(",
")",
"[",
"0",
"]",
")",
")"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py#L7-L20 |
|
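A single-process sketch (the gloo backend and rendezvous env vars are assumptions made for the example):

import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

t = torch.ones(4)
avg = _allreduce_fut(None, t).wait()  # None -> default WORLD group
# with world_size == 1 the division by group size leaves t unchanged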
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/indexes/multi.py | python | MultiIndex.get_level_values | (self, level) | return values | Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``values`` is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
---------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2') | Return vector of label values for requested level,
equal to the length of the index. | [
"Return",
"vector",
"of",
"label",
"values",
"for",
"requested",
"level",
"equal",
"to",
"the",
"length",
"of",
"the",
"index",
"."
] | def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``values`` is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values | [
"def",
"get_level_values",
"(",
"self",
",",
"level",
")",
":",
"level",
"=",
"self",
".",
"_get_level_number",
"(",
"level",
")",
"values",
"=",
"self",
".",
"_get_level_values",
"(",
"level",
")",
"return",
"values"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/indexes/multi.py#L1380-L1414 |
|
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | pygimli/physics/traveltime/modelling.py | python | TravelTimeDijkstraModelling.response | (self, par) | return self._core.response(par) | Return forward response (simulated traveltimes). | Return forward response (simulated traveltimes). | [
"Return",
"forward",
"response",
"(",
"simulated",
"traveltimes",
")",
"."
] | def response(self, par):
"""Return forward response (simulated traveltimes)."""
if not self.mesh():
pg.critical("no mesh")
return self._core.response(par) | [
"def",
"response",
"(",
"self",
",",
"par",
")",
":",
"if",
"not",
"self",
".",
"mesh",
"(",
")",
":",
"pg",
".",
"critical",
"(",
"\"no mesh\"",
")",
"return",
"self",
".",
"_core",
".",
"response",
"(",
"par",
")"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/traveltime/modelling.py#L86-L90 |
|
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/vision/models/resnet.py | python | resnet101 | (pretrained=False, **kwargs) | return _resnet('resnet101', BottleneckBlock, 101, pretrained, **kwargs) | ResNet 101-layer model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Examples:
.. code-block:: python
import paddle
from paddle.vision.models import resnet101
# build model
model = resnet101()
# build model and load imagenet pretrained weight
# model = resnet101(pretrained=True)
x = paddle.rand([1, 3, 224, 224])
out = model(x)
print(out.shape) | ResNet 101-layer model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ | [
"ResNet",
"101",
"-",
"layer",
"model",
"from",
"Deep",
"Residual",
"Learning",
"for",
"Image",
"Recognition",
"<https",
":",
"//",
"arxiv",
".",
"org",
"/",
"pdf",
"/",
"1512",
".",
"03385",
".",
"pdf",
">",
"_"
] | def resnet101(pretrained=False, **kwargs):
"""ResNet 101-layer model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
Examples:
.. code-block:: python
import paddle
from paddle.vision.models import resnet101
# build model
model = resnet101()
# build model and load imagenet pretrained weight
# model = resnet101(pretrained=True)
x = paddle.rand([1, 3, 224, 224])
out = model(x)
print(out.shape)
"""
return _resnet('resnet101', BottleneckBlock, 101, pretrained, **kwargs) | [
"def",
"resnet101",
"(",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_resnet",
"(",
"'resnet101'",
",",
"BottleneckBlock",
",",
"101",
",",
"pretrained",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/vision/models/resnet.py#L379-L403 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/dataview.py | python | DataViewModel.IsContainer | (*args, **kwargs) | return _dataview.DataViewModel_IsContainer(*args, **kwargs) | IsContainer(self, DataViewItem item) -> bool
Override this to indicate whether an item is a container, in other
words, if it is a parent item that can have children. | IsContainer(self, DataViewItem item) -> bool | [
"IsContainer",
"(",
"self",
"DataViewItem",
"item",
")",
"-",
">",
"bool"
] | def IsContainer(*args, **kwargs):
"""
IsContainer(self, DataViewItem item) -> bool
Override this to indicate whether an item is a container, in other
words, if it is a parent item that can have children.
"""
return _dataview.DataViewModel_IsContainer(*args, **kwargs) | [
"def",
"IsContainer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewModel_IsContainer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/dataview.py#L537-L544 |
|
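A hypothetical subclass showing the override (the node/children attributes are assumptions, not part of the record):

import wx.dataview as dv

class MyModel(dv.PyDataViewModel):
    def IsContainer(self, item):
        # the invisible root is always a container
        if not item:
            return True
        node = self.ItemToObject(item)
        return bool(getattr(node, "children", None))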
Tencent/CMONGO | c40380caa14e05509f46993aa8b8da966b09b0b5 | src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/BitKeeper.py | python | generate | (env) | Add a Builder factory function and construction variables for
BitKeeper to an Environment. | Add a Builder factory function and construction variables for
BitKeeper to an Environment. | [
"Add",
"a",
"Builder",
"factory",
"function",
"and",
"construction",
"variables",
"for",
"BitKeeper",
"to",
"an",
"Environment",
"."
] | def generate(env):
"""Add a Builder factory function and construction variables for
BitKeeper to an Environment."""
def BitKeeperFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
return SCons.Builder.Builder(action = act, env = env)
env.BitKeeper = BitKeeperFactory
env['BITKEEPER'] = 'bk'
env['BITKEEPERGET'] = '$BITKEEPER get'
env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET' | [
"def",
"generate",
"(",
"env",
")",
":",
"def",
"BitKeeperFactory",
"(",
"env",
"=",
"env",
")",
":",
"\"\"\" \"\"\"",
"import",
"SCons",
".",
"Warnings",
"as",
"W",
"W",
".",
"warn",
"(",
"W",
".",
"DeprecatedSourceCodeWarning",
",",
"\"\"\"The BitKeeper() factory is deprecated and there is no replacement.\"\"\"",
")",
"act",
"=",
"SCons",
".",
"Action",
".",
"Action",
"(",
"\"$BITKEEPERCOM\"",
",",
"\"$BITKEEPERCOMSTR\"",
")",
"return",
"SCons",
".",
"Builder",
".",
"Builder",
"(",
"action",
"=",
"act",
",",
"env",
"=",
"env",
")",
"env",
".",
"BitKeeper",
"=",
"BitKeeperFactory",
"env",
"[",
"'BITKEEPER'",
"]",
"=",
"'bk'",
"env",
"[",
"'BITKEEPERGET'",
"]",
"=",
"'$BITKEEPER get'",
"env",
"[",
"'BITKEEPERGETFLAGS'",
"]",
"=",
"SCons",
".",
"Util",
".",
"CLVar",
"(",
"''",
")",
"env",
"[",
"'BITKEEPERCOM'",
"]",
"=",
"'$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'"
] | https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/BitKeeper.py#L41-L57 |
||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/learn/python/learn/dataframe/transform.py | python | Transform.output_names | (self) | return _make_tuple_of_string(self._output_names) | The names of `Series` output by the `Transform`.
This function should depend only on `@parameter`s of this `Transform`.
Returns:
A tuple of names of outputs provided by this Transform. | The names of `Series` output by the `Transform`. | [
"The",
"names",
"of",
"Series",
"output",
"by",
"the",
"Transform",
"."
] | def output_names(self):
"""The names of `Series` output by the `Transform`.
This function should depend only on `@parameter`s of this `Transform`.
Returns:
A tuple of names of outputs provided by this Transform.
"""
return _make_tuple_of_string(self._output_names) | [
"def",
"output_names",
"(",
"self",
")",
":",
"return",
"_make_tuple_of_string",
"(",
"self",
".",
"_output_names",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/learn/python/learn/dataframe/transform.py#L142-L150 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | PickerBase.SetTextCtrlProportion | (*args, **kwargs) | return _controls_.PickerBase_SetTextCtrlProportion(*args, **kwargs) | SetTextCtrlProportion(self, int prop)
Sets the proportion between the text control and the picker button.
This is used to set relative sizes of the text control and the picker.
The value passed to this function must be >= 1. | SetTextCtrlProportion(self, int prop) | [
"SetTextCtrlProportion",
"(",
"self",
"int",
"prop",
")"
] | def SetTextCtrlProportion(*args, **kwargs):
"""
SetTextCtrlProportion(self, int prop)
Sets the proportion between the text control and the picker button.
This is used to set relative sizes of the text control and the picker.
The value passed to this function must be >= 1.
"""
return _controls_.PickerBase_SetTextCtrlProportion(*args, **kwargs) | [
"def",
"SetTextCtrlProportion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"PickerBase_SetTextCtrlProportion",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L6767-L6775 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/xml/etree/ElementTree.py | python | TreeBuilder.end | (self, tag) | return self._last | Close and return current Element.
*tag* is the element name. | Close and return current Element. | [
"Close",
"and",
"return",
"current",
"Element",
"."
] | def end(self, tag):
"""Close and return current Element.
*tag* is the element name.
"""
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last | [
"def",
"end",
"(",
"self",
",",
"tag",
")",
":",
"self",
".",
"_flush",
"(",
")",
"self",
".",
"_last",
"=",
"self",
".",
"_elem",
".",
"pop",
"(",
")",
"assert",
"self",
".",
"_last",
".",
"tag",
"==",
"tag",
",",
"\"end tag mismatch (expected %s, got %s)\"",
"%",
"(",
"self",
".",
"_last",
".",
"tag",
",",
"tag",
")",
"self",
".",
"_tail",
"=",
"1",
"return",
"self",
".",
"_last"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/xml/etree/ElementTree.py#L1420-L1432 |
|
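A minimal round trip showing the tag-matching assertion (the tags are illustrative):

from xml.etree.ElementTree import TreeBuilder, tostring

tb = TreeBuilder()
tb.start("root", {})
tb.data("hi")
elem = tb.end("root")        # returns the just-closed <root> element
print(tostring(tb.close()))  # b'<root>hi</root>'
# tb.end("other") in place of tb.end("root") would trip the assertion above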
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py2/setuptools/command/install_lib.py | python | install_lib._exclude_pkg_path | (self, pkg, exclusion_path) | return os.path.join(self.install_dir, *parts) | Given a package name and exclusion path within that package,
compute the full exclusion path. | Given a package name and exclusion path within that package,
compute the full exclusion path. | [
"Given",
"a",
"package",
"name",
"and",
"exclusion",
"path",
"within",
"that",
"package",
"compute",
"the",
"full",
"exclusion",
"path",
"."
] | def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts) | [
"def",
"_exclude_pkg_path",
"(",
"self",
",",
"pkg",
",",
"exclusion_path",
")",
":",
"parts",
"=",
"pkg",
".",
"split",
"(",
"'.'",
")",
"+",
"[",
"exclusion_path",
"]",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"install_dir",
",",
"*",
"parts",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/command/install_lib.py#L31-L37 |
|
tfwu/FaceDetection-ConvNet-3D | f9251c48eb40c5aec8fba7455115c355466555be | python/build/lib.linux-x86_64-2.7/mxnet/model.py | python | _create_kvstore | (kvstore, num_device, arg_params) | return (kv, update_on_kvstore) | Create kvstore
This function selects and creates a proper kvstore if given the kvstore type
Parameters
----------
kvstore : KVStore or str
The kvstore
num_device : int
The number of devices
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights. | Create kvstore
This function selects and creates a proper kvstore if given the kvstore type
Parameters
----------
kvstore : KVStore or str
The kvstore
num_device : int
The number of devices
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights. | [
"Create",
"kvstore",
"This",
"function",
"select",
"and",
"create",
"a",
"proper",
"kvstore",
"if",
"given",
"the",
"kvstore",
"type",
"Parameters",
"----------",
"kvstore",
":",
"KVStore",
"or",
"str",
"The",
"kvstore",
"num_device",
":",
"int",
"The",
"number",
"of",
"devices",
"arg_params",
":",
"dict",
"of",
"str",
"to",
"NDArray",
"Model",
"parameter",
"dict",
"of",
"name",
"to",
"NDArray",
"of",
"net",
"s",
"weights",
"."
] | def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function selects and creates a proper kvstore if given the kvstore type
Parameters
----------
kvstore : KVStore or str
The kvstore
num_device : int
The number of devices
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
"""
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device is 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
if kvstore is 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in arg_params.values())
if max_size < 1024 * 1024 * 16:
kvstore = 'local_update_cpu'
else:
kvstore = 'local_allreduce_cpu'
logging.info('Auto-select kvstore type = %s', kvstore)
kv = kvs.create(kvstore)
else:
raise TypeError('kvstore must be KVStore, str or None')
# detect whether or not update weight on kvstore
update_on_kvstore = True
if not kv or 'local_allreduce' in kv.type:
update_on_kvstore = False
return (kv, update_on_kvstore) | [
"def",
"_create_kvstore",
"(",
"kvstore",
",",
"num_device",
",",
"arg_params",
")",
":",
"if",
"kvstore",
"is",
"None",
":",
"kv",
"=",
"None",
"elif",
"isinstance",
"(",
"kvstore",
",",
"kvs",
".",
"KVStore",
")",
":",
"kv",
"=",
"kvstore",
"elif",
"isinstance",
"(",
"kvstore",
",",
"str",
")",
":",
"# create kvstore using the string type",
"if",
"num_device",
"is",
"1",
"and",
"'dist'",
"not",
"in",
"kvstore",
":",
"# no need to use kv for single device and single machine",
"kv",
"=",
"None",
"else",
":",
"if",
"kvstore",
"is",
"'local'",
":",
"# automatically select a proper local",
"max_size",
"=",
"max",
"(",
"np",
".",
"prod",
"(",
"param",
".",
"shape",
")",
"for",
"param",
"in",
"arg_params",
".",
"values",
"(",
")",
")",
"if",
"max_size",
"<",
"1024",
"*",
"1024",
"*",
"16",
":",
"kvstore",
"=",
"'local_update_cpu'",
"else",
":",
"kvstore",
"=",
"'local_allreduce_cpu'",
"logging",
".",
"info",
"(",
"'Auto-select kvstore type = %s'",
",",
"kvstore",
")",
"kv",
"=",
"kvs",
".",
"create",
"(",
"kvstore",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'kvstore must be KVStore, str or None'",
")",
"# detect whether or not update weight on kvstore",
"update_on_kvstore",
"=",
"True",
"if",
"not",
"kv",
"or",
"'local_allreduce'",
"in",
"kv",
".",
"type",
":",
"update_on_kvstore",
"=",
"False",
"return",
"(",
"kv",
",",
"update_on_kvstore",
")"
] | https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/build/lib.linux-x86_64-2.7/mxnet/model.py#L36-L76 |
|
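An illustrative call (assumes an old mxnet build matching the imports used above; the parameter name and shape are made up):

import mxnet as mx

arg_params = {'weight': mx.nd.zeros((16, 16))}
kv, update_on_kvstore = _create_kvstore('local', 1, arg_params)
# one device and a non-'dist' string -> kv is None, update_on_kvstore False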
idaholab/moose | 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff | python/MooseDocs/common/box.py | python | box | (content, title=None, line=None, width=None, color='RESET') | return mooseutils.colorText(out, color) | Tool for building unicode box around text; this is used for error reporting. | Tool for building unicode box around text; this is used for error reporting. | [
"Tool",
"for",
"building",
"unicode",
"box",
"around",
"text",
"this",
"is",
"used",
"for",
"error",
"reporting",
"."
] | def box(content, title=None, line=None, width=None, color='RESET'):
"""Tool for building unicode box around text, this is used for error reporting."""
lines = content.split('\n')
n_lines = len(max(lines, key=len))
out = ''
if title:
out += title + '\n'
if line is not None:
num_digits = len(str(line + len(lines)))
if width:
n_lines = max([width - num_digits - 2, n_lines])
out += '{0:>{1}}{2}{3}{4}'.format(' ', num_digits, '\u250C', '\u2500'*n_lines, '\u2510')
for i, x in enumerate(lines):
out += '\n{0:>{1}}{2}{3:<{4}}{2}'.format(line+i, num_digits, '\u2502', x, n_lines)
out += '\n{0:>{1}}{2}{3}{4}'.format(' ', num_digits, '\u2514', '\u2500'*n_lines,
'\u2518')
else:
if width:
n_lines = max([width - 2, n_lines])
out += '{}{}{}'.format('\u250C', '\u2500'*n_lines, '\u2510')
for i, x in enumerate(lines):
out += '\n{0}{1:<{2}}{0}'.format('\u2502', x, n_lines)
out += '\n{}{}{}'.format('\u2514', '\u2500'*n_lines, '\u2518')
if color is None:
return out
return mooseutils.colorText(out, color) | [
"def",
"box",
"(",
"content",
",",
"title",
"=",
"None",
",",
"line",
"=",
"None",
",",
"width",
"=",
"None",
",",
"color",
"=",
"'RESET'",
")",
":",
"lines",
"=",
"content",
".",
"split",
"(",
"'\\n'",
")",
"n_lines",
"=",
"len",
"(",
"max",
"(",
"lines",
",",
"key",
"=",
"len",
")",
")",
"out",
"=",
"''",
"if",
"title",
":",
"out",
"+=",
"title",
"+",
"'\\n'",
"if",
"line",
"is",
"not",
"None",
":",
"num_digits",
"=",
"len",
"(",
"str",
"(",
"line",
"+",
"len",
"(",
"lines",
")",
")",
")",
"if",
"width",
":",
"n_lines",
"=",
"max",
"(",
"[",
"width",
"-",
"num_digits",
"-",
"2",
",",
"n_lines",
"]",
")",
"out",
"+=",
"'{0:>{1}}{2}{3}{4}'",
".",
"format",
"(",
"' '",
",",
"num_digits",
",",
"'\\u250C'",
",",
"'\\u2500'",
"*",
"n_lines",
",",
"'\\u2510'",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"lines",
")",
":",
"out",
"+=",
"'\\n{0:>{1}}{2}{3:<{4}}{2}'",
".",
"format",
"(",
"line",
"+",
"i",
",",
"num_digits",
",",
"'\\u2502'",
",",
"x",
",",
"n_lines",
")",
"out",
"+=",
"'\\n{0:>{1}}{2}{3}{4}'",
".",
"format",
"(",
"' '",
",",
"num_digits",
",",
"'\\u2514'",
",",
"'\\u2500'",
"*",
"n_lines",
",",
"'\\u2518'",
")",
"else",
":",
"if",
"width",
":",
"n_lines",
"=",
"max",
"(",
"[",
"width",
"-",
"2",
",",
"n_lines",
"]",
")",
"out",
"+=",
"'{}{}{}'",
".",
"format",
"(",
"'\\u250C'",
",",
"'\\u2500'",
"*",
"n_lines",
",",
"'\\u2510'",
")",
"for",
"i",
",",
"x",
"in",
"enumerate",
"(",
"lines",
")",
":",
"out",
"+=",
"'\\n{0}{1:<{2}}{0}'",
".",
"format",
"(",
"'\\u2502'",
",",
"x",
",",
"n_lines",
")",
"out",
"+=",
"'\\n{}{}{}'",
".",
"format",
"(",
"'\\u2514'",
",",
"'\\u2500'",
"*",
"n_lines",
",",
"'\\u2518'",
")",
"if",
"color",
"is",
"None",
":",
"return",
"out",
"return",
"mooseutils",
".",
"colorText",
"(",
"out",
",",
"color",
")"
] | https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/MooseDocs/common/box.py#L11-L40 |
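
An illustrative call to the `box` helper above; the import path is assumed from the file location in the record, and `mooseutils` must be available for the final colouring.

from MooseDocs.common.box import box  # import path assumed from the record above

# Frames two lines of content, numbering them 42 and 43 down the left edge.
print(box('x = 1\ny = 2', title='Error', line=42, width=40, color='RED'))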
|
macchina-io/macchina.io | ef24ba0e18379c3dd48fb84e6dbf991101cb8db0 | platform/JS/V8/v8/third_party/jinja2/compiler.py | python | CodeGenerator.temporary_identifier | (self) | return 't_%d' % self._last_identifier | Get a new unique identifier. | Get a new unique identifier. | [
"Get",
"a",
"new",
"unique",
"identifier",
"."
] | def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier | [
"def",
"temporary_identifier",
"(",
"self",
")",
":",
"self",
".",
"_last_identifier",
"+=",
"1",
"return",
"'t_%d'",
"%",
"self",
".",
"_last_identifier"
] | https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/third_party/jinja2/compiler.py#L429-L432 |
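
The method above is just a monotonically increasing counter; a self-contained sketch of the same pattern:

class _Gen(object):
    """Toy stand-in for the Jinja2 CodeGenerator counter."""
    def __init__(self):
        self._last_identifier = 0

    def temporary_identifier(self):
        """Get a new unique identifier."""
        self._last_identifier += 1
        return 't_%d' % self._last_identifier

g = _Gen()
assert g.temporary_identifier() == 't_1'
assert g.temporary_identifier() == 't_2'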
|
deepmodeling/deepmd-kit | 159e45d248b0429844fb6a8cb3b3a201987c8d79 | deepmd/fit/polar.py | python | PolarFittingSeA.build | (self,
input_d : tf.Tensor,
rot_mat : tf.Tensor,
natoms : tf.Tensor,
reuse : bool = None,
suffix : str = '') | return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION) | Build the computational graph for fitting net
Parameters
----------
input_d
The input descriptor
rot_mat
The rotation matrix from the descriptor.
natoms
The number of atoms. This tensor has the length of Ntypes + 2
natoms[0]: number of local atoms
natoms[1]: total number of atoms held by this processor
natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
reuse
The weights in the networks should be reused when get the variable.
suffix
Name suffix to identify this descriptor
Returns
-------
atomic_polar
The atomic polarizability | Build the computational graph for fitting net
Parameters
----------
input_d
The input descriptor
rot_mat
The rotation matrix from the descriptor.
natoms
The number of atoms. This tensor has the length of Ntypes + 2
natoms[0]: number of local atoms
natoms[1]: total number of atoms held by this processor
natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
reuse
The weights in the networks should be reused when get the variable.
suffix
Name suffix to identify this descriptor | [
"Build",
"the",
"computational",
"graph",
"for",
"fitting",
"net",
"Parameters",
"----------",
"input_d",
"The",
"input",
"descriptor",
"rot_mat",
"The",
"rotation",
"matrix",
"from",
"the",
"descriptor",
".",
"natoms",
"The",
"number",
"of",
"atoms",
".",
"This",
"tensor",
"has",
"the",
"length",
"of",
"Ntypes",
"+",
"2",
"natoms",
"[",
"0",
"]",
":",
"number",
"of",
"local",
"atoms",
"natoms",
"[",
"1",
"]",
":",
"total",
"number",
"of",
"atoms",
"held",
"by",
"this",
"processor",
"natoms",
"[",
"i",
"]",
":",
"2",
"<",
"=",
"i",
"<",
"Ntypes",
"+",
"2",
"number",
"of",
"type",
"i",
"atoms",
"reuse",
"The",
"weights",
"in",
"the",
"networks",
"should",
"be",
"reused",
"when",
"get",
"the",
"variable",
".",
"suffix",
"Name",
"suffix",
"to",
"identify",
"this",
"descriptor"
] | def build (self,
input_d : tf.Tensor,
rot_mat : tf.Tensor,
natoms : tf.Tensor,
reuse : bool = None,
suffix : str = '') :
"""
Build the computational graph for fitting net
Parameters
----------
input_d
The input descriptor
rot_mat
The rotation matrix from the descriptor.
natoms
The number of atoms. This tensor has the length of Ntypes + 2
natoms[0]: number of local atoms
natoms[1]: total number of atoms held by this processor
natoms[i]: 2 <= i < Ntypes+2, number of type i atoms
reuse
The weights in the networks should be reused when get the variable.
suffix
Name suffix to identify this descriptor
Returns
-------
atomic_polar
The atomic polarizability
"""
start_index = 0
inputs = tf.cast(tf.reshape(input_d, [-1, self.dim_descrpt * natoms[0]]), self.fitting_precision)
rot_mat = tf.reshape(rot_mat, [-1, self.dim_rot_mat * natoms[0]])
count = 0
for type_i in range(self.ntypes):
# cut-out inputs
inputs_i = tf.slice (inputs,
[ 0, start_index* self.dim_descrpt],
[-1, natoms[2+type_i]* self.dim_descrpt] )
inputs_i = tf.reshape(inputs_i, [-1, self.dim_descrpt])
rot_mat_i = tf.slice (rot_mat,
[ 0, start_index* self.dim_rot_mat],
[-1, natoms[2+type_i]* self.dim_rot_mat] )
rot_mat_i = tf.reshape(rot_mat_i, [-1, self.dim_rot_mat_1, 3])
start_index += natoms[2+type_i]
if not type_i in self.sel_type :
continue
layer = inputs_i
for ii in range(0,len(self.n_neuron)) :
if ii >= 1 and self.n_neuron[ii] == self.n_neuron[ii-1] :
layer+= one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, use_timestep = self.resnet_dt, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
else :
layer = one_layer(layer, self.n_neuron[ii], name='layer_'+str(ii)+'_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, activation_fn = self.fitting_activation_fn, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
if self.fit_diag :
bavg = np.zeros(self.dim_rot_mat_1)
# bavg[0] = self.avgeig[0]
# bavg[1] = self.avgeig[1]
# bavg[2] = self.avgeig[2]
# (nframes x natoms) x naxis
final_layer = one_layer(layer, self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
# (nframes x natoms) x naxis
final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1])
# (nframes x natoms) x naxis x naxis
final_layer = tf.matrix_diag(final_layer)
else :
bavg = np.zeros(self.dim_rot_mat_1*self.dim_rot_mat_1)
# bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0]
# bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]
# bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]
# (nframes x natoms) x (naxis x naxis)
final_layer = one_layer(layer, self.dim_rot_mat_1*self.dim_rot_mat_1, activation_fn = None, name='final_layer_type_'+str(type_i)+suffix, reuse=reuse, seed = self.seed, bavg = bavg, precision = self.fitting_precision, uniform_seed = self.uniform_seed)
if (not self.uniform_seed) and (self.seed is not None): self.seed += self.seed_shift
# (nframes x natoms) x naxis x naxis
final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0] * natoms[2+type_i], self.dim_rot_mat_1, self.dim_rot_mat_1])
# (nframes x natoms) x naxis x naxis
final_layer = final_layer + tf.transpose(final_layer, perm = [0,2,1])
# (nframes x natoms) x naxis x 3(coord)
final_layer = tf.matmul(final_layer, rot_mat_i)
# (nframes x natoms) x 3(coord) x 3(coord)
final_layer = tf.matmul(rot_mat_i, final_layer, transpose_a = True)
# nframes x natoms x 3 x 3
final_layer = tf.reshape(final_layer, [tf.shape(inputs)[0], natoms[2+type_i], 3, 3])
# shift and scale
sel_type_idx = self.sel_type.index(type_i)
final_layer = final_layer * self.scale[sel_type_idx]
final_layer = final_layer + self.constant_matrix[sel_type_idx] * tf.eye(3, batch_shape=[tf.shape(inputs)[0], natoms[2+type_i]], dtype = GLOBAL_TF_FLOAT_PRECISION)
# concat the results
if count == 0:
outs = final_layer
else:
outs = tf.concat([outs, final_layer], axis = 1)
count += 1
tf.summary.histogram('fitting_net_output', outs)
return tf.cast(tf.reshape(outs, [-1]), GLOBAL_TF_FLOAT_PRECISION) | [
"def",
"build",
"(",
"self",
",",
"input_d",
":",
"tf",
".",
"Tensor",
",",
"rot_mat",
":",
"tf",
".",
"Tensor",
",",
"natoms",
":",
"tf",
".",
"Tensor",
",",
"reuse",
":",
"bool",
"=",
"None",
",",
"suffix",
":",
"str",
"=",
"''",
")",
":",
"start_index",
"=",
"0",
"inputs",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"reshape",
"(",
"input_d",
",",
"[",
"-",
"1",
",",
"self",
".",
"dim_descrpt",
"*",
"natoms",
"[",
"0",
"]",
"]",
")",
",",
"self",
".",
"fitting_precision",
")",
"rot_mat",
"=",
"tf",
".",
"reshape",
"(",
"rot_mat",
",",
"[",
"-",
"1",
",",
"self",
".",
"dim_rot_mat",
"*",
"natoms",
"[",
"0",
"]",
"]",
")",
"count",
"=",
"0",
"for",
"type_i",
"in",
"range",
"(",
"self",
".",
"ntypes",
")",
":",
"# cut-out inputs",
"inputs_i",
"=",
"tf",
".",
"slice",
"(",
"inputs",
",",
"[",
"0",
",",
"start_index",
"*",
"self",
".",
"dim_descrpt",
"]",
",",
"[",
"-",
"1",
",",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
"*",
"self",
".",
"dim_descrpt",
"]",
")",
"inputs_i",
"=",
"tf",
".",
"reshape",
"(",
"inputs_i",
",",
"[",
"-",
"1",
",",
"self",
".",
"dim_descrpt",
"]",
")",
"rot_mat_i",
"=",
"tf",
".",
"slice",
"(",
"rot_mat",
",",
"[",
"0",
",",
"start_index",
"*",
"self",
".",
"dim_rot_mat",
"]",
",",
"[",
"-",
"1",
",",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
"*",
"self",
".",
"dim_rot_mat",
"]",
")",
"rot_mat_i",
"=",
"tf",
".",
"reshape",
"(",
"rot_mat_i",
",",
"[",
"-",
"1",
",",
"self",
".",
"dim_rot_mat_1",
",",
"3",
"]",
")",
"start_index",
"+=",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
"if",
"not",
"type_i",
"in",
"self",
".",
"sel_type",
":",
"continue",
"layer",
"=",
"inputs_i",
"for",
"ii",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"n_neuron",
")",
")",
":",
"if",
"ii",
">=",
"1",
"and",
"self",
".",
"n_neuron",
"[",
"ii",
"]",
"==",
"self",
".",
"n_neuron",
"[",
"ii",
"-",
"1",
"]",
":",
"layer",
"+=",
"one_layer",
"(",
"layer",
",",
"self",
".",
"n_neuron",
"[",
"ii",
"]",
",",
"name",
"=",
"'layer_'",
"+",
"str",
"(",
"ii",
")",
"+",
"'_type_'",
"+",
"str",
"(",
"type_i",
")",
"+",
"suffix",
",",
"reuse",
"=",
"reuse",
",",
"seed",
"=",
"self",
".",
"seed",
",",
"use_timestep",
"=",
"self",
".",
"resnet_dt",
",",
"activation_fn",
"=",
"self",
".",
"fitting_activation_fn",
",",
"precision",
"=",
"self",
".",
"fitting_precision",
",",
"uniform_seed",
"=",
"self",
".",
"uniform_seed",
")",
"else",
":",
"layer",
"=",
"one_layer",
"(",
"layer",
",",
"self",
".",
"n_neuron",
"[",
"ii",
"]",
",",
"name",
"=",
"'layer_'",
"+",
"str",
"(",
"ii",
")",
"+",
"'_type_'",
"+",
"str",
"(",
"type_i",
")",
"+",
"suffix",
",",
"reuse",
"=",
"reuse",
",",
"seed",
"=",
"self",
".",
"seed",
",",
"activation_fn",
"=",
"self",
".",
"fitting_activation_fn",
",",
"precision",
"=",
"self",
".",
"fitting_precision",
",",
"uniform_seed",
"=",
"self",
".",
"uniform_seed",
")",
"if",
"(",
"not",
"self",
".",
"uniform_seed",
")",
"and",
"(",
"self",
".",
"seed",
"is",
"not",
"None",
")",
":",
"self",
".",
"seed",
"+=",
"self",
".",
"seed_shift",
"if",
"self",
".",
"fit_diag",
":",
"bavg",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"dim_rot_mat_1",
")",
"# bavg[0] = self.avgeig[0]",
"# bavg[1] = self.avgeig[1]",
"# bavg[2] = self.avgeig[2]",
"# (nframes x natoms) x naxis",
"final_layer",
"=",
"one_layer",
"(",
"layer",
",",
"self",
".",
"dim_rot_mat_1",
",",
"activation_fn",
"=",
"None",
",",
"name",
"=",
"'final_layer_type_'",
"+",
"str",
"(",
"type_i",
")",
"+",
"suffix",
",",
"reuse",
"=",
"reuse",
",",
"seed",
"=",
"self",
".",
"seed",
",",
"bavg",
"=",
"bavg",
",",
"precision",
"=",
"self",
".",
"fitting_precision",
",",
"uniform_seed",
"=",
"self",
".",
"uniform_seed",
")",
"if",
"(",
"not",
"self",
".",
"uniform_seed",
")",
"and",
"(",
"self",
".",
"seed",
"is",
"not",
"None",
")",
":",
"self",
".",
"seed",
"+=",
"self",
".",
"seed_shift",
"# (nframes x natoms) x naxis",
"final_layer",
"=",
"tf",
".",
"reshape",
"(",
"final_layer",
",",
"[",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"0",
"]",
"*",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
",",
"self",
".",
"dim_rot_mat_1",
"]",
")",
"# (nframes x natoms) x naxis x naxis",
"final_layer",
"=",
"tf",
".",
"matrix_diag",
"(",
"final_layer",
")",
"else",
":",
"bavg",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"dim_rot_mat_1",
"*",
"self",
".",
"dim_rot_mat_1",
")",
"# bavg[0*self.dim_rot_mat_1+0] = self.avgeig[0]",
"# bavg[1*self.dim_rot_mat_1+1] = self.avgeig[1]",
"# bavg[2*self.dim_rot_mat_1+2] = self.avgeig[2]",
"# (nframes x natoms) x (naxis x naxis)",
"final_layer",
"=",
"one_layer",
"(",
"layer",
",",
"self",
".",
"dim_rot_mat_1",
"*",
"self",
".",
"dim_rot_mat_1",
",",
"activation_fn",
"=",
"None",
",",
"name",
"=",
"'final_layer_type_'",
"+",
"str",
"(",
"type_i",
")",
"+",
"suffix",
",",
"reuse",
"=",
"reuse",
",",
"seed",
"=",
"self",
".",
"seed",
",",
"bavg",
"=",
"bavg",
",",
"precision",
"=",
"self",
".",
"fitting_precision",
",",
"uniform_seed",
"=",
"self",
".",
"uniform_seed",
")",
"if",
"(",
"not",
"self",
".",
"uniform_seed",
")",
"and",
"(",
"self",
".",
"seed",
"is",
"not",
"None",
")",
":",
"self",
".",
"seed",
"+=",
"self",
".",
"seed_shift",
"# (nframes x natoms) x naxis x naxis",
"final_layer",
"=",
"tf",
".",
"reshape",
"(",
"final_layer",
",",
"[",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"0",
"]",
"*",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
",",
"self",
".",
"dim_rot_mat_1",
",",
"self",
".",
"dim_rot_mat_1",
"]",
")",
"# (nframes x natoms) x naxis x naxis",
"final_layer",
"=",
"final_layer",
"+",
"tf",
".",
"transpose",
"(",
"final_layer",
",",
"perm",
"=",
"[",
"0",
",",
"2",
",",
"1",
"]",
")",
"# (nframes x natoms) x naxis x 3(coord)",
"final_layer",
"=",
"tf",
".",
"matmul",
"(",
"final_layer",
",",
"rot_mat_i",
")",
"# (nframes x natoms) x 3(coord) x 3(coord)",
"final_layer",
"=",
"tf",
".",
"matmul",
"(",
"rot_mat_i",
",",
"final_layer",
",",
"transpose_a",
"=",
"True",
")",
"# nframes x natoms x 3 x 3",
"final_layer",
"=",
"tf",
".",
"reshape",
"(",
"final_layer",
",",
"[",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"0",
"]",
",",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
",",
"3",
",",
"3",
"]",
")",
"# shift and scale",
"sel_type_idx",
"=",
"self",
".",
"sel_type",
".",
"index",
"(",
"type_i",
")",
"final_layer",
"=",
"final_layer",
"*",
"self",
".",
"scale",
"[",
"sel_type_idx",
"]",
"final_layer",
"=",
"final_layer",
"+",
"self",
".",
"constant_matrix",
"[",
"sel_type_idx",
"]",
"*",
"tf",
".",
"eye",
"(",
"3",
",",
"batch_shape",
"=",
"[",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"0",
"]",
",",
"natoms",
"[",
"2",
"+",
"type_i",
"]",
"]",
",",
"dtype",
"=",
"GLOBAL_TF_FLOAT_PRECISION",
")",
"# concat the results",
"if",
"count",
"==",
"0",
":",
"outs",
"=",
"final_layer",
"else",
":",
"outs",
"=",
"tf",
".",
"concat",
"(",
"[",
"outs",
",",
"final_layer",
"]",
",",
"axis",
"=",
"1",
")",
"count",
"+=",
"1",
"tf",
".",
"summary",
".",
"histogram",
"(",
"'fitting_net_output'",
",",
"outs",
")",
"return",
"tf",
".",
"cast",
"(",
"tf",
".",
"reshape",
"(",
"outs",
",",
"[",
"-",
"1",
"]",
")",
",",
"GLOBAL_TF_FLOAT_PRECISION",
")"
] | https://github.com/deepmodeling/deepmd-kit/blob/159e45d248b0429844fb6a8cb3b3a201987c8d79/deepmd/fit/polar.py#L274-L372 |
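
A small runnable check of the `natoms` convention documented above (this only exercises the indexing contract, not a full graph build):

import numpy as np

# natoms layout per the docstring: [nloc, nall, n_type0, n_type1, ...]
natoms = np.array([6, 6, 4, 2])
nloc, nall = natoms[0], natoms[1]
per_type = natoms[2:]
assert per_type.sum() == nloc  # local atoms are partitioned by type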
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/zoombar.py | python | ZoomBar.OnPaint | (self, event) | Handles the ``wx.EVT_PAINT`` event for :class:`ZoomBar`.
:param `event`: a :class:`PaintEvent` event to be processed. | Handles the ``wx.EVT_PAINT`` event for :class:`ZoomBar`. | [
"Handles",
"the",
"wx",
".",
"EVT_PAINT",
"event",
"for",
":",
"class",
":",
"ZoomBar",
"."
] | def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for :class:`ZoomBar`.
:param `event`: a :class:`PaintEvent` event to be processed.
"""
dc = wx.AutoBufferedPaintDC(self)
dc.SetBackground(wx.WHITE_BRUSH)
dc.Clear()
background = self._imgBar.GetBitmap()
pos = self._imgBar.GetPosition()
dc.DrawBitmap(background, pos.x, pos.y, True)
if not self._buttons:
return
self.DrawButtons(dc)
self.DrawReflections(dc)
self.DrawLabels(dc) | [
"def",
"OnPaint",
"(",
"self",
",",
"event",
")",
":",
"dc",
"=",
"wx",
".",
"AutoBufferedPaintDC",
"(",
"self",
")",
"dc",
".",
"SetBackground",
"(",
"wx",
".",
"WHITE_BRUSH",
")",
"dc",
".",
"Clear",
"(",
")",
"background",
"=",
"self",
".",
"_imgBar",
".",
"GetBitmap",
"(",
")",
"pos",
"=",
"self",
".",
"_imgBar",
".",
"GetPosition",
"(",
")",
"dc",
".",
"DrawBitmap",
"(",
"background",
",",
"pos",
".",
"x",
",",
"pos",
".",
"y",
",",
"True",
")",
"if",
"not",
"self",
".",
"_buttons",
":",
"return",
"self",
".",
"DrawButtons",
"(",
"dc",
")",
"self",
".",
"DrawReflections",
"(",
"dc",
")",
"self",
".",
"DrawLabels",
"(",
"dc",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/zoombar.py#L1144-L1166 |
||
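
A minimal wxPython sketch of the same buffered-paint pattern used by `OnPaint` above; the panel below is illustrative and not part of `ZoomBar`:

import wx

class Canvas(wx.Panel):
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)
        # AutoBufferedPaintDC expects a custom background style to avoid flicker.
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.Bind(wx.EVT_PAINT, self.OnPaint)

    def OnPaint(self, event):
        dc = wx.AutoBufferedPaintDC(self)
        dc.SetBackground(wx.WHITE_BRUSH)
        dc.Clear()
        dc.DrawText('buttons, reflections and labels go here', 10, 10)
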
jsupancic/deep_hand_pose | 22cbeae1a8410ff5d37c060c7315719d0a5d608f | scripts/cpp_lint.py | python | PrintUsage | (message) | Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message. | Prints a brief usage string and exits, optionally with an error message. | [
"Prints",
"a",
"brief",
"usage",
"string",
"and",
"exits",
"optionally",
"with",
"an",
"error",
"message",
"."
] | def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1) | [
"def",
"PrintUsage",
"(",
"message",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"_USAGE",
")",
"if",
"message",
":",
"sys",
".",
"exit",
"(",
"'\\nFATAL ERROR: '",
"+",
"message",
")",
"else",
":",
"sys",
".",
"exit",
"(",
"1",
")"
] | https://github.com/jsupancic/deep_hand_pose/blob/22cbeae1a8410ff5d37c060c7315719d0a5d608f/scripts/cpp_lint.py#L4757-L4767 |
||
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/telemetry/telemetry/core/platform/__init__.py | python | Platform.HasBeenThermallyThrottled | (self) | return self._platform_backend.HasBeenThermallyThrottled() | Returns True if the device has been thermally throttled. | Returns True if the device has been thermally throttled. | [
"Returns",
"True",
"if",
"the",
"device",
"has",
"been",
"thermally",
"throttled",
"."
] | def HasBeenThermallyThrottled(self):
"""Returns True if the device has been thermally throttled."""
return self._platform_backend.HasBeenThermallyThrottled() | [
"def",
"HasBeenThermallyThrottled",
"(",
"self",
")",
":",
"return",
"self",
".",
"_platform_backend",
".",
"HasBeenThermallyThrottled",
"(",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/telemetry/core/platform/__init__.py#L72-L74 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Cipher/_mode_cfb.py | python | CfbMode.encrypt | (self, plaintext, output=None) | Encrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have encrypted a message
you cannot encrypt (or decrypt) another message using the same
object.
The data to encrypt can be broken up in two or
more pieces and `encrypt` can be called multiple times.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is equivalent to:
>>> c.encrypt(a+b)
This function does not add any padding to the plaintext.
:Parameters:
plaintext : bytes/bytearray/memoryview
The piece of data to encrypt.
It can be of any length.
:Keywords:
output : bytearray/memoryview
The location where the ciphertext must be written to.
If ``None``, the ciphertext is returned.
:Return:
If ``output`` is ``None``, the ciphertext is returned as ``bytes``.
Otherwise, ``None``. | Encrypt data with the key and the parameters set at initialization. | [
"Encrypt",
"data",
"with",
"the",
"key",
"and",
"the",
"parameters",
"set",
"at",
"initialization",
"."
] | def encrypt(self, plaintext, output=None):
"""Encrypt data with the key and the parameters set at initialization.
A cipher object is stateful: once you have encrypted a message
you cannot encrypt (or decrypt) another message using the same
object.
The data to encrypt can be broken up into two or
more pieces and `encrypt` can be called multiple times.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is equivalent to:
>>> c.encrypt(a+b)
This function does not add any padding to the plaintext.
:Parameters:
plaintext : bytes/bytearray/memoryview
The piece of data to encrypt.
It can be of any length.
:Keywords:
output : bytearray/memoryview
The location where the ciphertext must be written to.
If ``None``, the ciphertext is returned.
:Return:
If ``output`` is ``None``, the ciphertext is returned as ``bytes``.
Otherwise, ``None``.
"""
if self.encrypt not in self._next:
raise TypeError("encrypt() cannot be called after decrypt()")
self._next = [ self.encrypt ]
if output is None:
ciphertext = create_string_buffer(len(plaintext))
else:
ciphertext = output
if not is_writeable_buffer(output):
raise TypeError("output must be a bytearray or a writeable memoryview")
if len(plaintext) != len(output):
raise ValueError("output must have the same length as the input"
" (%d bytes)" % len(plaintext))
result = raw_cfb_lib.CFB_encrypt(self._state.get(),
c_uint8_ptr(plaintext),
c_uint8_ptr(ciphertext),
c_size_t(len(plaintext)))
if result:
raise ValueError("Error %d while encrypting in CFB mode" % result)
if output is None:
return get_raw_buffer(ciphertext)
else:
return None | [
"def",
"encrypt",
"(",
"self",
",",
"plaintext",
",",
"output",
"=",
"None",
")",
":",
"if",
"self",
".",
"encrypt",
"not",
"in",
"self",
".",
"_next",
":",
"raise",
"TypeError",
"(",
"\"encrypt() cannot be called after decrypt()\"",
")",
"self",
".",
"_next",
"=",
"[",
"self",
".",
"encrypt",
"]",
"if",
"output",
"is",
"None",
":",
"ciphertext",
"=",
"create_string_buffer",
"(",
"len",
"(",
"plaintext",
")",
")",
"else",
":",
"ciphertext",
"=",
"output",
"if",
"not",
"is_writeable_buffer",
"(",
"output",
")",
":",
"raise",
"TypeError",
"(",
"\"output must be a bytearray or a writeable memoryview\"",
")",
"if",
"len",
"(",
"plaintext",
")",
"!=",
"len",
"(",
"output",
")",
":",
"raise",
"ValueError",
"(",
"\"output must have the same length as the input\"",
"\" (%d bytes)\"",
"%",
"len",
"(",
"plaintext",
")",
")",
"result",
"=",
"raw_cfb_lib",
".",
"CFB_encrypt",
"(",
"self",
".",
"_state",
".",
"get",
"(",
")",
",",
"c_uint8_ptr",
"(",
"plaintext",
")",
",",
"c_uint8_ptr",
"(",
"ciphertext",
")",
",",
"c_size_t",
"(",
"len",
"(",
"plaintext",
")",
")",
")",
"if",
"result",
":",
"raise",
"ValueError",
"(",
"\"Error %d while encrypting in CFB mode\"",
"%",
"result",
")",
"if",
"output",
"is",
"None",
":",
"return",
"get_raw_buffer",
"(",
"ciphertext",
")",
"else",
":",
"return",
"None"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Cipher/_mode_cfb.py#L124-L183 |
||
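
An end-to-end sketch of the piecewise-encryption contract in the docstring above, assuming a PyCryptodome-compatible `Crypto` package (the family this vendored module belongs to):

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
c = AES.new(key, AES.MODE_CFB)            # a fresh random IV is generated
ct = c.encrypt(b'attack') + c.encrypt(b' at dawn')

c2 = AES.new(key, AES.MODE_CFB, iv=c.iv)  # same key and IV, one-shot call
assert ct == c2.encrypt(b'attack at dawn')  # c.encrypt(a) + c.encrypt(b) == c.encrypt(a+b)
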
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/finding-3-digit-even-numbers.py | python | Solution4.findEvenNumbers | (self, digits) | return result | :type digits: List[int]
:rtype: List[int] | :type digits: List[int]
:rtype: List[int] | [
":",
"type",
"digits",
":",
"List",
"[",
"int",
"]",
":",
"rtype",
":",
"List",
"[",
"int",
"]"
] | def findEvenNumbers(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
k = 3
def backtracking(curr, digit_cnt, result):
if len(curr) == k:
result.append(reduce(lambda x, y: x*10+y, curr))
return
for i, (digit, cnt) in enumerate(digit_cnt):
if (not curr and digit == 0) or (len(curr) == k-1 and digit%2 != 0):
continue
digit_cnt[i][1] -= 1
digit_cnt[i], digit_cnt[-1] = digit_cnt[-1], digit_cnt[i]
removed = []
if digit_cnt[-1][1] == 0:
removed = digit_cnt.pop()
curr.append(digit)
backtracking(curr, digit_cnt, result)
curr.pop()
if removed:
digit_cnt.append(removed)
digit_cnt[i], digit_cnt[-1] = digit_cnt[-1], digit_cnt[i]
digit_cnt[i][1] += 1
cnt = collections.Counter(digits)
digit_cnt = map(list, cnt.iteritems())
result = []
backtracking([], digit_cnt, result)
result.sort()
return result | [
"def",
"findEvenNumbers",
"(",
"self",
",",
"digits",
")",
":",
"k",
"=",
"3",
"def",
"backtracking",
"(",
"curr",
",",
"digit_cnt",
",",
"result",
")",
":",
"if",
"len",
"(",
"curr",
")",
"==",
"k",
":",
"result",
".",
"append",
"(",
"reduce",
"(",
"lambda",
"x",
",",
"y",
":",
"x",
"*",
"10",
"+",
"y",
",",
"curr",
")",
")",
"return",
"for",
"i",
",",
"(",
"digit",
",",
"cnt",
")",
"in",
"enumerate",
"(",
"digit_cnt",
")",
":",
"if",
"(",
"not",
"curr",
"and",
"digit",
"==",
"0",
")",
"or",
"(",
"len",
"(",
"curr",
")",
"==",
"k",
"-",
"1",
"and",
"digit",
"%",
"2",
"!=",
"0",
")",
":",
"continue",
"digit_cnt",
"[",
"i",
"]",
"[",
"1",
"]",
"-=",
"1",
"digit_cnt",
"[",
"i",
"]",
",",
"digit_cnt",
"[",
"-",
"1",
"]",
"=",
"digit_cnt",
"[",
"-",
"1",
"]",
",",
"digit_cnt",
"[",
"i",
"]",
"removed",
"=",
"[",
"]",
"if",
"digit_cnt",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"==",
"0",
":",
"removed",
"=",
"digit_cnt",
".",
"pop",
"(",
")",
"curr",
".",
"append",
"(",
"digit",
")",
"backtracking",
"(",
"curr",
",",
"digit_cnt",
",",
"result",
")",
"curr",
".",
"pop",
"(",
")",
"if",
"removed",
":",
"digit_cnt",
".",
"append",
"(",
"removed",
")",
"digit_cnt",
"[",
"i",
"]",
",",
"digit_cnt",
"[",
"-",
"1",
"]",
"=",
"digit_cnt",
"[",
"-",
"1",
"]",
",",
"digit_cnt",
"[",
"i",
"]",
"digit_cnt",
"[",
"i",
"]",
"[",
"1",
"]",
"+=",
"1",
"cnt",
"=",
"collections",
".",
"Counter",
"(",
"digits",
")",
"digit_cnt",
"=",
"map",
"(",
"list",
",",
"cnt",
".",
"iteritems",
"(",
")",
")",
"result",
"=",
"[",
"]",
"backtracking",
"(",
"[",
"]",
",",
"digit_cnt",
",",
"result",
")",
"result",
".",
"sort",
"(",
")",
"return",
"result"
] | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/finding-3-digit-even-numbers.py#L112-L143 |
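
A quick run of the backtracking solution above (a Python 2 session, given the bare `reduce` and the `iteritems` call); the expected output matches the first example of LeetCode 2094:

print(Solution4().findEvenNumbers([2, 1, 3, 0]))
# [102, 120, 130, 132, 210, 230, 302, 310, 312, 320]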
|
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | scripts/Python/static-binding/lldb.py | python | SBFunction.GetDisplayName | (self) | return _lldb.SBFunction_GetDisplayName(self) | GetDisplayName(SBFunction self) -> char const * | GetDisplayName(SBFunction self) -> char const * | [
"GetDisplayName",
"(",
"SBFunction",
"self",
")",
"-",
">",
"char",
"const",
"*"
] | def GetDisplayName(self):
"""GetDisplayName(SBFunction self) -> char const *"""
return _lldb.SBFunction_GetDisplayName(self) | [
"def",
"GetDisplayName",
"(",
"self",
")",
":",
"return",
"_lldb",
".",
"SBFunction_GetDisplayName",
"(",
"self",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L5908-L5910 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/asyncio/streams.py | python | StreamReader._wait_for_data | (self, func_name) | Wait until feed_data() or feed_eof() is called.
If stream was paused, automatically resume it. | Wait until feed_data() or feed_eof() is called. | [
"Wait",
"until",
"feed_data",
"()",
"or",
"feed_eof",
"()",
"is",
"called",
"."
] | async def _wait_for_data(self, func_name):
"""Wait until feed_data() or feed_eof() is called.
If stream was paused, automatically resume it.
"""
# StreamReader uses a future to link the protocol feed_data() method
# to a read coroutine. Running two read coroutines at the same time
# would have an unexpected behaviour. It would not be possible to know
# which coroutine would get the next data.
if self._waiter is not None:
raise RuntimeError(
f'{func_name}() called while another coroutine is '
f'already waiting for incoming data')
assert not self._eof, '_wait_for_data after EOF'
# Waiting for data while paused will cause a deadlock, so prevent it.
# This is essential for readexactly(n) for case when n > self._limit.
if self._paused:
self._paused = False
self._transport.resume_reading()
self._waiter = self._loop.create_future()
try:
await self._waiter
finally:
self._waiter = None | [
"async",
"def",
"_wait_for_data",
"(",
"self",
",",
"func_name",
")",
":",
"# StreamReader uses a future to link the protocol feed_data() method",
"# to a read coroutine. Running two read coroutines at the same time",
"# would have an unexpected behaviour. It would not possible to know",
"# which coroutine would get the next data.",
"if",
"self",
".",
"_waiter",
"is",
"not",
"None",
":",
"raise",
"RuntimeError",
"(",
"f'{func_name}() called while another coroutine is '",
"f'already waiting for incoming data'",
")",
"assert",
"not",
"self",
".",
"_eof",
",",
"'_wait_for_data after EOF'",
"# Waiting for data while paused will make deadlock, so prevent it.",
"# This is essential for readexactly(n) for case when n > self._limit.",
"if",
"self",
".",
"_paused",
":",
"self",
".",
"_paused",
"=",
"False",
"self",
".",
"_transport",
".",
"resume_reading",
"(",
")",
"self",
".",
"_waiter",
"=",
"self",
".",
"_loop",
".",
"create_future",
"(",
")",
"try",
":",
"await",
"self",
".",
"_waiter",
"finally",
":",
"self",
".",
"_waiter",
"=",
"None"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/asyncio/streams.py#L449-L475 |
||
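
A stripped-down sketch of the waiter-future handshake that `_wait_for_data` implements; the class below is illustrative, not the real `StreamReader`:

import asyncio

class TinyReader:
    """One pending read at a time, woken by feed_data()."""
    def __init__(self):
        self._buffer = bytearray()
        self._waiter = None

    def feed_data(self, data):
        self._buffer.extend(data)
        if self._waiter is not None:
            self._waiter.set_result(None)   # wake the parked coroutine
            self._waiter = None

    async def read_some(self):
        if self._waiter is not None:
            raise RuntimeError('another coroutine is already waiting')
        while not self._buffer:
            self._waiter = asyncio.get_running_loop().create_future()
            try:
                await self._waiter          # parked until feed_data() fires
            finally:
                self._waiter = None
        data = bytes(self._buffer)
        self._buffer.clear()
        return data

async def main():
    reader = TinyReader()
    asyncio.get_running_loop().call_later(0.01, reader.feed_data, b'hi')
    print(await reader.read_some())         # b'hi'

asyncio.run(main())
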
nci/drishti | 89cd8b740239c5b2c8222dffd4e27432fde170a1 | bin/assets/scripts/unet3Plus/unet_collection/losses.py | python | iou_box | (y_true, y_pred, mode='giou', dtype=tf.float32) | | return 1 - iou_box_coef(y_true, y_pred, mode=mode, dtype=dtype) | Intersection over Union (IoU) and generalized IoU losses for bounding boxes.
iou_box(y_true, y_pred, mode='giou', dtype=tf.float32)
----------
Rezatofighi, H., Tsoi, N., Gwak, J., Sadeghian, A., Reid, I. and Savarese, S., 2019.
Generalized intersection over union: A metric and a loss for bounding box regression.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 658-666).
----------
Input
y_true: the target bounding box.
y_pred: the predicted bounding box.
Elements of a bounding box should be organized as: [y_min, x_min, y_max, x_max].
mode: 'iou' for IoU coeff (i.e., Jaccard index);
'giou' for generalized IoU coeff.
dtype: the data type of input tensors.
Default is tf.float32. | Intersection over Union (IoU) and generalized IoU losses for bounding boxes.
iou_box(y_true, y_pred, mode='giou', dtype=tf.float32)
----------
Rezatofighi, H., Tsoi, N., Gwak, J., Sadeghian, A., Reid, I. and Savarese, S., 2019.
Generalized intersection over union: A metric and a loss for bounding box regression.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 658-666).
----------
Input
y_true: the target bounding box.
y_pred: the predicted bounding box.
Elements of a bounding box should be organized as: [y_min, x_min, y_max, x_max]. | [
"Inersection",
"over",
"Union",
"(",
"IoU",
")",
"and",
"generalized",
"IoU",
"losses",
"for",
"bounding",
"boxes",
".",
"iou_box",
"(",
"y_true",
"y_pred",
"mode",
"=",
"giou",
"dtype",
"=",
"tf",
".",
"float32",
")",
"----------",
"Rezatofighi",
"H",
".",
"Tsoi",
"N",
".",
"Gwak",
"J",
".",
"Sadeghian",
"A",
".",
"Reid",
"I",
".",
"and",
"Savarese",
"S",
".",
"2019",
".",
"Generalized",
"intersection",
"over",
"union",
":",
"A",
"metric",
"and",
"a",
"loss",
"for",
"bounding",
"box",
"regression",
".",
"In",
"Proceedings",
"of",
"the",
"IEEE",
"/",
"CVF",
"Conference",
"on",
"Computer",
"Vision",
"and",
"Pattern",
"Recognition",
"(",
"pp",
".",
"658",
"-",
"666",
")",
".",
"----------",
"Input",
"y_true",
":",
"the",
"target",
"bounding",
"box",
".",
"y_pred",
":",
"the",
"predicted",
"bounding",
"box",
".",
"Elements",
"of",
"a",
"bounding",
"box",
"should",
"be",
"organized",
"as",
":",
"[",
"y_min",
"x_min",
"y_max",
"x_max",
"]",
"."
] | def iou_box(y_true, y_pred, mode='giou', dtype=tf.float32):
"""
Intersection over Union (IoU) and generalized IoU losses for bounding boxes.
iou_box(y_true, y_pred, mode='giou', dtype=tf.float32)
----------
Rezatofighi, H., Tsoi, N., Gwak, J., Sadeghian, A., Reid, I. and Savarese, S., 2019.
Generalized intersection over union: A metric and a loss for bounding box regression.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 658-666).
----------
Input
y_true: the target bounding box.
y_pred: the predicted bounding box.
Elements of a bounding box should be organized as: [y_min, x_min, y_max, x_max].
mode: 'iou' for IoU coeff (i.e., Jaccard index);
'giou' for generalized IoU coeff.
dtype: the data type of input tensors.
Default is tf.float32.
"""
y_pred = tf.convert_to_tensor(y_pred)
y_pred = tf.cast(y_pred, dtype)
y_true = tf.cast(y_true, dtype)
y_pred = tf.squeeze(y_pred)
y_true = tf.squeeze(y_true)
return 1 - iou_box_coef(y_true, y_pred, mode=mode, dtype=dtype) | [
"def",
"iou_box",
"(",
"y_true",
",",
"y_pred",
",",
"mode",
"=",
"'giou'",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"y_pred",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"y_pred",
")",
"y_pred",
"=",
"tf",
".",
"cast",
"(",
"y_pred",
",",
"dtype",
")",
"y_true",
"=",
"tf",
".",
"cast",
"(",
"y_true",
",",
"dtype",
")",
"y_pred",
"=",
"tf",
".",
"squeeze",
"(",
"y_pred",
")",
"y_true",
"=",
"tf",
".",
"squeeze",
"(",
"y_true",
")",
"return",
"1",
"-",
"iou_box_coef",
"(",
"y_true",
",",
"y_pred",
",",
"mode",
"=",
"mode",
",",
"dtype",
"=",
"dtype",
")"
] | https://github.com/nci/drishti/blob/89cd8b740239c5b2c8222dffd4e27432fde170a1/bin/assets/scripts/unet3Plus/unet_collection/losses.py#L351-L385 |
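
A usage sketch for `iou_box`, assuming its helper `iou_box_coef` from the same losses module is in scope:

import tensorflow as tf

y_true = tf.constant([0.0, 0.0, 2.0, 2.0])  # [y_min, x_min, y_max, x_max]
y_pred = tf.constant([1.0, 1.0, 3.0, 3.0])
loss = iou_box(y_true, y_pred, mode='giou')
# Identical boxes give loss 0; widely separated boxes push the 'giou' loss toward 2.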
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | build/android/generate_emma_html.py | python | _GetFilesWithExt | (root_dir, ext) | return files | Gets all files with a given extension.
Args:
root_dir: Directory in which to search for files.
ext: Extension to look for (without the leading dot)
Returns:
A list of absolute paths to files that match. | Gets all files with a given extension. | [
"Gets",
"all",
"files",
"with",
"a",
"given",
"extension",
"."
] | def _GetFilesWithExt(root_dir, ext):
"""Gets all files with a given extension.
Args:
root_dir: Directory in which to search for files.
ext: Extension to look for (without the leading dot)
Returns:
A list of absolute paths to files that match.
"""
files = []
for root, _, filenames in os.walk(root_dir):
basenames = fnmatch.filter(filenames, '*.' + ext)
files.extend([os.path.join(root, basename)
for basename in basenames])
return files | [
"def",
"_GetFilesWithExt",
"(",
"root_dir",
",",
"ext",
")",
":",
"files",
"=",
"[",
"]",
"for",
"root",
",",
"_",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"root_dir",
")",
":",
"basenames",
"=",
"fnmatch",
".",
"filter",
"(",
"filenames",
",",
"'*.'",
"+",
"ext",
")",
"files",
".",
"extend",
"(",
"[",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"basename",
")",
"for",
"basename",
"in",
"basenames",
"]",
")",
"return",
"files"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/build/android/generate_emma_html.py#L20-L36 |
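
An example invocation of `_GetFilesWithExt`; the extension is passed without a dot because the function builds the '*.' + ext pattern itself (see the corrected Args note above):

import os

emma_files = _GetFilesWithExt(os.path.join(os.getcwd(), 'out'), 'em')
print(emma_files)  # absolute paths to every *.em file under out/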
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/math_ops.py | python | softplus | (features, name=None) | return gen_nn_ops.softplus(features, name) | Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.
`softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
takes on positive values.
<img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">
Example:
>>> import tensorflow as tf
>>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
array([0.6931472, 1.3132616], dtype=float32)
Args:
features: `Tensor`
name: Optional: name to associate with this operation.
Returns:
`Tensor` | Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`. | [
"Computes",
"elementwise",
"softplus",
":",
"softplus",
"(",
"x",
")",
"=",
"log",
"(",
"exp",
"(",
"x",
")",
"+",
"1",
")",
"."
] | def softplus(features, name=None):
"""Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.
`softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
takes on positive values.
<img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">
Example:
>>> import tensorflow as tf
>>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
array([0.6931472, 1.3132616], dtype=float32)
Args:
features: `Tensor`
name: Optional: name to associate with this operation.
Returns:
`Tensor`
"""
return gen_nn_ops.softplus(features, name) | [
"def",
"softplus",
"(",
"features",
",",
"name",
"=",
"None",
")",
":",
"return",
"gen_nn_ops",
".",
"softplus",
"(",
"features",
",",
"name",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/math_ops.py#L636-L656 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_gdi.py | python | PseudoDC.DrawBitmapPoint | (*args, **kwargs) | return _gdi_.PseudoDC_DrawBitmapPoint(*args, **kwargs) | DrawBitmapPoint(self, Bitmap bmp, Point pt, bool useMask=False)
Draw a bitmap on the device context at the specified point. If
*transparent* is true and the bitmap has a transparency mask (or
alpha channel on the platforms that support it), then the bitmap will
be drawn transparently. | DrawBitmapPoint(self, Bitmap bmp, Point pt, bool useMask=False) | [
"DrawBitmapPoint",
"(",
"self",
"Bitmap",
"bmp",
"Point",
"pt",
"bool",
"useMask",
"=",
"False",
")"
] | def DrawBitmapPoint(*args, **kwargs):
"""
DrawBitmapPoint(self, Bitmap bmp, Point pt, bool useMask=False)
Draw a bitmap on the device context at the specified point. If
*transparent* is true and the bitmap has a transparency mask (or
alpha channel on the platforms that support it), then the bitmap will
be drawn transparently.
"""
return _gdi_.PseudoDC_DrawBitmapPoint(*args, **kwargs) | [
"def",
"DrawBitmapPoint",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"PseudoDC_DrawBitmapPoint",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L8074-L8083 |
|
robotology/yarp | 3d6e3f258db7755a3c44dd1e62c303cc36c49a8f | extern/thrift/thrift/lib/py/src/transport/TZlibTransport.py | python | TZlibTransport.cstringio_refill | (self, partialread, reqlen) | return self.__rbuf | Implement the CReadableTransport interface for refill | Implement the CReadableTransport interface for refill | [
"Implement",
"the",
"CReadableTransport",
"interface",
"for",
"refill"
] | def cstringio_refill(self, partialread, reqlen):
"""Implement the CReadableTransport interface for refill"""
retstring = partialread
if reqlen < self.DEFAULT_BUFFSIZE:
retstring += self.read(self.DEFAULT_BUFFSIZE)
while len(retstring) < reqlen:
retstring += self.read(reqlen - len(retstring))
self.__rbuf = BufferIO(retstring)
return self.__rbuf | [
"def",
"cstringio_refill",
"(",
"self",
",",
"partialread",
",",
"reqlen",
")",
":",
"retstring",
"=",
"partialread",
"if",
"reqlen",
"<",
"self",
".",
"DEFAULT_BUFFSIZE",
":",
"retstring",
"+=",
"self",
".",
"read",
"(",
"self",
".",
"DEFAULT_BUFFSIZE",
")",
"while",
"len",
"(",
"retstring",
")",
"<",
"reqlen",
":",
"retstring",
"+=",
"self",
".",
"read",
"(",
"reqlen",
"-",
"len",
"(",
"retstring",
")",
")",
"self",
".",
"__rbuf",
"=",
"BufferIO",
"(",
"retstring",
")",
"return",
"self",
".",
"__rbuf"
] | https://github.com/robotology/yarp/blob/3d6e3f258db7755a3c44dd1e62c303cc36c49a8f/extern/thrift/thrift/lib/py/src/transport/TZlibTransport.py#L240-L248 |