nwo
stringlengths 5
86
| sha
stringlengths 40
40
| path
stringlengths 4
189
| language
stringclasses 1
value | identifier
stringlengths 1
94
| parameters
stringlengths 2
4.03k
| argument_list
stringclasses 1
value | return_statement
stringlengths 0
11.5k
| docstring
stringlengths 1
33.2k
| docstring_summary
stringlengths 0
5.15k
| docstring_tokens
list | function
stringlengths 34
151k
| function_tokens
list | url
stringlengths 90
278
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/gsutil/third_party/boto/boto/ec2/cloudwatch/__init__.py
|
python
|
CloudWatchConnection.disable_alarm_actions
|
(self, alarm_names)
|
return self.get_status('DisableAlarmActions', params)
|
Disables actions for the specified alarms.
:type alarms: list
:param alarms: List of alarm names.
|
Disables actions for the specified alarms.
|
[
"Disables",
"actions",
"for",
"the",
"specified",
"alarms",
"."
] |
def disable_alarm_actions(self, alarm_names):
        """
        Disables actions for the specified alarms.

        :type alarm_names: list
        :param alarm_names: List of alarm names whose actions should be
            disabled.  (The docstring previously referred to ``alarms``;
            the actual parameter is ``alarm_names``.)

        :return: the result of ``get_status`` for the
            ``DisableAlarmActions`` request -- presumably a success flag;
            confirm against the connection implementation.
        """
        # Expand the name list into AlarmNames.member.1, AlarmNames.member.2,
        # ... request parameters via the '%s' placeholder.
        params = {}
        self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
        return self.get_status('DisableAlarmActions', params)
|
[
"def",
"disable_alarm_actions",
"(",
"self",
",",
"alarm_names",
")",
":",
"params",
"=",
"{",
"}",
"self",
".",
"build_list_params",
"(",
"params",
",",
"alarm_names",
",",
"'AlarmNames.member.%s'",
")",
"return",
"self",
".",
"get_status",
"(",
"'DisableAlarmActions'",
",",
"params",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/ec2/cloudwatch/__init__.py#L584-L593
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/shape_base.py
|
python
|
dsplit
|
(ary, indices_or_sections)
|
return split(ary, indices_or_sections, 2)
|
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float64)]
|
Split array into multiple sub-arrays along the 3rd axis (depth).
|
[
"Split",
"array",
"into",
"multiple",
"sub",
"-",
"arrays",
"along",
"the",
"3rd",
"axis",
"(",
"depth",
")",
"."
] |
def dsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays along the 3rd axis (depth).

    Equivalent to calling `split` with ``axis=2``; the input must have at
    least three dimensions, and the array is always split along the third
    axis.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, 2)
    [array([[[ 0., 1.],
    [ 4., 5.]],
    [[ 8., 9.],
    [12., 13.]]]), array([[[ 2., 3.],
    [ 6., 7.]],
    [[10., 11.],
    [14., 15.]]])]
    >>> np.dsplit(x, np.array([3, 6]))
    [array([[[ 0., 1., 2.],
    [ 4., 5., 6.]],
    [[ 8., 9., 10.],
    [12., 13., 14.]]]),
    array([[[ 3.],
    [ 7.]],
    [[11.],
    [15.]]]),
    array([], shape=(2, 2, 0), dtype=float64)]
    """
    # Depth-wise splitting only makes sense with a third axis present.
    if _nx.ndim(ary) >= 3:
        return split(ary, indices_or_sections, 2)
    raise ValueError('dsplit only works on arrays of 3 or more dimensions')
|
[
"def",
"dsplit",
"(",
"ary",
",",
"indices_or_sections",
")",
":",
"if",
"_nx",
".",
"ndim",
"(",
"ary",
")",
"<",
"3",
":",
"raise",
"ValueError",
"(",
"'dsplit only works on arrays of 3 or more dimensions'",
")",
"return",
"split",
"(",
"ary",
",",
"indices_or_sections",
",",
"2",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/shape_base.py#L993-L1034
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py2/scipy/fftpack/helper.py
|
python
|
_init_nd_shape_and_axes
|
(x, shape, axes)
|
return shape, axes
|
Handle shape and axes arguments for n-dimensional transforms.
Returns the shape and axes in a standard form, taking into account negative
values and checking for various potential errors.
Parameters
----------
x : array_like
The input array.
shape : int or array_like of ints or None
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
If `shape` is -1, the size of the corresponding dimension of `x` is
used.
axes : int or array_like of ints or None
Axes along which the calculation is computed.
The default is over all axes.
Negative indices are automatically converted to their positive
counterpart.
Returns
-------
shape : array
The shape of the result. It is a 1D integer array.
axes : array
The shape of the result. It is a 1D integer array.
|
Handle shape and axes arguments for n-dimensional transforms.
|
[
"Handle",
"shape",
"and",
"axes",
"arguments",
"for",
"n",
"-",
"dimensional",
"transforms",
"."
] |
def _init_nd_shape_and_axes(x, shape, axes):
"""Handle shape and axes arguments for n-dimensional transforms.
Returns the shape and axes in a standard form, taking into account negative
values and checking for various potential errors.
Parameters
----------
x : array_like
The input array.
shape : int or array_like of ints or None
The shape of the result. If both `shape` and `axes` (see below) are
None, `shape` is ``x.shape``; if `shape` is None but `axes` is
not None, then `shape` is ``scipy.take(x.shape, axes, axis=0)``.
If `shape` is -1, the size of the corresponding dimension of `x` is
used.
axes : int or array_like of ints or None
Axes along which the calculation is computed.
The default is over all axes.
Negative indices are automatically converted to their positive
counterpart.
Returns
-------
shape : array
The shape of the result. It is a 1D integer array.
axes : array
The shape of the result. It is a 1D integer array.
"""
x = asarray(x)
noshape = shape is None
noaxes = axes is None
if noaxes:
axes = arange(x.ndim, dtype=intc)
else:
axes = atleast_1d(axes)
if axes.size == 0:
axes = axes.astype(intc)
if not axes.ndim == 1:
raise ValueError("when given, axes values must be a scalar or vector")
if not issubdtype(axes.dtype, integer):
raise ValueError("when given, axes values must be integers")
axes = where(axes < 0, axes + x.ndim, axes)
if axes.size != 0 and (axes.max() >= x.ndim or axes.min() < 0):
raise ValueError("axes exceeds dimensionality of input")
if axes.size != 0 and unique(axes).shape != axes.shape:
raise ValueError("all axes must be unique")
if not noshape:
shape = atleast_1d(shape)
elif isscalar(x):
shape = array([], dtype=intc)
elif noaxes:
shape = array(x.shape, dtype=intc)
else:
shape = take(x.shape, axes)
if shape.size == 0:
shape = shape.astype(intc)
if shape.ndim != 1:
raise ValueError("when given, shape values must be a scalar or vector")
if not issubdtype(shape.dtype, integer):
raise ValueError("when given, shape values must be integers")
if axes.shape != shape.shape:
raise ValueError("when given, axes and shape arguments"
" have to be of the same length")
shape = where(shape == -1, array(x.shape)[axes], shape)
if shape.size != 0 and (shape < 1).any():
raise ValueError(
"invalid number of data points ({0}) specified".format(shape))
return shape, axes
|
[
"def",
"_init_nd_shape_and_axes",
"(",
"x",
",",
"shape",
",",
"axes",
")",
":",
"x",
"=",
"asarray",
"(",
"x",
")",
"noshape",
"=",
"shape",
"is",
"None",
"noaxes",
"=",
"axes",
"is",
"None",
"if",
"noaxes",
":",
"axes",
"=",
"arange",
"(",
"x",
".",
"ndim",
",",
"dtype",
"=",
"intc",
")",
"else",
":",
"axes",
"=",
"atleast_1d",
"(",
"axes",
")",
"if",
"axes",
".",
"size",
"==",
"0",
":",
"axes",
"=",
"axes",
".",
"astype",
"(",
"intc",
")",
"if",
"not",
"axes",
".",
"ndim",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"when given, axes values must be a scalar or vector\"",
")",
"if",
"not",
"issubdtype",
"(",
"axes",
".",
"dtype",
",",
"integer",
")",
":",
"raise",
"ValueError",
"(",
"\"when given, axes values must be integers\"",
")",
"axes",
"=",
"where",
"(",
"axes",
"<",
"0",
",",
"axes",
"+",
"x",
".",
"ndim",
",",
"axes",
")",
"if",
"axes",
".",
"size",
"!=",
"0",
"and",
"(",
"axes",
".",
"max",
"(",
")",
">=",
"x",
".",
"ndim",
"or",
"axes",
".",
"min",
"(",
")",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"axes exceeds dimensionality of input\"",
")",
"if",
"axes",
".",
"size",
"!=",
"0",
"and",
"unique",
"(",
"axes",
")",
".",
"shape",
"!=",
"axes",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"all axes must be unique\"",
")",
"if",
"not",
"noshape",
":",
"shape",
"=",
"atleast_1d",
"(",
"shape",
")",
"elif",
"isscalar",
"(",
"x",
")",
":",
"shape",
"=",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"intc",
")",
"elif",
"noaxes",
":",
"shape",
"=",
"array",
"(",
"x",
".",
"shape",
",",
"dtype",
"=",
"intc",
")",
"else",
":",
"shape",
"=",
"take",
"(",
"x",
".",
"shape",
",",
"axes",
")",
"if",
"shape",
".",
"size",
"==",
"0",
":",
"shape",
"=",
"shape",
".",
"astype",
"(",
"intc",
")",
"if",
"shape",
".",
"ndim",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"when given, shape values must be a scalar or vector\"",
")",
"if",
"not",
"issubdtype",
"(",
"shape",
".",
"dtype",
",",
"integer",
")",
":",
"raise",
"ValueError",
"(",
"\"when given, shape values must be integers\"",
")",
"if",
"axes",
".",
"shape",
"!=",
"shape",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"\"when given, axes and shape arguments\"",
"\" have to be of the same length\"",
")",
"shape",
"=",
"where",
"(",
"shape",
"==",
"-",
"1",
",",
"array",
"(",
"x",
".",
"shape",
")",
"[",
"axes",
"]",
",",
"shape",
")",
"if",
"shape",
".",
"size",
"!=",
"0",
"and",
"(",
"shape",
"<",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"invalid number of data points ({0}) specified\"",
".",
"format",
"(",
"shape",
")",
")",
"return",
"shape",
",",
"axes"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/fftpack/helper.py#L157-L237
|
|
NVlabs/fermat
|
06e8c03ac59ab440cbb13897f90631ef1861e769
|
contrib/assimp-4.1.0/port/PyAssimp/scripts/transformations.py
|
python
|
euler_from_matrix
|
(matrix, axes='sxyz')
|
return ax, ay, az
|
Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print axes, "failed"
|
Return Euler angles from rotation matrix for specified axis sequence.
|
[
"Return",
"Euler",
"angles",
"from",
"rotation",
"matrix",
"for",
"specified",
"axis",
"sequence",
"."
] |
def euler_from_matrix(matrix, axes='sxyz'):
    """Return Euler angles from rotation matrix for specified axis sequence.
    axes : One of 24 axis sequences as string or encoded tuple
    Note that many Euler angle triplets can describe one matrix.
    >>> R0 = euler_matrix(1, 2, 3, 'syxz')
    >>> al, be, ga = euler_from_matrix(R0, 'syxz')
    >>> R1 = euler_matrix(al, be, ga, 'syxz')
    >>> numpy.allclose(R0, R1)
    True
    >>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
    >>> for axes in _AXES2TUPLE.keys():
    ...    R0 = euler_matrix(axes=axes, *angles)
    ...    R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
    ...    if not numpy.allclose(R0, R1): print axes, "failed"
    """
    try:
        # Decode the axis-sequence string into: first rotation axis index,
        # parity of the axis permutation, whether the first/last axes repeat,
        # and the frame flag (presumably static vs. rotating frame -- see
        # _AXES2TUPLE's definition to confirm).
        firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
    except (AttributeError, KeyError):
        # Not a known string: validate `axes` as an already-encoded tuple
        # (lookup raises KeyError for invalid tuples), then unpack it.
        _ = _TUPLE2AXES[axes]
        firstaxis, parity, repetition, frame = axes
    # i, j, k are the three axis indices implied by the sequence.
    i = firstaxis
    j = _NEXT_AXIS[i+parity]
    k = _NEXT_AXIS[i-parity+1]
    # Work only on the upper-left 3x3 rotation part of the input.
    M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
    if repetition:
        sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
        if sy > _EPS:
            ax = math.atan2( M[i, j], M[i, k])
            ay = math.atan2( sy, M[i, i])
            az = math.atan2( M[j, i], -M[k, i])
        else:
            # Near-degenerate case (sy ~ 0): the first and last angles are
            # not independent, so az is fixed at 0 and ax absorbs the rest.
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2( sy, M[i, i])
            az = 0.0
    else:
        cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
        if cy > _EPS:
            ax = math.atan2( M[k, j], M[k, k])
            ay = math.atan2(-M[k, i], cy)
            az = math.atan2( M[j, i], M[i, i])
        else:
            # Near-degenerate case (cy ~ 0): same convention, az = 0.
            ax = math.atan2(-M[j, k], M[j, j])
            ay = math.atan2(-M[k, i], cy)
            az = 0.0
    # Undo the sign/order adjustments implied by the axis sequence encoding.
    if parity:
        ax, ay, az = -ax, -ay, -az
    if frame:
        ax, az = az, ax
    return ax, ay, az
|
[
"def",
"euler_from_matrix",
"(",
"matrix",
",",
"axes",
"=",
"'sxyz'",
")",
":",
"try",
":",
"firstaxis",
",",
"parity",
",",
"repetition",
",",
"frame",
"=",
"_AXES2TUPLE",
"[",
"axes",
".",
"lower",
"(",
")",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"_",
"=",
"_TUPLE2AXES",
"[",
"axes",
"]",
"firstaxis",
",",
"parity",
",",
"repetition",
",",
"frame",
"=",
"axes",
"i",
"=",
"firstaxis",
"j",
"=",
"_NEXT_AXIS",
"[",
"i",
"+",
"parity",
"]",
"k",
"=",
"_NEXT_AXIS",
"[",
"i",
"-",
"parity",
"+",
"1",
"]",
"M",
"=",
"numpy",
".",
"array",
"(",
"matrix",
",",
"dtype",
"=",
"numpy",
".",
"float64",
",",
"copy",
"=",
"False",
")",
"[",
":",
"3",
",",
":",
"3",
"]",
"if",
"repetition",
":",
"sy",
"=",
"math",
".",
"sqrt",
"(",
"M",
"[",
"i",
",",
"j",
"]",
"*",
"M",
"[",
"i",
",",
"j",
"]",
"+",
"M",
"[",
"i",
",",
"k",
"]",
"*",
"M",
"[",
"i",
",",
"k",
"]",
")",
"if",
"sy",
">",
"_EPS",
":",
"ax",
"=",
"math",
".",
"atan2",
"(",
"M",
"[",
"i",
",",
"j",
"]",
",",
"M",
"[",
"i",
",",
"k",
"]",
")",
"ay",
"=",
"math",
".",
"atan2",
"(",
"sy",
",",
"M",
"[",
"i",
",",
"i",
"]",
")",
"az",
"=",
"math",
".",
"atan2",
"(",
"M",
"[",
"j",
",",
"i",
"]",
",",
"-",
"M",
"[",
"k",
",",
"i",
"]",
")",
"else",
":",
"ax",
"=",
"math",
".",
"atan2",
"(",
"-",
"M",
"[",
"j",
",",
"k",
"]",
",",
"M",
"[",
"j",
",",
"j",
"]",
")",
"ay",
"=",
"math",
".",
"atan2",
"(",
"sy",
",",
"M",
"[",
"i",
",",
"i",
"]",
")",
"az",
"=",
"0.0",
"else",
":",
"cy",
"=",
"math",
".",
"sqrt",
"(",
"M",
"[",
"i",
",",
"i",
"]",
"*",
"M",
"[",
"i",
",",
"i",
"]",
"+",
"M",
"[",
"j",
",",
"i",
"]",
"*",
"M",
"[",
"j",
",",
"i",
"]",
")",
"if",
"cy",
">",
"_EPS",
":",
"ax",
"=",
"math",
".",
"atan2",
"(",
"M",
"[",
"k",
",",
"j",
"]",
",",
"M",
"[",
"k",
",",
"k",
"]",
")",
"ay",
"=",
"math",
".",
"atan2",
"(",
"-",
"M",
"[",
"k",
",",
"i",
"]",
",",
"cy",
")",
"az",
"=",
"math",
".",
"atan2",
"(",
"M",
"[",
"j",
",",
"i",
"]",
",",
"M",
"[",
"i",
",",
"i",
"]",
")",
"else",
":",
"ax",
"=",
"math",
".",
"atan2",
"(",
"-",
"M",
"[",
"j",
",",
"k",
"]",
",",
"M",
"[",
"j",
",",
"j",
"]",
")",
"ay",
"=",
"math",
".",
"atan2",
"(",
"-",
"M",
"[",
"k",
",",
"i",
"]",
",",
"cy",
")",
"az",
"=",
"0.0",
"if",
"parity",
":",
"ax",
",",
"ay",
",",
"az",
"=",
"-",
"ax",
",",
"-",
"ay",
",",
"-",
"az",
"if",
"frame",
":",
"ax",
",",
"az",
"=",
"az",
",",
"ax",
"return",
"ax",
",",
"ay",
",",
"az"
] |
https://github.com/NVlabs/fermat/blob/06e8c03ac59ab440cbb13897f90631ef1861e769/contrib/assimp-4.1.0/port/PyAssimp/scripts/transformations.py#L1031-L1086
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/wsgiref/simple_server.py
|
python
|
WSGIServer.server_bind
|
(self)
|
Override server_bind to store the server name.
|
Override server_bind to store the server name.
|
[
"Override",
"server_bind",
"to",
"store",
"the",
"server",
"name",
"."
] |
def server_bind(self):
        """Override server_bind to store the server name."""
        # Bind the socket via the base HTTPServer (which records
        # server_name/server_port), then build the base WSGI environ
        # derived from them via setup_environ().
        HTTPServer.server_bind(self)
        self.setup_environ()
|
[
"def",
"server_bind",
"(",
"self",
")",
":",
"HTTPServer",
".",
"server_bind",
"(",
"self",
")",
"self",
".",
"setup_environ",
"(",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/wsgiref/simple_server.py#L48-L51
|
||
CRYTEK/CRYENGINE
|
232227c59a220cbbd311576f0fbeba7bb53b2a8c
|
Code/Tools/waf-1.7.13/waflib/Tools/glib2.py
|
python
|
add_enums_from_template
|
(self, source='', target='', template='', comments='')
|
Add a file to the list of enum files to process. Store them in the attribute *enums_list*.
:param source: enum file to process
:type source: string
:param target: target file
:type target: string
:param template: template file
:type template: string
:param comments: comments
:type comments: string
|
Add a file to the list of enum files to process. Store them in the attribute *enums_list*.
|
[
"Add",
"a",
"file",
"to",
"the",
"list",
"of",
"enum",
"files",
"to",
"process",
".",
"Store",
"them",
"in",
"the",
"attribute",
"*",
"enums_list",
"*",
"."
] |
def add_enums_from_template(self, source='', target='', template='', comments=''):
    """
    Register an enum file for processing through a template file. The
    entry is stored in the attribute *enums_list*, which is created on
    first use; the *process_enums* method is scheduled at the same time.

    :param source: enum file to process
    :type source: string
    :param target: target file
    :type target: string
    :param template: template file
    :type template: string
    :param comments: comments
    :type comments: string
    """
    if not hasattr(self, 'enums_list'):
        # First registration on this object: create the storage list and
        # schedule the processing method exactly once.
        self.enums_list = []
        self.meths.append('process_enums')
    # Template-driven entries leave the inline production fields empty.
    entry = {
        'source': source,
        'target': target,
        'template': template,
        'file-head': '',
        'file-prod': '',
        'file-tail': '',
        'enum-prod': '',
        'value-head': '',
        'value-prod': '',
        'value-tail': '',
        'comments': comments,
    }
    self.enums_list.append(entry)
|
[
"def",
"add_enums_from_template",
"(",
"self",
",",
"source",
"=",
"''",
",",
"target",
"=",
"''",
",",
"template",
"=",
"''",
",",
"comments",
"=",
"''",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'enums_list'",
")",
":",
"self",
".",
"enums_list",
"=",
"[",
"]",
"self",
".",
"meths",
".",
"append",
"(",
"'process_enums'",
")",
"self",
".",
"enums_list",
".",
"append",
"(",
"{",
"'source'",
":",
"source",
",",
"'target'",
":",
"target",
",",
"'template'",
":",
"template",
",",
"'file-head'",
":",
"''",
",",
"'file-prod'",
":",
"''",
",",
"'file-tail'",
":",
"''",
",",
"'enum-prod'",
":",
"''",
",",
"'value-head'",
":",
"''",
",",
"'value-prod'",
":",
"''",
",",
"'value-tail'",
":",
"''",
",",
"'comments'",
":",
"comments",
"}",
")"
] |
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Tools/glib2.py#L90-L116
|
||
jackaudio/jack2
|
21b293dbc37d42446141a08922cdec0d2550c6a0
|
waflib/Build.py
|
python
|
BuildContext.load_envs
|
(self)
|
The configuration command creates files of the form ``build/c4che/NAMEcache.py``. This method
creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those
files and stores them in :py:attr:`waflib.Build.BuildContext.allenvs`.
|
The configuration command creates files of the form ``build/c4che/NAMEcache.py``. This method
creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those
files and stores them in :py:attr:`waflib.Build.BuildContext.allenvs`.
|
[
"The",
"configuration",
"command",
"creates",
"files",
"of",
"the",
"form",
"build",
"/",
"c4che",
"/",
"NAMEcache",
".",
"py",
".",
"This",
"method",
"creates",
"a",
":",
"py",
":",
"class",
":",
"waflib",
".",
"ConfigSet",
".",
"ConfigSet",
"instance",
"for",
"each",
"NAME",
"by",
"reading",
"those",
"files",
"and",
"stores",
"them",
"in",
":",
"py",
":",
"attr",
":",
"waflib",
".",
"Build",
".",
"BuildContext",
".",
"allenvs",
"."
] |
def load_envs(self):
        """
        The configuration command creates files of the form ``build/c4che/NAMEcache.py``. This method
        creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those
        files and stores them in :py:attr:`waflib.Build.BuildContext.allenvs`.

        Raises ``Errors.WafError`` if the cache directory is missing or
        empty, or if a configuration file recorded in an environment no
        longer exists on disk.
        """
        node = self.root.find_node(self.cache_dir)
        if not node:
            raise Errors.WafError('The project was not configured: run "waf configure" first!')
        # One cache file per configuration variant: NAME + CACHE_SUFFIX.
        lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True)
        if not lst:
            raise Errors.WafError('The cache directory is empty: reconfigure the project')
        for x in lst:
            # Derive the variant name from the path relative to the cache
            # dir; normalize backslashes so names are slash-delimited.
            name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/')
            env = ConfigSet.ConfigSet(x.abspath())
            self.all_envs[name] = env
            # Verify that every file recorded at configure time still exists.
            for f in env[CFG_FILES]:
                newnode = self.root.find_resource(f)
                if not newnode or not newnode.exists():
                    raise Errors.WafError('Missing configuration file %r, reconfigure the project!' % f)
|
[
"def",
"load_envs",
"(",
"self",
")",
":",
"node",
"=",
"self",
".",
"root",
".",
"find_node",
"(",
"self",
".",
"cache_dir",
")",
"if",
"not",
"node",
":",
"raise",
"Errors",
".",
"WafError",
"(",
"'The project was not configured: run \"waf configure\" first!'",
")",
"lst",
"=",
"node",
".",
"ant_glob",
"(",
"'**/*%s'",
"%",
"CACHE_SUFFIX",
",",
"quiet",
"=",
"True",
")",
"if",
"not",
"lst",
":",
"raise",
"Errors",
".",
"WafError",
"(",
"'The cache directory is empty: reconfigure the project'",
")",
"for",
"x",
"in",
"lst",
":",
"name",
"=",
"x",
".",
"path_from",
"(",
"node",
")",
".",
"replace",
"(",
"CACHE_SUFFIX",
",",
"''",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"env",
"=",
"ConfigSet",
".",
"ConfigSet",
"(",
"x",
".",
"abspath",
"(",
")",
")",
"self",
".",
"all_envs",
"[",
"name",
"]",
"=",
"env",
"for",
"f",
"in",
"env",
"[",
"CFG_FILES",
"]",
":",
"newnode",
"=",
"self",
".",
"root",
".",
"find_resource",
"(",
"f",
")",
"if",
"not",
"newnode",
"or",
"not",
"newnode",
".",
"exists",
"(",
")",
":",
"raise",
"Errors",
".",
"WafError",
"(",
"'Missing configuration file %r, reconfigure the project!'",
"%",
"f",
")"
] |
https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/waflib/Build.py#L186-L207
|
||
ricardoquesada/Spidermonkey
|
4a75ea2543408bd1b2c515aa95901523eeef7858
|
python/mozbuild/mozpack/files.py
|
python
|
ManifestFile.add
|
(self, entry)
|
Add the given entry to the manifest. Entries are rebased at open() time
instead of add() time so that they can be more easily remove()d.
|
Add the given entry to the manifest. Entries are rebased at open() time
instead of add() time so that they can be more easily remove()d.
|
[
"Add",
"the",
"given",
"entry",
"to",
"the",
"manifest",
".",
"Entries",
"are",
"rebased",
"at",
"open",
"()",
"time",
"instead",
"of",
"add",
"()",
"time",
"so",
"that",
"they",
"can",
"be",
"more",
"easily",
"remove",
"()",
"d",
"."
] |
def add(self, entry):
        '''
        Add the given entry to the manifest. Entries are rebased at open() time
        instead of add() time so that they can be more easily remove()d.

        :param entry: a ManifestEntry instance to append.
        '''
        # Entries are stored untouched; any rebasing is deferred (see above).
        assert isinstance(entry, ManifestEntry)
        self._entries.append(entry)
|
[
"def",
"add",
"(",
"self",
",",
"entry",
")",
":",
"assert",
"isinstance",
"(",
"entry",
",",
"ManifestEntry",
")",
"self",
".",
"_entries",
".",
"append",
"(",
"entry",
")"
] |
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/mozbuild/mozpack/files.py#L547-L553
|
||
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
scripts/SANS/ISISCommandInterface.py
|
python
|
DisplayMask
|
(mask_worksp=None)
|
return mask_worksp
|
Displays masking by applying it to a workspace and displaying
it in instrument view. If no workspace is passed a copy of the
sample workspace is used, unless no sample was loaded and then
an empty instrument will be shown
@param mask_worksp: optional this named workspace will be modified and should be from the currently selected instrument
@return the name of the workspace that was displayed
|
Displays masking by applying it to a workspace and displaying
it in instrument view. If no workspace is passed a copy of the
sample workspace is used, unless no sample was loaded and then
an empty instrument will be shown
|
[
"Displays",
"masking",
"by",
"applying",
"it",
"to",
"a",
"workspace",
"and",
"displaying",
"it",
"in",
"instrument",
"view",
".",
"If",
"no",
"workspace",
"is",
"passed",
"a",
"copy",
"of",
"the",
"sample",
"workspace",
"is",
"used",
"unless",
"no",
"sample",
"was",
"loaded",
"and",
"then",
"an",
"empty",
"instrument",
"will",
"be",
"shown"
] |
def DisplayMask(mask_worksp=None):
    """
    Displays masking by applying it to a workspace and displaying
    it in instrument view. If no workspace is passed a copy of the
    sample workspace is used, unless no sample was loaded and then
    an empty instrument will be shown
    @param mask_worksp: optional this named workspace will be modified and should be from the currently selected instrument
    @return the name of the workspace that was displayed
    """
    if not mask_worksp:
        # No workspace supplied: clone the last-loaded sample into a
        # scratch workspace that the mask can be applied to.
        mask_worksp = '__CurrentMask'
        samp = LAST_SAMPLE
        if samp:
            CloneWorkspace(InputWorkspace=samp, OutputWorkspace=mask_worksp)
            if su.isEventWorkspace(samp):
                # Event data: also clone the monitor workspace and convert
                # the clone to a histogram so the mask can be displayed.
                assert samp + "_monitors" in mtd
                CloneWorkspace(InputWorkspace=samp + "_monitors",
                               OutputWorkspace=mask_worksp + "_monitors")
                su.fromEvent2Histogram(mask_worksp, mtd[mask_worksp + "_monitors"])
        else:
            # No sample ever loaded: nothing to display (returns None).
            msg = 'Cannot display the mask without a sample workspace'
            _printMessage(msg, log=True, no_console=False)
            return
    # Apply the current mask to the workspace and show it (modifies the
    # named workspace in place).
    ReductionSingleton().mask.display(mask_worksp, ReductionSingleton())
    return mask_worksp
|
[
"def",
"DisplayMask",
"(",
"mask_worksp",
"=",
"None",
")",
":",
"if",
"not",
"mask_worksp",
":",
"mask_worksp",
"=",
"'__CurrentMask'",
"samp",
"=",
"LAST_SAMPLE",
"if",
"samp",
":",
"CloneWorkspace",
"(",
"InputWorkspace",
"=",
"samp",
",",
"OutputWorkspace",
"=",
"mask_worksp",
")",
"if",
"su",
".",
"isEventWorkspace",
"(",
"samp",
")",
":",
"assert",
"samp",
"+",
"\"_monitors\"",
"in",
"mtd",
"CloneWorkspace",
"(",
"InputWorkspace",
"=",
"samp",
"+",
"\"_monitors\"",
",",
"OutputWorkspace",
"=",
"mask_worksp",
"+",
"\"_monitors\"",
")",
"su",
".",
"fromEvent2Histogram",
"(",
"mask_worksp",
",",
"mtd",
"[",
"mask_worksp",
"+",
"\"_monitors\"",
"]",
")",
"else",
":",
"msg",
"=",
"'Cannot display the mask without a sample workspace'",
"_printMessage",
"(",
"msg",
",",
"log",
"=",
"True",
",",
"no_console",
"=",
"False",
")",
"return",
"ReductionSingleton",
"(",
")",
".",
"mask",
".",
"display",
"(",
"mask_worksp",
",",
"ReductionSingleton",
"(",
")",
")",
"return",
"mask_worksp"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/ISISCommandInterface.py#L1021-L1048
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/scipy/stats/_distn_infrastructure.py
|
python
|
rv_generic.entropy
|
(self, *args, **kwds)
|
return output
|
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
|
Differential entropy of the RV.
|
[
"Differential",
"entropy",
"of",
"the",
"RV",
"."
] |
def entropy(self, *args, **kwds):
        """
        Differential entropy of the RV.
        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional (continuous distributions only).
            Scale parameter (default=1).
        Notes
        -----
        Entropy is defined base `e`:
        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        args = tuple(map(asarray, args))
        # Valid entries: shape args pass _argcheck, scale positive, and loc
        # not NaN (loc == loc is False exactly where loc is NaN).
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        # Invalid entries are filled with the distribution's bad value.
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, *args)
        # Scaling a continuous RV by `scale` shifts its differential entropy
        # by log(scale); for discrete RVs scale is 1 so log(scale) is 0.
        place(output, cond0, self.vecentropy(*goodargs) + log(scale))
        return output
|
[
"def",
"entropy",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"args",
",",
"loc",
",",
"scale",
"=",
"self",
".",
"_parse_args",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"# NB: for discrete distributions scale=1 by construction in _parse_args",
"args",
"=",
"tuple",
"(",
"map",
"(",
"asarray",
",",
"args",
")",
")",
"cond0",
"=",
"self",
".",
"_argcheck",
"(",
"*",
"args",
")",
"&",
"(",
"scale",
">",
"0",
")",
"&",
"(",
"loc",
"==",
"loc",
")",
"output",
"=",
"zeros",
"(",
"shape",
"(",
"cond0",
")",
",",
"'d'",
")",
"place",
"(",
"output",
",",
"(",
"1",
"-",
"cond0",
")",
",",
"self",
".",
"badvalue",
")",
"goodargs",
"=",
"argsreduce",
"(",
"cond0",
",",
"*",
"args",
")",
"place",
"(",
"output",
",",
"cond0",
",",
"self",
".",
"vecentropy",
"(",
"*",
"goodargs",
")",
"+",
"log",
"(",
"scale",
")",
")",
"return",
"output"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/stats/_distn_infrastructure.py#L1081-L1112
|
|
goldeneye-source/ges-code
|
2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d
|
thirdparty/protobuf-2.3.0/python/google/protobuf/service_reflection.py
|
python
|
GeneratedServiceStubType.__init__
|
(cls, name, bases, dictionary)
|
Creates a message service stub class.
Args:
name: Name of the class (ignored, here).
bases: Base classes of the class being constructed.
dictionary: The class dictionary of the class being constructed.
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
describing this protocol service type.
|
Creates a message service stub class.
|
[
"Creates",
"a",
"message",
"service",
"stub",
"class",
"."
] |
def __init__(cls, name, bases, dictionary):
    """Creates a message service stub class.
    Args:
      name: Name of the class (ignored, here).
      bases: Base classes of the class being constructed.
      dictionary: The class dictionary of the class being constructed.
        dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
        describing this protocol service type.
    """
    super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
    # Don't do anything if this class doesn't have a descriptor. This happens
    # when a service stub is subclassed.
    if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
      return
    # Build the stub from the descriptor and attach it to the class being
    # created (presumably one stub method per service method -- see
    # _ServiceStubBuilder for the details).
    descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
    service_stub_builder = _ServiceStubBuilder(descriptor)
    service_stub_builder.BuildServiceStub(cls)
|
[
"def",
"__init__",
"(",
"cls",
",",
"name",
",",
"bases",
",",
"dictionary",
")",
":",
"super",
"(",
"GeneratedServiceStubType",
",",
"cls",
")",
".",
"__init__",
"(",
"name",
",",
"bases",
",",
"dictionary",
")",
"# Don't do anything if this class doesn't have a descriptor. This happens",
"# when a service stub is subclassed.",
"if",
"GeneratedServiceStubType",
".",
"_DESCRIPTOR_KEY",
"not",
"in",
"dictionary",
":",
"return",
"descriptor",
"=",
"dictionary",
"[",
"GeneratedServiceStubType",
".",
"_DESCRIPTOR_KEY",
"]",
"service_stub_builder",
"=",
"_ServiceStubBuilder",
"(",
"descriptor",
")",
"service_stub_builder",
".",
"BuildServiceStub",
"(",
"cls",
")"
] |
https://github.com/goldeneye-source/ges-code/blob/2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d/thirdparty/protobuf-2.3.0/python/google/protobuf/service_reflection.py#L94-L111
|
||
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/compiler/pycodegen.py
|
python
|
compile
|
(source, filename, mode, flags=None, dont_inherit=None)
|
return gen.code
|
Replacement for builtin compile() function
|
Replacement for builtin compile() function
|
[
"Replacement",
"for",
"builtin",
"compile",
"()",
"function"
] |
def compile(source, filename, mode, flags=None, dont_inherit=None):
"""Replacement for builtin compile() function"""
if flags is not None or dont_inherit is not None:
raise RuntimeError, "not implemented yet"
if mode == "single":
gen = Interactive(source, filename)
elif mode == "exec":
gen = Module(source, filename)
elif mode == "eval":
gen = Expression(source, filename)
else:
raise ValueError("compile() 3rd arg must be 'exec' or "
"'eval' or 'single'")
gen.compile()
return gen.code
|
[
"def",
"compile",
"(",
"source",
",",
"filename",
",",
"mode",
",",
"flags",
"=",
"None",
",",
"dont_inherit",
"=",
"None",
")",
":",
"if",
"flags",
"is",
"not",
"None",
"or",
"dont_inherit",
"is",
"not",
"None",
":",
"raise",
"RuntimeError",
",",
"\"not implemented yet\"",
"if",
"mode",
"==",
"\"single\"",
":",
"gen",
"=",
"Interactive",
"(",
"source",
",",
"filename",
")",
"elif",
"mode",
"==",
"\"exec\"",
":",
"gen",
"=",
"Module",
"(",
"source",
",",
"filename",
")",
"elif",
"mode",
"==",
"\"eval\"",
":",
"gen",
"=",
"Expression",
"(",
"source",
",",
"filename",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"compile() 3rd arg must be 'exec' or \"",
"\"'eval' or 'single'\"",
")",
"gen",
".",
"compile",
"(",
")",
"return",
"gen",
".",
"code"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/compiler/pycodegen.py#L51-L66
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/python/ops/distributions/gamma.py
|
python
|
Gamma.rate
|
(self)
|
return self._rate
|
Rate parameter.
|
Rate parameter.
|
[
"Rate",
"parameter",
"."
] |
def rate(self):
"""Rate parameter."""
return self._rate
|
[
"def",
"rate",
"(",
"self",
")",
":",
"return",
"self",
".",
"_rate"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/distributions/gamma.py#L160-L162
|
|
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/python/debug/lib/debug_data.py
|
python
|
InconvertibleTensorProto.__init__
|
(self, tensor_proto, initialized=True)
|
Constructor.
Args:
tensor_proto: the `TensorProto` object that cannot be represented as a
`np.ndarray` object.
initialized: (`bool`) whether the Tensor is initialized.
|
Constructor.
|
[
"Constructor",
"."
] |
def __init__(self, tensor_proto, initialized=True):
"""Constructor.
Args:
tensor_proto: the `TensorProto` object that cannot be represented as a
`np.ndarray` object.
initialized: (`bool`) whether the Tensor is initialized.
"""
self._tensor_proto = tensor_proto
self._initialized = initialized
|
[
"def",
"__init__",
"(",
"self",
",",
"tensor_proto",
",",
"initialized",
"=",
"True",
")",
":",
"self",
".",
"_tensor_proto",
"=",
"tensor_proto",
"self",
".",
"_initialized",
"=",
"initialized"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/debug/lib/debug_data.py#L60-L69
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/extern/pygments/formatters/img.py
|
python
|
ImageFormatter._get_char_x
|
(self, charno)
|
return charno * self.fontw + self.image_pad + self.line_number_width
|
Get the X coordinate of a character position.
|
Get the X coordinate of a character position.
|
[
"Get",
"the",
"X",
"coordinate",
"of",
"a",
"character",
"position",
"."
] |
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
|
[
"def",
"_get_char_x",
"(",
"self",
",",
"charno",
")",
":",
"return",
"charno",
"*",
"self",
".",
"fontw",
"+",
"self",
".",
"image_pad",
"+",
"self",
".",
"line_number_width"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/pygments/formatters/img.py#L357-L361
|
|
trilinos/Trilinos
|
6168be6dd51e35e1cd681e9c4b24433e709df140
|
packages/kokkos-kernels/scripts/analysis/batched/pd.py
|
python
|
kkt_parse_oneliner
|
(ln)
|
return (v[0].strip(), int(v[1]), float(v[2]), float(v[3]), float(v[4]))
|
Parse one line of data -> (alg name, block size, time, avg flop/s, max flop/s).
|
Parse one line of data -> (alg name, block size, time, avg flop/s, max flop/s).
|
[
"Parse",
"one",
"line",
"of",
"data",
"-",
">",
"(",
"alg",
"name",
"block",
"size",
"time",
"avg",
"flop",
"/",
"s",
"max",
"flop",
"/",
"s",
")",
"."
] |
def kkt_parse_oneliner(ln):
'Parse one line of data -> (alg name, block size, time, avg flop/s, max flop/s).'
v = kkt_parse_oneliner_re(ln)
if len(v) == 0:
return ()
if len(v) != 5:
print 'Parse error; ln = ' + ln
return ()
return (v[0].strip(), int(v[1]), float(v[2]), float(v[3]), float(v[4]))
|
[
"def",
"kkt_parse_oneliner",
"(",
"ln",
")",
":",
"v",
"=",
"kkt_parse_oneliner_re",
"(",
"ln",
")",
"if",
"len",
"(",
"v",
")",
"==",
"0",
":",
"return",
"(",
")",
"if",
"len",
"(",
"v",
")",
"!=",
"5",
":",
"print",
"'Parse error; ln = '",
"+",
"ln",
"return",
"(",
")",
"return",
"(",
"v",
"[",
"0",
"]",
".",
"strip",
"(",
")",
",",
"int",
"(",
"v",
"[",
"1",
"]",
")",
",",
"float",
"(",
"v",
"[",
"2",
"]",
")",
",",
"float",
"(",
"v",
"[",
"3",
"]",
")",
",",
"float",
"(",
"v",
"[",
"4",
"]",
")",
")"
] |
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/kokkos-kernels/scripts/analysis/batched/pd.py#L158-L166
|
|
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/python/framework/ops.py
|
python
|
Graph.get_operations
|
(self)
|
Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
|
Return the list of operations in the graph.
|
[
"Return",
"the",
"list",
"of",
"operations",
"in",
"the",
"graph",
"."
] |
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
|
[
"def",
"get_operations",
"(",
"self",
")",
":",
"if",
"self",
".",
"_finalized",
":",
"return",
"list",
"(",
"self",
".",
"_nodes_by_id",
".",
"values",
"(",
")",
")",
"with",
"self",
".",
"_lock",
":",
"return",
"list",
"(",
"self",
".",
"_nodes_by_id",
".",
"values",
"(",
")",
")"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/framework/ops.py#L2799-L2815
|
||
miyosuda/TensorFlowAndroidDemo
|
35903e0221aa5f109ea2dbef27f20b52e317f42d
|
jni-build/jni/include/tensorflow/python/ops/parsing_ops.py
|
python
|
parse_example
|
(serialized, features, name=None, example_names=None)
|
return _parse_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
|
Parses `Example` protos into a `dict` of tensors.
Parses a number of serialized [`Example`]
(https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`.
`example_names` may contain descriptive names for the corresponding serialized
protos. These may be useful for debugging purposes, but they have no effect on
the output. If not `None`, `example_names` must be the same length as `serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`
and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a
`SparseTensor`, and each `FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
is the batch entry the value is from in `serialized`, and `index` is the
value's index in the list of values associated with that feature and example.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Examples:
For example, if one expects a `tf.float32` sparse feature `ft` and three
serialized `Example`s are provided:
```
serialized = [
features
{ feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } },
features
{ feature []},
features
{ feature { key: "ft" value { float_list { value: [3.0] } } }
]
```
then the output will look like:
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
```
Given two `Example` input protos in `serialized`:
```
[
features {
feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } }
feature { key: "gps" value { float_list { value: [] } } }
},
features {
feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } }
feature { key: "dank" value { int64_list { value: [ 42 ] } } }
feature { key: "gps" value { } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"kw": VarLenFeature(tf.string),
"dank": VarLenFeature(tf.int64),
"gps": VarLenFeature(tf.float32),
}
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["knit", "big", "emmy"]
shape=[2, 2]),
"dank": SparseTensor(
indices=[[1, 0]],
values=[42],
shape=[2, 1]),
"gps": SparseTensor(
indices=[],
values=[],
shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
```
[
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
}
]
```
We can use arguments:
```
example_names: ["input0", "input1"],
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
|
Parses `Example` protos into a `dict` of tensors.
|
[
"Parses",
"Example",
"protos",
"into",
"a",
"dict",
"of",
"tensors",
"."
] |
def parse_example(serialized, features, name=None, example_names=None):
# pylint: disable=line-too-long
"""Parses `Example` protos into a `dict` of tensors.
Parses a number of serialized [`Example`]
(https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
protos given in `serialized`.
`example_names` may contain descriptive names for the corresponding serialized
protos. These may be useful for debugging purposes, but they have no effect on
the output. If not `None`, `example_names` must be the same length as `serialized`.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`
and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a
`SparseTensor`, and each `FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
is the batch entry the value is from in `serialized`, and `index` is the
value's index in the list of values associated with that feature and example.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Examples:
For example, if one expects a `tf.float32` sparse feature `ft` and three
serialized `Example`s are provided:
```
serialized = [
features
{ feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } },
features
{ feature []},
features
{ feature { key: "ft" value { float_list { value: [3.0] } } }
]
```
then the output will look like:
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
```
Given two `Example` input protos in `serialized`:
```
[
features {
feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } }
feature { key: "gps" value { float_list { value: [] } } }
},
features {
feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } }
feature { key: "dank" value { int64_list { value: [ 42 ] } } }
feature { key: "gps" value { } }
}
]
```
And arguments
```
example_names: ["input0", "input1"],
features: {
"kw": VarLenFeature(tf.string),
"dank": VarLenFeature(tf.int64),
"gps": VarLenFeature(tf.float32),
}
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["knit", "big", "emmy"]
shape=[2, 2]),
"dank": SparseTensor(
indices=[[1, 0]],
values=[42],
shape=[2, 1]),
"gps": SparseTensor(
indices=[],
values=[],
shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
```
[
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
}
]
```
We can use arguments:
```
example_names: ["input0", "input1"],
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
if not features:
raise ValueError("Missing: features was %s." % features)
(sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
dense_shapes) = _features_to_raw_params(
features, [VarLenFeature, FixedLenFeature])
return _parse_example_raw(
serialized, example_names, sparse_keys, sparse_types, dense_keys,
dense_types, dense_defaults, dense_shapes, name)
|
[
"def",
"parse_example",
"(",
"serialized",
",",
"features",
",",
"name",
"=",
"None",
",",
"example_names",
"=",
"None",
")",
":",
"# pylint: disable=line-too-long",
"if",
"not",
"features",
":",
"raise",
"ValueError",
"(",
"\"Missing: features was %s.\"",
"%",
"features",
")",
"(",
"sparse_keys",
",",
"sparse_types",
",",
"dense_keys",
",",
"dense_types",
",",
"dense_defaults",
",",
"dense_shapes",
")",
"=",
"_features_to_raw_params",
"(",
"features",
",",
"[",
"VarLenFeature",
",",
"FixedLenFeature",
"]",
")",
"return",
"_parse_example_raw",
"(",
"serialized",
",",
"example_names",
",",
"sparse_keys",
",",
"sparse_types",
",",
"dense_keys",
",",
"dense_types",
",",
"dense_defaults",
",",
"dense_shapes",
",",
"name",
")"
] |
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/parsing_ops.py#L152-L307
|
|
tensorflow/tensorflow
|
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
|
tensorflow/python/tpu/tensor_tracer_flags.py
|
python
|
TTParameters.is_enabled
|
(self)
|
Returns True if TensorTracer is enabled.
|
Returns True if TensorTracer is enabled.
|
[
"Returns",
"True",
"if",
"TensorTracer",
"is",
"enabled",
"."
] |
def is_enabled(self):
"""Returns True if TensorTracer is enabled."""
if self.is_flag_on(FLAG_NAME_ENABLE):
logging.debug('Tensor Tracer is enabled with flags %s.',
self._env.get(FLAGS_ENV_VAR))
return True
else:
return False
|
[
"def",
"is_enabled",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_flag_on",
"(",
"FLAG_NAME_ENABLE",
")",
":",
"logging",
".",
"debug",
"(",
"'Tensor Tracer is enabled with flags %s.'",
",",
"self",
".",
"_env",
".",
"get",
"(",
"FLAGS_ENV_VAR",
")",
")",
"return",
"True",
"else",
":",
"return",
"False"
] |
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/tpu/tensor_tracer_flags.py#L462-L470
|
||
albertz/openlierox
|
d316c14a8eb57848ef56e9bfa7b23a56f694a51b
|
tools/DedicatedServerVideo/gdata/sites/client.py
|
python
|
SitesClient.get_content_feed
|
(self, uri=None, auth_token=None, **kwargs)
|
return self.get_feed(uri, desired_class=gdata.sites.data.ContentFeed,
auth_token=auth_token, **kwargs)
|
Retrieves the content feed containing the current state of site.
Args:
uri: string (optional) A full URI to query the Content feed with.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
gdata.sites.data.ContentFeed
|
Retrieves the content feed containing the current state of site.
|
[
"Retrieves",
"the",
"content",
"feed",
"containing",
"the",
"current",
"state",
"of",
"site",
"."
] |
def get_content_feed(self, uri=None, auth_token=None, **kwargs):
"""Retrieves the content feed containing the current state of site.
Args:
uri: string (optional) A full URI to query the Content feed with.
auth_token: (optional) gdata.gauth.ClientLoginToken, AuthSubToken, or
OAuthToken which authorizes this client to edit the user's data.
kwargs: Other parameters to pass to self.get_feed().
Returns:
gdata.sites.data.ContentFeed
"""
if uri is None:
uri = self.make_content_feed_uri()
return self.get_feed(uri, desired_class=gdata.sites.data.ContentFeed,
auth_token=auth_token, **kwargs)
|
[
"def",
"get_content_feed",
"(",
"self",
",",
"uri",
"=",
"None",
",",
"auth_token",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"uri",
"is",
"None",
":",
"uri",
"=",
"self",
".",
"make_content_feed_uri",
"(",
")",
"return",
"self",
".",
"get_feed",
"(",
"uri",
",",
"desired_class",
"=",
"gdata",
".",
"sites",
".",
"data",
".",
"ContentFeed",
",",
"auth_token",
"=",
"auth_token",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/gdata/sites/client.py#L122-L137
|
|
SoarGroup/Soar
|
a1c5e249499137a27da60533c72969eef3b8ab6b
|
scons/scons-local-4.1.0/SCons/Node/FS.py
|
python
|
File.get_cachedir_csig
|
(self)
|
return self.cachedir_csig
|
Fetch a Node's content signature for purposes of computing
another Node's cachesig.
This is a wrapper around the normal get_csig() method that handles
the somewhat obscure case of using CacheDir with the -n option.
Any files that don't exist would normally be "built" by fetching
them from the cache, but the normal get_csig() method will try
to open up the local file, which doesn't exist because the -n
option meant we didn't actually pull the file from cachedir.
But since the file *does* actually exist in the cachedir, we
can use its contents for the csig.
|
Fetch a Node's content signature for purposes of computing
another Node's cachesig.
|
[
"Fetch",
"a",
"Node",
"s",
"content",
"signature",
"for",
"purposes",
"of",
"computing",
"another",
"Node",
"s",
"cachesig",
"."
] |
def get_cachedir_csig(self):
"""
Fetch a Node's content signature for purposes of computing
another Node's cachesig.
This is a wrapper around the normal get_csig() method that handles
the somewhat obscure case of using CacheDir with the -n option.
Any files that don't exist would normally be "built" by fetching
them from the cache, but the normal get_csig() method will try
to open up the local file, which doesn't exist because the -n
option meant we didn't actually pull the file from cachedir.
But since the file *does* actually exist in the cachedir, we
can use its contents for the csig.
"""
try:
return self.cachedir_csig
except AttributeError:
pass
cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
if not self.exists() and cachefile and os.path.exists(cachefile):
self.cachedir_csig = MD5filesignature(cachefile, File.md5_chunksize)
else:
self.cachedir_csig = self.get_csig()
return self.cachedir_csig
|
[
"def",
"get_cachedir_csig",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"cachedir_csig",
"except",
"AttributeError",
":",
"pass",
"cachedir",
",",
"cachefile",
"=",
"self",
".",
"get_build_env",
"(",
")",
".",
"get_CacheDir",
"(",
")",
".",
"cachepath",
"(",
"self",
")",
"if",
"not",
"self",
".",
"exists",
"(",
")",
"and",
"cachefile",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"cachefile",
")",
":",
"self",
".",
"cachedir_csig",
"=",
"MD5filesignature",
"(",
"cachefile",
",",
"File",
".",
"md5_chunksize",
")",
"else",
":",
"self",
".",
"cachedir_csig",
"=",
"self",
".",
"get_csig",
"(",
")",
"return",
"self",
".",
"cachedir_csig"
] |
https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Node/FS.py#L3594-L3618
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/genericmessagedialog.py
|
python
|
GenericMessageDialog.GetDefaultNoLabel
|
(self)
|
return _("No")
|
Returns the default label for the ``No`` button.
:note: this method may be overridden to provide different defaults for the
default button labels.
.. versionadded:: 0.9.3
|
Returns the default label for the ``No`` button.
|
[
"Returns",
"the",
"default",
"label",
"for",
"the",
"No",
"button",
"."
] |
def GetDefaultNoLabel(self):
"""
Returns the default label for the ``No`` button.
:note: this method may be overridden to provide different defaults for the
default button labels.
.. versionadded:: 0.9.3
"""
return _("No")
|
[
"def",
"GetDefaultNoLabel",
"(",
"self",
")",
":",
"return",
"_",
"(",
"\"No\"",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/genericmessagedialog.py#L1023-L1033
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py
|
python
|
ParserElement.setDebugActions
|
( self, startAction, successAction, exceptionAction )
|
return self
|
Enable display of debugging messages while doing pattern matching.
|
Enable display of debugging messages while doing pattern matching.
|
[
"Enable",
"display",
"of",
"debugging",
"messages",
"while",
"doing",
"pattern",
"matching",
"."
] |
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
|
[
"def",
"setDebugActions",
"(",
"self",
",",
"startAction",
",",
"successAction",
",",
"exceptionAction",
")",
":",
"self",
".",
"debugActions",
"=",
"(",
"startAction",
"or",
"_defaultStartDebugAction",
",",
"successAction",
"or",
"_defaultSuccessDebugAction",
",",
"exceptionAction",
"or",
"_defaultExceptionDebugAction",
")",
"self",
".",
"debug",
"=",
"True",
"return",
"self"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/_vendor/pyparsing.py#L2102-L2110
|
|
mhammond/pywin32
|
44afd86ba8485194df93234639243252deeb40d5
|
Pythonwin/pywin/framework/interact.py
|
python
|
CloseInteractiveWindow
|
()
|
Close the interactive window, allowing it to be re-created on demand.
|
Close the interactive window, allowing it to be re-created on demand.
|
[
"Close",
"the",
"interactive",
"window",
"allowing",
"it",
"to",
"be",
"re",
"-",
"created",
"on",
"demand",
"."
] |
def CloseInteractiveWindow():
"""Close the interactive window, allowing it to be re-created on demand."""
global edit
if edit is not None and edit.currentView is not None:
if edit.currentView.GetParentFrame() == win32ui.GetMainFrame():
# It is docked, just hide the dock bar.
frame = win32ui.GetMainFrame()
cb = frame.GetControlBar(ID_DOCKED_INTERACTIVE_CONTROLBAR)
frame.ShowControlBar(cb, 0, 1)
else:
# It is a standard window - destroy the frame/view, allowing the object itself to remain.
edit.currentView.GetParentFrame().DestroyWindow()
|
[
"def",
"CloseInteractiveWindow",
"(",
")",
":",
"global",
"edit",
"if",
"edit",
"is",
"not",
"None",
"and",
"edit",
".",
"currentView",
"is",
"not",
"None",
":",
"if",
"edit",
".",
"currentView",
".",
"GetParentFrame",
"(",
")",
"==",
"win32ui",
".",
"GetMainFrame",
"(",
")",
":",
"# It is docked, just hide the dock bar.",
"frame",
"=",
"win32ui",
".",
"GetMainFrame",
"(",
")",
"cb",
"=",
"frame",
".",
"GetControlBar",
"(",
"ID_DOCKED_INTERACTIVE_CONTROLBAR",
")",
"frame",
".",
"ShowControlBar",
"(",
"cb",
",",
"0",
",",
"1",
")",
"else",
":",
"# It is a standard window - destroy the frame/view, allowing the object itself to remain.",
"edit",
".",
"currentView",
".",
"GetParentFrame",
"(",
")",
".",
"DestroyWindow",
"(",
")"
] |
https://github.com/mhammond/pywin32/blob/44afd86ba8485194df93234639243252deeb40d5/Pythonwin/pywin/framework/interact.py#L942-L953
|
||
wenwei202/caffe
|
f54a74abaf6951d8485cbdcfa1d74a4c37839466
|
python/caffe/io.py
|
python
|
Transformer.set_transpose
|
(self, in_, order)
|
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Parameters
----------
in_ : which input to assign this channel order
order : the order to transpose the dimensions
|
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
|
[
"Set",
"the",
"input",
"channel",
"order",
"for",
"e",
".",
"g",
".",
"RGB",
"to",
"BGR",
"conversion",
"as",
"needed",
"for",
"the",
"reference",
"ImageNet",
"model",
"."
] |
def set_transpose(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
Parameters
----------
in_ : which input to assign this channel order
order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
|
[
"def",
"set_transpose",
"(",
"self",
",",
"in_",
",",
"order",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"if",
"len",
"(",
"order",
")",
"!=",
"len",
"(",
"self",
".",
"inputs",
"[",
"in_",
"]",
")",
"-",
"1",
":",
"raise",
"Exception",
"(",
"'Transpose order needs to have the same number of '",
"'dimensions as the input.'",
")",
"self",
".",
"transpose",
"[",
"in_",
"]",
"=",
"order"
] |
https://github.com/wenwei202/caffe/blob/f54a74abaf6951d8485cbdcfa1d74a4c37839466/python/caffe/io.py#L187-L201
|
||
OPAE/opae-sdk
|
221124343c8275243a249eb72d69e0ea2d568d1b
|
python/opae.admin/opae/admin/tools/fpgasupdate.py
|
python
|
canonicalize_bdf
|
(bdf)
|
return None
|
Verifies the given PCIe address.
bdf - a string representing the PCIe address. It must be of
the form bb:dd.f or ssss:bb:dd.f.
returns None if bdf does not have the proper form. Otherwise
returns the canonical form as a string.
|
Verifies the given PCIe address.
|
[
"Verifies",
"the",
"given",
"PCIe",
"address",
"."
] |
def canonicalize_bdf(bdf):
"""Verifies the given PCIe address.
bdf - a string representing the PCIe address. It must be of
the form bb:dd.f or ssss:bb:dd.f.
returns None if bdf does not have the proper form. Otherwise
returns the canonical form as a string.
"""
abbrev_pcie_addr_pattern = r'(?P<bus>[\da-f]{2}):' \
r'(?P<device>[\da-f]{2})\.' \
r'(?P<function>\d)'
pcie_addr_pattern = r'(?P<segment>[\da-f]{4}):' + abbrev_pcie_addr_pattern
abbrev_regex = re.compile(abbrev_pcie_addr_pattern, re.IGNORECASE)
match = abbrev_regex.match(bdf)
if match:
return '0000:' + bdf
regex = re.compile(pcie_addr_pattern, re.IGNORECASE)
match = regex.match(bdf)
if match:
return bdf
return None
|
[
"def",
"canonicalize_bdf",
"(",
"bdf",
")",
":",
"abbrev_pcie_addr_pattern",
"=",
"r'(?P<bus>[\\da-f]{2}):'",
"r'(?P<device>[\\da-f]{2})\\.'",
"r'(?P<function>\\d)'",
"pcie_addr_pattern",
"=",
"r'(?P<segment>[\\da-f]{4}):'",
"+",
"abbrev_pcie_addr_pattern",
"abbrev_regex",
"=",
"re",
".",
"compile",
"(",
"abbrev_pcie_addr_pattern",
",",
"re",
".",
"IGNORECASE",
")",
"match",
"=",
"abbrev_regex",
".",
"match",
"(",
"bdf",
")",
"if",
"match",
":",
"return",
"'0000:'",
"+",
"bdf",
"regex",
"=",
"re",
".",
"compile",
"(",
"pcie_addr_pattern",
",",
"re",
".",
"IGNORECASE",
")",
"match",
"=",
"regex",
".",
"match",
"(",
"bdf",
")",
"if",
"match",
":",
"return",
"bdf",
"return",
"None"
] |
https://github.com/OPAE/opae-sdk/blob/221124343c8275243a249eb72d69e0ea2d568d1b/python/opae.admin/opae/admin/tools/fpgasupdate.py#L356-L381
|
|
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
scripts/SANS/ISISCommandInterface.py
|
python
|
get_q_resolution_a2
|
()
|
return val
|
Get the A2 diameter
@returns the diameter for the second aperature in mm
|
Get the A2 diameter
|
[
"Get",
"the",
"A2",
"diameter"
] |
def get_q_resolution_a2():
'''
Get the A2 diameter
@returns the diameter for the second aperature in mm
'''
val = get_q_resolution_float(ReductionSingleton().to_Q.get_q_resolution_a2, "A2")
print(str(val))
return val
|
[
"def",
"get_q_resolution_a2",
"(",
")",
":",
"val",
"=",
"get_q_resolution_float",
"(",
"ReductionSingleton",
"(",
")",
".",
"to_Q",
".",
"get_q_resolution_a2",
",",
"\"A2\"",
")",
"print",
"(",
"str",
"(",
"val",
")",
")",
"return",
"val"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/ISISCommandInterface.py#L1625-L1632
|
|
sigmaai/self-driving-golf-cart
|
8d891600af3d851add27a10ae45cf3c2108bb87c
|
ros/src/ros_carla_bridge/carla_ackermann_control/src/carla_ackermann_control/carla_control_physics.py
|
python
|
get_slope_force
|
(vehicle_info, vehicle_status)
|
return slope_force
|
Calculate the force of a carla vehicle faces when driving on a slope.
:param vehicle_info: the vehicle info
:type vehicle_info: carla_ros_bridge.CarlaEgoVehicleInfo
:param vehicle_status: the ego vehicle status
:type vehicle_status: carla_ros_bridge.CarlaEgoVehicleStatus
:return: slope force [N, >0 uphill, <0 downhill]
:rtype: float64
|
Calculate the force of a carla vehicle faces when driving on a slope.
|
[
"Calculate",
"the",
"force",
"of",
"a",
"carla",
"vehicle",
"faces",
"when",
"driving",
"on",
"a",
"slope",
"."
] |
def get_slope_force(vehicle_info, vehicle_status):
"""
Calculate the force of a carla vehicle faces when driving on a slope.
:param vehicle_info: the vehicle info
:type vehicle_info: carla_ros_bridge.CarlaEgoVehicleInfo
:param vehicle_status: the ego vehicle status
:type vehicle_status: carla_ros_bridge.CarlaEgoVehicleStatus
:return: slope force [N, >0 uphill, <0 downhill]
:rtype: float64
"""
dummy_roll, pitch, dummy_yaw = euler_from_quaternion(
[vehicle_status.orientation.x, vehicle_status.orientation.y,
vehicle_status.orientation.z, vehicle_status.orientation.w])
slope_force = get_acceleration_of_gravity(
vehicle_info) * get_vehicle_mass(vehicle_info) * math.sin(-pitch)
return slope_force
|
[
"def",
"get_slope_force",
"(",
"vehicle_info",
",",
"vehicle_status",
")",
":",
"dummy_roll",
",",
"pitch",
",",
"dummy_yaw",
"=",
"euler_from_quaternion",
"(",
"[",
"vehicle_status",
".",
"orientation",
".",
"x",
",",
"vehicle_status",
".",
"orientation",
".",
"y",
",",
"vehicle_status",
".",
"orientation",
".",
"z",
",",
"vehicle_status",
".",
"orientation",
".",
"w",
"]",
")",
"slope_force",
"=",
"get_acceleration_of_gravity",
"(",
"vehicle_info",
")",
"*",
"get_vehicle_mass",
"(",
"vehicle_info",
")",
"*",
"math",
".",
"sin",
"(",
"-",
"pitch",
")",
"return",
"slope_force"
] |
https://github.com/sigmaai/self-driving-golf-cart/blob/8d891600af3d851add27a10ae45cf3c2108bb87c/ros/src/ros_carla_bridge/carla_ackermann_control/src/carla_ackermann_control/carla_control_physics.py#L174-L190
|
|
panda3d/panda3d
|
833ad89ebad58395d0af0b7ec08538e5e4308265
|
direct/src/dist/FreezeTool.py
|
python
|
Freezer.generateCode
|
(self, basename, compileToExe = False)
|
return target
|
After a call to done(), this freezes all of the
accumulated python code into either an executable program (if
compileToExe is true) or a dynamic library (if compileToExe is
false). The basename is the name of the file to write,
without the extension.
The return value is the newly-generated filename, including
the filename extension. Additional extension modules are
listed in self.extras.
|
After a call to done(), this freezes all of the
accumulated python code into either an executable program (if
compileToExe is true) or a dynamic library (if compileToExe is
false). The basename is the name of the file to write,
without the extension.
|
[
"After",
"a",
"call",
"to",
"done",
"()",
"this",
"freezes",
"all",
"of",
"the",
"accumulated",
"python",
"code",
"into",
"either",
"an",
"executable",
"program",
"(",
"if",
"compileToExe",
"is",
"true",
")",
"or",
"a",
"dynamic",
"library",
"(",
"if",
"compileToExe",
"is",
"false",
")",
".",
"The",
"basename",
"is",
"the",
"name",
"of",
"the",
"file",
"to",
"write",
"without",
"the",
"extension",
"."
] |
def generateCode(self, basename, compileToExe = False):
""" After a call to done(), this freezes all of the
accumulated python code into either an executable program (if
compileToExe is true) or a dynamic library (if compileToExe is
false). The basename is the name of the file to write,
without the extension.
The return value is the newly-generated filename, including
the filename extension. Additional extension modules are
listed in self.extras. """
if compileToExe:
# We must have a __main__ module to make an exe file.
if not self.__writingModule('__main__'):
message = "Can't generate an executable without a __main__ module."
raise Exception(message)
filename = basename + self.sourceExtension
dllexport = ''
dllimport = ''
if self.platform.startswith('win'):
dllexport = '__declspec(dllexport) '
dllimport = '__declspec(dllimport) '
if not self.cenv:
self.cenv = CompilationEnvironment(platform = self.platform)
if compileToExe:
code = self.frozenMainCode
if self.platform.startswith('win'):
code += self.frozenDllMainCode
initCode = self.mainInitCode % {
'frozenMainCode' : code,
'programName' : os.path.basename(basename),
'dllexport' : dllexport,
'dllimport' : dllimport,
}
if self.platform.startswith('win'):
target = basename + '.exe'
else:
target = basename
compileFunc = self.cenv.compileExe
else:
if self.platform.startswith('win'):
target = basename + self.cenv.dllext + '.pyd'
else:
target = basename + '.so'
initCode = dllInitCode % {
'moduleName' : os.path.basename(basename),
'dllexport' : dllexport,
'dllimport' : dllimport,
}
compileFunc = self.cenv.compileDll
self.writeCode(filename, initCode=initCode)
# Keep track of the files we should clean up after use.
cleanFiles = [filename, basename + self.objectExtension]
extraLink = []
if self.linkExtensionModules:
for mod, fn in self.extras:
if not fn:
continue
if sys.platform == 'win32':
# We can't link with a .pyd directly on Windows. Check
# if there is a corresponding .lib file in the Python libs
# directory.
libsdir = os.path.join(sys.exec_prefix, 'libs')
libfile = os.path.join(libsdir, mod + '.lib')
if os.path.isfile(libfile):
extraLink.append(mod + '.lib')
continue
# No, so we have to generate a .lib file. This is pretty
# easy given that we know the only symbol we need is a
# initmodule or PyInit_module function.
modname = mod.split('.')[-1]
libfile = modname + '.lib'
symbolName = 'PyInit_' + modname
os.system('lib /nologo /def /export:%s /name:%s.pyd /out:%s' % (symbolName, modname, libfile))
extraLink.append(libfile)
cleanFiles += [libfile, modname + '.exp']
else:
extraLink.append(fn)
try:
compileFunc(filename, basename, extraLink=extraLink)
finally:
if not self.keepTemporaryFiles:
for file in cleanFiles:
if os.path.exists(file):
os.unlink(file)
return target
|
[
"def",
"generateCode",
"(",
"self",
",",
"basename",
",",
"compileToExe",
"=",
"False",
")",
":",
"if",
"compileToExe",
":",
"# We must have a __main__ module to make an exe file.",
"if",
"not",
"self",
".",
"__writingModule",
"(",
"'__main__'",
")",
":",
"message",
"=",
"\"Can't generate an executable without a __main__ module.\"",
"raise",
"Exception",
"(",
"message",
")",
"filename",
"=",
"basename",
"+",
"self",
".",
"sourceExtension",
"dllexport",
"=",
"''",
"dllimport",
"=",
"''",
"if",
"self",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"dllexport",
"=",
"'__declspec(dllexport) '",
"dllimport",
"=",
"'__declspec(dllimport) '",
"if",
"not",
"self",
".",
"cenv",
":",
"self",
".",
"cenv",
"=",
"CompilationEnvironment",
"(",
"platform",
"=",
"self",
".",
"platform",
")",
"if",
"compileToExe",
":",
"code",
"=",
"self",
".",
"frozenMainCode",
"if",
"self",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"code",
"+=",
"self",
".",
"frozenDllMainCode",
"initCode",
"=",
"self",
".",
"mainInitCode",
"%",
"{",
"'frozenMainCode'",
":",
"code",
",",
"'programName'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"basename",
")",
",",
"'dllexport'",
":",
"dllexport",
",",
"'dllimport'",
":",
"dllimport",
",",
"}",
"if",
"self",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"target",
"=",
"basename",
"+",
"'.exe'",
"else",
":",
"target",
"=",
"basename",
"compileFunc",
"=",
"self",
".",
"cenv",
".",
"compileExe",
"else",
":",
"if",
"self",
".",
"platform",
".",
"startswith",
"(",
"'win'",
")",
":",
"target",
"=",
"basename",
"+",
"self",
".",
"cenv",
".",
"dllext",
"+",
"'.pyd'",
"else",
":",
"target",
"=",
"basename",
"+",
"'.so'",
"initCode",
"=",
"dllInitCode",
"%",
"{",
"'moduleName'",
":",
"os",
".",
"path",
".",
"basename",
"(",
"basename",
")",
",",
"'dllexport'",
":",
"dllexport",
",",
"'dllimport'",
":",
"dllimport",
",",
"}",
"compileFunc",
"=",
"self",
".",
"cenv",
".",
"compileDll",
"self",
".",
"writeCode",
"(",
"filename",
",",
"initCode",
"=",
"initCode",
")",
"# Keep track of the files we should clean up after use.",
"cleanFiles",
"=",
"[",
"filename",
",",
"basename",
"+",
"self",
".",
"objectExtension",
"]",
"extraLink",
"=",
"[",
"]",
"if",
"self",
".",
"linkExtensionModules",
":",
"for",
"mod",
",",
"fn",
"in",
"self",
".",
"extras",
":",
"if",
"not",
"fn",
":",
"continue",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"# We can't link with a .pyd directly on Windows. Check",
"# if there is a corresponding .lib file in the Python libs",
"# directory.",
"libsdir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"exec_prefix",
",",
"'libs'",
")",
"libfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"libsdir",
",",
"mod",
"+",
"'.lib'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"libfile",
")",
":",
"extraLink",
".",
"append",
"(",
"mod",
"+",
"'.lib'",
")",
"continue",
"# No, so we have to generate a .lib file. This is pretty",
"# easy given that we know the only symbol we need is a",
"# initmodule or PyInit_module function.",
"modname",
"=",
"mod",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"libfile",
"=",
"modname",
"+",
"'.lib'",
"symbolName",
"=",
"'PyInit_'",
"+",
"modname",
"os",
".",
"system",
"(",
"'lib /nologo /def /export:%s /name:%s.pyd /out:%s'",
"%",
"(",
"symbolName",
",",
"modname",
",",
"libfile",
")",
")",
"extraLink",
".",
"append",
"(",
"libfile",
")",
"cleanFiles",
"+=",
"[",
"libfile",
",",
"modname",
"+",
"'.exp'",
"]",
"else",
":",
"extraLink",
".",
"append",
"(",
"fn",
")",
"try",
":",
"compileFunc",
"(",
"filename",
",",
"basename",
",",
"extraLink",
"=",
"extraLink",
")",
"finally",
":",
"if",
"not",
"self",
".",
"keepTemporaryFiles",
":",
"for",
"file",
"in",
"cleanFiles",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file",
")",
":",
"os",
".",
"unlink",
"(",
"file",
")",
"return",
"target"
] |
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/dist/FreezeTool.py#L1657-L1755
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
tools/auto_bisect/bisect_perf_regression.py
|
python
|
BisectPerformanceMetrics.PrepareToBisectOnDepot
|
(
self, current_depot, start_revision, end_revision, previous_revision)
|
return depot_revision_list
|
Changes to the appropriate directory and gathers a list of revisions
to bisect between |start_revision| and |end_revision|.
Args:
current_depot: The depot we want to bisect.
start_revision: Start of the revision range.
end_revision: End of the revision range.
previous_revision: The last revision we synced to on |previous_depot|.
Returns:
A list containing the revisions between |start_revision| and
|end_revision| inclusive.
|
Changes to the appropriate directory and gathers a list of revisions
to bisect between |start_revision| and |end_revision|.
|
[
"Changes",
"to",
"the",
"appropriate",
"directory",
"and",
"gathers",
"a",
"list",
"of",
"revisions",
"to",
"bisect",
"between",
"|start_revision|",
"and",
"|end_revision|",
"."
] |
def PrepareToBisectOnDepot(
self, current_depot, start_revision, end_revision, previous_revision):
"""Changes to the appropriate directory and gathers a list of revisions
to bisect between |start_revision| and |end_revision|.
Args:
current_depot: The depot we want to bisect.
start_revision: Start of the revision range.
end_revision: End of the revision range.
previous_revision: The last revision we synced to on |previous_depot|.
Returns:
A list containing the revisions between |start_revision| and
|end_revision| inclusive.
"""
# Change into working directory of external library to run
# subsequent commands.
self.depot_registry.ChangeToDepotDir(current_depot)
# V8 (and possibly others) is merged in periodically. Bisecting
# this directory directly won't give much good info.
if 'custom_deps' in bisect_utils.DEPOT_DEPS_NAME[current_depot]:
config_path = os.path.join(self.src_cwd, '..')
if bisect_utils.RunGClientAndCreateConfig(
self.opts, bisect_utils.DEPOT_DEPS_NAME[current_depot]['custom_deps'],
cwd=config_path):
return []
if bisect_utils.RunGClient(
['sync', '--revision', previous_revision], cwd=self.src_cwd):
return []
if current_depot == 'v8_bleeding_edge':
self.depot_registry.ChangeToDepotDir('chromium')
shutil.move('v8', 'v8.bak')
shutil.move('v8_bleeding_edge', 'v8')
self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
self.depot_registry.SetDepotDir(
'v8_bleeding_edge', os.path.join(self.src_cwd, 'v8'))
self.depot_registry.SetDepotDir(
'v8', os.path.join(self.src_cwd, 'v8.bak'))
self.depot_registry.ChangeToDepotDir(current_depot)
depot_revision_list = self.GetRevisionList(current_depot,
end_revision,
start_revision)
self.depot_registry.ChangeToDepotDir('chromium')
return depot_revision_list
|
[
"def",
"PrepareToBisectOnDepot",
"(",
"self",
",",
"current_depot",
",",
"start_revision",
",",
"end_revision",
",",
"previous_revision",
")",
":",
"# Change into working directory of external library to run",
"# subsequent commands.",
"self",
".",
"depot_registry",
".",
"ChangeToDepotDir",
"(",
"current_depot",
")",
"# V8 (and possibly others) is merged in periodically. Bisecting",
"# this directory directly won't give much good info.",
"if",
"'custom_deps'",
"in",
"bisect_utils",
".",
"DEPOT_DEPS_NAME",
"[",
"current_depot",
"]",
":",
"config_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"src_cwd",
",",
"'..'",
")",
"if",
"bisect_utils",
".",
"RunGClientAndCreateConfig",
"(",
"self",
".",
"opts",
",",
"bisect_utils",
".",
"DEPOT_DEPS_NAME",
"[",
"current_depot",
"]",
"[",
"'custom_deps'",
"]",
",",
"cwd",
"=",
"config_path",
")",
":",
"return",
"[",
"]",
"if",
"bisect_utils",
".",
"RunGClient",
"(",
"[",
"'sync'",
",",
"'--revision'",
",",
"previous_revision",
"]",
",",
"cwd",
"=",
"self",
".",
"src_cwd",
")",
":",
"return",
"[",
"]",
"if",
"current_depot",
"==",
"'v8_bleeding_edge'",
":",
"self",
".",
"depot_registry",
".",
"ChangeToDepotDir",
"(",
"'chromium'",
")",
"shutil",
".",
"move",
"(",
"'v8'",
",",
"'v8.bak'",
")",
"shutil",
".",
"move",
"(",
"'v8_bleeding_edge'",
",",
"'v8'",
")",
"self",
".",
"cleanup_commands",
".",
"append",
"(",
"[",
"'mv'",
",",
"'v8'",
",",
"'v8_bleeding_edge'",
"]",
")",
"self",
".",
"cleanup_commands",
".",
"append",
"(",
"[",
"'mv'",
",",
"'v8.bak'",
",",
"'v8'",
"]",
")",
"self",
".",
"depot_registry",
".",
"SetDepotDir",
"(",
"'v8_bleeding_edge'",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"src_cwd",
",",
"'v8'",
")",
")",
"self",
".",
"depot_registry",
".",
"SetDepotDir",
"(",
"'v8'",
",",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"src_cwd",
",",
"'v8.bak'",
")",
")",
"self",
".",
"depot_registry",
".",
"ChangeToDepotDir",
"(",
"current_depot",
")",
"depot_revision_list",
"=",
"self",
".",
"GetRevisionList",
"(",
"current_depot",
",",
"end_revision",
",",
"start_revision",
")",
"self",
".",
"depot_registry",
".",
"ChangeToDepotDir",
"(",
"'chromium'",
")",
"return",
"depot_revision_list"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/auto_bisect/bisect_perf_regression.py#L1824-L1877
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/zoombar.py
|
python
|
ZoomBarImage.SetLabel
|
(self, label)
|
Sets the button label.
:param `label`: a string specifying the button label. May be an empty string
for no label.
|
Sets the button label.
|
[
"Sets",
"the",
"button",
"label",
"."
] |
def SetLabel(self, label):
"""
Sets the button label.
:param `label`: a string specifying the button label. May be an empty string
for no label.
"""
self._label = label
|
[
"def",
"SetLabel",
"(",
"self",
",",
"label",
")",
":",
"self",
".",
"_label",
"=",
"label"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/zoombar.py#L497-L505
|
||
Atarity/Lightpack
|
4dee73a443cba4c4073291febe450e6c1941f3af
|
Software/apiexamples/liOSC/OSC.py
|
python
|
OSCMessage.__getitem__
|
(self, i)
|
return self.values()[i]
|
Returns the indicated argument (or slice)
|
Returns the indicated argument (or slice)
|
[
"Returns",
"the",
"indicated",
"argument",
"(",
"or",
"slice",
")"
] |
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
|
[
"def",
"__getitem__",
"(",
"self",
",",
"i",
")",
":",
"return",
"self",
".",
"values",
"(",
")",
"[",
"i",
"]"
] |
https://github.com/Atarity/Lightpack/blob/4dee73a443cba4c4073291febe450e6c1941f3af/Software/apiexamples/liOSC/OSC.py#L362-L365
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_core.py
|
python
|
InputStream.seek
|
(*args, **kwargs)
|
return _core_.InputStream_seek(*args, **kwargs)
|
seek(self, int offset, int whence=0)
|
seek(self, int offset, int whence=0)
|
[
"seek",
"(",
"self",
"int",
"offset",
"int",
"whence",
"=",
"0",
")"
] |
def seek(*args, **kwargs):
"""seek(self, int offset, int whence=0)"""
return _core_.InputStream_seek(*args, **kwargs)
|
[
"def",
"seek",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"InputStream_seek",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L2182-L2184
|
|
ChromiumWebApps/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
third_party/closure_linter/closure_linter/closurizednamespacesinfo.py
|
python
|
ClosurizedNamespacesInfo.Reset
|
(self)
|
Resets the internal state to prepare for processing a new file.
|
Resets the internal state to prepare for processing a new file.
|
[
"Resets",
"the",
"internal",
"state",
"to",
"prepare",
"for",
"processing",
"a",
"new",
"file",
"."
] |
def Reset(self):
"""Resets the internal state to prepare for processing a new file."""
# A list of goog.provide tokens in the order they appeared in the file.
self._provide_tokens = []
# A list of goog.require tokens in the order they appeared in the file.
self._require_tokens = []
# Namespaces that are already goog.provided.
self._provided_namespaces = []
# Namespaces that are already goog.required.
self._required_namespaces = []
# Note that created_namespaces and used_namespaces contain both namespaces
# and identifiers because there are many existing cases where a method or
# constant is provided directly instead of its namespace. Ideally, these
# two lists would only have to contain namespaces.
# A list of tuples where the first element is the namespace of an identifier
# created in the file and the second is the identifier itself.
self._created_namespaces = []
# A list of tuples where the first element is the namespace of an identifier
# used in the file and the second is the identifier itself.
self._used_namespaces = []
# A list of seemingly-unnecessary namespaces that are goog.required() and
# annotated with @suppress {extraRequire}.
self._suppressed_requires = []
# A list of goog.provide tokens which are duplicates.
self._duplicate_provide_tokens = []
# A list of goog.require tokens which are duplicates.
self._duplicate_require_tokens = []
# Whether this file is in a goog.scope. Someday, we may add support
# for checking scopified namespaces, but for now let's just fail
# in a more reasonable way.
self._scopified_file = False
|
[
"def",
"Reset",
"(",
"self",
")",
":",
"# A list of goog.provide tokens in the order they appeared in the file.",
"self",
".",
"_provide_tokens",
"=",
"[",
"]",
"# A list of goog.require tokens in the order they appeared in the file.",
"self",
".",
"_require_tokens",
"=",
"[",
"]",
"# Namespaces that are already goog.provided.",
"self",
".",
"_provided_namespaces",
"=",
"[",
"]",
"# Namespaces that are already goog.required.",
"self",
".",
"_required_namespaces",
"=",
"[",
"]",
"# Note that created_namespaces and used_namespaces contain both namespaces",
"# and identifiers because there are many existing cases where a method or",
"# constant is provided directly instead of its namespace. Ideally, these",
"# two lists would only have to contain namespaces.",
"# A list of tuples where the first element is the namespace of an identifier",
"# created in the file and the second is the identifier itself.",
"self",
".",
"_created_namespaces",
"=",
"[",
"]",
"# A list of tuples where the first element is the namespace of an identifier",
"# used in the file and the second is the identifier itself.",
"self",
".",
"_used_namespaces",
"=",
"[",
"]",
"# A list of seemingly-unnecessary namespaces that are goog.required() and",
"# annotated with @suppress {extraRequire}.",
"self",
".",
"_suppressed_requires",
"=",
"[",
"]",
"# A list of goog.provide tokens which are duplicates.",
"self",
".",
"_duplicate_provide_tokens",
"=",
"[",
"]",
"# A list of goog.require tokens which are duplicates.",
"self",
".",
"_duplicate_require_tokens",
"=",
"[",
"]",
"# Whether this file is in a goog.scope. Someday, we may add support",
"# for checking scopified namespaces, but for now let's just fail",
"# in a more reasonable way.",
"self",
".",
"_scopified_file",
"=",
"False"
] |
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/closure_linter/closure_linter/closurizednamespacesinfo.py#L61-L102
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/pandas/py3/pandas/io/formats/style.py
|
python
|
Styler.use
|
(self, styles: list[tuple[Callable, tuple, dict]])
|
return self
|
Set the styles on the current ``Styler``.
Possibly uses styles from ``Styler.export``.
Parameters
----------
styles : list
List of style functions.
Returns
-------
self : Styler
See Also
--------
Styler.export : Export the styles to applied to the current ``Styler``.
|
Set the styles on the current ``Styler``.
|
[
"Set",
"the",
"styles",
"on",
"the",
"current",
"Styler",
"."
] |
def use(self, styles: list[tuple[Callable, tuple, dict]]) -> Styler:
"""
Set the styles on the current ``Styler``.
Possibly uses styles from ``Styler.export``.
Parameters
----------
styles : list
List of style functions.
Returns
-------
self : Styler
See Also
--------
Styler.export : Export the styles to applied to the current ``Styler``.
"""
self._todo.extend(styles)
return self
|
[
"def",
"use",
"(",
"self",
",",
"styles",
":",
"list",
"[",
"tuple",
"[",
"Callable",
",",
"tuple",
",",
"dict",
"]",
"]",
")",
"->",
"Styler",
":",
"self",
".",
"_todo",
".",
"extend",
"(",
"styles",
")",
"return",
"self"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/formats/style.py#L1379-L1399
|
|
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/saver.py
|
python
|
BaseSaverBuilder._AddShardedRestoreOps
|
(self, filename_tensor, per_device,
restore_sequentially, reshape)
|
return control_flow_ops.group(*sharded_restores, name="restore_all")
|
Add Ops to restore variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, SaveableObject) pairs, as returned by
_GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of the
corresponding variable.
Returns:
An Operation that restores the variables.
|
Add Ops to restore variables from multiple devices.
|
[
"Add",
"Ops",
"to",
"restore",
"variables",
"from",
"multiple",
"devices",
"."
] |
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to restore variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, SaveableObject) pairs, as returned by
_GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of the
corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(
self._AddRestoreOps(
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
|
[
"def",
"_AddShardedRestoreOps",
"(",
"self",
",",
"filename_tensor",
",",
"per_device",
",",
"restore_sequentially",
",",
"reshape",
")",
":",
"sharded_restores",
"=",
"[",
"]",
"for",
"shard",
",",
"(",
"device",
",",
"saveables",
")",
"in",
"enumerate",
"(",
"per_device",
")",
":",
"with",
"ops",
".",
"device",
"(",
"device",
")",
":",
"sharded_restores",
".",
"append",
"(",
"self",
".",
"_AddRestoreOps",
"(",
"filename_tensor",
",",
"saveables",
",",
"restore_sequentially",
",",
"reshape",
",",
"preferred_shard",
"=",
"shard",
",",
"name",
"=",
"\"restore_shard\"",
")",
")",
"return",
"control_flow_ops",
".",
"group",
"(",
"*",
"sharded_restores",
",",
"name",
"=",
"\"restore_all\"",
")"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/saver.py#L355-L382
|
|
weolar/miniblink49
|
1c4678db0594a4abde23d3ebbcc7cd13c3170777
|
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py
|
python
|
MessageSender.send
|
(self, message)
|
Send a message, blocking.
|
Send a message, blocking.
|
[
"Send",
"a",
"message",
"blocking",
"."
] |
def send(self, message):
"""Send a message, blocking."""
condition = threading.Condition()
condition.acquire()
self._queue.put((message, condition))
condition.wait()
|
[
"def",
"send",
"(",
"self",
",",
"message",
")",
":",
"condition",
"=",
"threading",
".",
"Condition",
"(",
")",
"condition",
".",
"acquire",
"(",
")",
"self",
".",
"_queue",
".",
"put",
"(",
"(",
"message",
",",
"condition",
")",
")",
"condition",
".",
"wait",
"(",
")"
] |
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/msgutil.py#L205-L211
|
||
alexgkendall/caffe-segnet
|
344c113bf1832886f1cbe9f33ffe28a3beeaf412
|
scripts/cpp_lint.py
|
python
|
CheckStyle
|
(filename, clean_lines, linenum, file_extension, nesting_state,
error)
|
Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
|
Checks rules from the 'C++ style rules' section of cppguide.html.
|
[
"Checks",
"rules",
"from",
"the",
"C",
"++",
"style",
"rules",
"section",
"of",
"cppguide",
".",
"html",
"."
] |
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings,
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for section labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developers fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
|
[
"def",
"CheckStyle",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"file_extension",
",",
"nesting_state",
",",
"error",
")",
":",
"# Don't use \"elided\" lines here, otherwise we can't check commented lines.",
"# Don't want to use \"raw\" either, because we don't want to check inside C++11",
"# raw strings,",
"raw_lines",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"raw_lines",
"[",
"linenum",
"]",
"if",
"line",
".",
"find",
"(",
"'\\t'",
")",
"!=",
"-",
"1",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/tab'",
",",
"1",
",",
"'Tab found; better to use spaces'",
")",
"# One or three blank spaces at the beginning of the line is weird; it's",
"# hard to reconcile that with 2-space indents.",
"# NOTE: here are the conditions rob pike used for his tests. Mine aren't",
"# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces",
"# if(RLENGTH > 20) complain = 0;",
"# if(match($0, \" +(error|private|public|protected):\")) complain = 0;",
"# if(match(prev, \"&& *$\")) complain = 0;",
"# if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;",
"# if(match(prev, \"[\\\",=><] *$\")) complain = 0;",
"# if(match($0, \" <<\")) complain = 0;",
"# if(match(prev, \" +for \\\\(\")) complain = 0;",
"# if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;",
"initial_spaces",
"=",
"0",
"cleansed_line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"while",
"initial_spaces",
"<",
"len",
"(",
"line",
")",
"and",
"line",
"[",
"initial_spaces",
"]",
"==",
"' '",
":",
"initial_spaces",
"+=",
"1",
"if",
"line",
"and",
"line",
"[",
"-",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/end_of_line'",
",",
"4",
",",
"'Line ends in whitespace. Consider deleting these extra spaces.'",
")",
"# There are certain situations we allow one space, notably for section labels",
"elif",
"(",
"(",
"initial_spaces",
"==",
"1",
"or",
"initial_spaces",
"==",
"3",
")",
"and",
"not",
"Match",
"(",
"r'\\s*\\w+\\s*:\\s*$'",
",",
"cleansed_line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/indent'",
",",
"3",
",",
"'Weird number of spaces at line-start. '",
"'Are you using a 2-space indent?'",
")",
"# Check if the line is a header guard.",
"is_header_guard",
"=",
"False",
"if",
"file_extension",
"==",
"'h'",
":",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"if",
"(",
"line",
".",
"startswith",
"(",
"'#ifndef %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#define %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#endif // %s'",
"%",
"cppvar",
")",
")",
":",
"is_header_guard",
"=",
"True",
"# #include lines and header guards can be long, since there's no clean way to",
"# split them.",
"#",
"# URLs can be long too. It's possible to split these, but it makes them",
"# harder to cut&paste.",
"#",
"# The \"$Id:...$\" comment may also get very long without it being the",
"# developers fault.",
"if",
"(",
"not",
"line",
".",
"startswith",
"(",
"'#include'",
")",
"and",
"not",
"is_header_guard",
"and",
"not",
"Match",
"(",
"r'^\\s*//.*http(s?)://\\S*$'",
",",
"line",
")",
"and",
"not",
"Match",
"(",
"r'^// \\$Id:.*#[0-9]+ \\$$'",
",",
"line",
")",
")",
":",
"line_width",
"=",
"GetLineWidth",
"(",
"line",
")",
"extended_length",
"=",
"int",
"(",
"(",
"_line_length",
"*",
"1.25",
")",
")",
"if",
"line_width",
">",
"extended_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"4",
",",
"'Lines should very rarely be longer than %i characters'",
"%",
"extended_length",
")",
"elif",
"line_width",
">",
"_line_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"2",
",",
"'Lines should be <= %i characters long'",
"%",
"_line_length",
")",
"if",
"(",
"cleansed_line",
".",
"count",
"(",
"';'",
")",
">",
"1",
"and",
"# for loops are allowed two ;'s (and may run over two lines).",
"cleansed_line",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"and",
"(",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"or",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"';'",
")",
"!=",
"-",
"1",
")",
"and",
"# It's ok to have many commands in a switch case that fits in 1 line",
"not",
"(",
"(",
"cleansed_line",
".",
"find",
"(",
"'case '",
")",
"!=",
"-",
"1",
"or",
"cleansed_line",
".",
"find",
"(",
"'default:'",
")",
"!=",
"-",
"1",
")",
"and",
"cleansed_line",
".",
"find",
"(",
"'break;'",
")",
"!=",
"-",
"1",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"0",
",",
"'More than one command on the same line'",
")",
"# Some more style checks",
"CheckBraces",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckEmptyBlockBody",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAccess",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAltTokens",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"classinfo",
":",
"CheckSectionSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"classinfo",
",",
"linenum",
",",
"error",
")"
] |
https://github.com/alexgkendall/caffe-segnet/blob/344c113bf1832886f1cbe9f33ffe28a3beeaf412/scripts/cpp_lint.py#L3459-L3563
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/stc.py
|
python
|
StyledTextCtrl.UpperCase
|
(*args, **kwargs)
|
return _stc.StyledTextCtrl_UpperCase(*args, **kwargs)
|
UpperCase(self)
Transform the selection to upper case.
|
UpperCase(self)
|
[
"UpperCase",
"(",
"self",
")"
] |
def UpperCase(*args, **kwargs):
"""
UpperCase(self)
Transform the selection to upper case.
"""
return _stc.StyledTextCtrl_UpperCase(*args, **kwargs)
|
[
"def",
"UpperCase",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_UpperCase",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/stc.py#L4676-L4682
|
|
tpfister/caffe-heatmap
|
4db69ef53e6b8a0b3b4ebb29328b0ab3dbf67c4e
|
scripts/cpp_lint.py
|
python
|
_CppLintState.SetVerboseLevel
|
(self, level)
|
return last_verbose_level
|
Sets the module's verbosity, and returns the previous setting.
|
Sets the module's verbosity, and returns the previous setting.
|
[
"Sets",
"the",
"module",
"s",
"verbosity",
"and",
"returns",
"the",
"previous",
"setting",
"."
] |
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
|
[
"def",
"SetVerboseLevel",
"(",
"self",
",",
"level",
")",
":",
"last_verbose_level",
"=",
"self",
".",
"verbose_level",
"self",
".",
"verbose_level",
"=",
"level",
"return",
"last_verbose_level"
] |
https://github.com/tpfister/caffe-heatmap/blob/4db69ef53e6b8a0b3b4ebb29328b0ab3dbf67c4e/scripts/cpp_lint.py#L707-L711
|
|
emscripten-core/emscripten
|
0d413d3c5af8b28349682496edc14656f5700c2f
|
third_party/ply/example/ansic/cparse.py
|
python
|
p_relational_expression_5
|
(t)
|
relational_expression : relational_expression GE shift_expression
|
relational_expression : relational_expression GE shift_expression
|
[
"relational_expression",
":",
"relational_expression",
"GE",
"shift_expression"
] |
def p_relational_expression_5(t):
'relational_expression : relational_expression GE shift_expression'
pass
|
[
"def",
"p_relational_expression_5",
"(",
"t",
")",
":",
"pass"
] |
https://github.com/emscripten-core/emscripten/blob/0d413d3c5af8b28349682496edc14656f5700c2f/third_party/ply/example/ansic/cparse.py#L697-L699
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/propgrid.py
|
python
|
PropertyGridPage.FitColumns
|
(*args, **kwargs)
|
return _propgrid.PropertyGridPage_FitColumns(*args, **kwargs)
|
FitColumns(self) -> Size
|
FitColumns(self) -> Size
|
[
"FitColumns",
"(",
"self",
")",
"-",
">",
"Size"
] |
def FitColumns(*args, **kwargs):
"""FitColumns(self) -> Size"""
return _propgrid.PropertyGridPage_FitColumns(*args, **kwargs)
|
[
"def",
"FitColumns",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGridPage_FitColumns",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/propgrid.py#L3355-L3357
|
|
OpenLightingProject/ola
|
d1433a1bed73276fbe55ce18c03b1c208237decc
|
tools/rdm/ModelCollector.py
|
python
|
ModelCollector._CheckForAckOrNack
|
(self, response)
|
return True
|
Check for all the different error conditions.
Returns:
True if this response was an ACK or NACK, False for all other cases.
|
Check for all the different error conditions.
|
[
"Check",
"for",
"all",
"the",
"different",
"error",
"conditions",
"."
] |
def _CheckForAckOrNack(self, response):
"""Check for all the different error conditions.
Returns:
True if this response was an ACK or NACK, False for all other cases.
"""
if not response.status.Succeeded():
print('Status: %s' % response.status.message)
self.wrapper.Stop()
return False
if response.response_code != OlaClient.RDM_COMPLETED_OK:
print('Got RDM failure: %s' % response.ResponseCodeAsString())
self.wrapper.Stop()
return False
if response.response_type == OlaClient.RDM_ACK_TIMER:
# schedule the fetch
logging.debug('Got ack timer for %d ms' % response.ack_timer)
self.wrapper.AddEvent(response.ack_timer, self._FetchQueuedMessages)
return False
return True
|
[
"def",
"_CheckForAckOrNack",
"(",
"self",
",",
"response",
")",
":",
"if",
"not",
"response",
".",
"status",
".",
"Succeeded",
"(",
")",
":",
"print",
"(",
"'Status: %s'",
"%",
"response",
".",
"status",
".",
"message",
")",
"self",
".",
"wrapper",
".",
"Stop",
"(",
")",
"return",
"False",
"if",
"response",
".",
"response_code",
"!=",
"OlaClient",
".",
"RDM_COMPLETED_OK",
":",
"print",
"(",
"'Got RDM failure: %s'",
"%",
"response",
".",
"ResponseCodeAsString",
"(",
")",
")",
"self",
".",
"wrapper",
".",
"Stop",
"(",
")",
"return",
"False",
"if",
"response",
".",
"response_type",
"==",
"OlaClient",
".",
"RDM_ACK_TIMER",
":",
"# schedule the fetch",
"logging",
".",
"debug",
"(",
"'Got ack timer for %d ms'",
"%",
"response",
".",
"ack_timer",
")",
"self",
".",
"wrapper",
".",
"AddEvent",
"(",
"response",
".",
"ack_timer",
",",
"self",
".",
"_FetchQueuedMessages",
")",
"return",
"False",
"return",
"True"
] |
https://github.com/OpenLightingProject/ola/blob/d1433a1bed73276fbe55ce18c03b1c208237decc/tools/rdm/ModelCollector.py#L665-L686
|
|
triton-inference-server/server
|
11a11d9cb1e9734ed9fd305e752da70f07d1992f
|
qa/common/check_valgrind_log.py
|
python
|
check_valgrind_log
|
(log_file)
|
return filtered_leak_records
|
Counts the definite leaks reported
by valgrind, matches them against
the whitelist.
Parameters
----------
log_file: str
The path to the log file
Returns
-------
list of str
a list of the leak records as strings
|
Counts the definite leaks reported
by valgrind, matches them against
the whitelist.
|
[
"Counts",
"the",
"definite",
"leaks",
"reported",
"by",
"valgrind",
"matches",
"them",
"against",
"the",
"whitelist",
"."
] |
def check_valgrind_log(log_file):
"""
Counts the definite leaks reported
by valgrind, matches them against
the whitelist.
Parameters
----------
log_file: str
The path to the log file
Returns
-------
list of str
a list of the leak records as strings
"""
with open(args.input_log_file, 'r') as f:
logs = f.read()
# Find the pid and start and end of definite leak reports
pid_token_end = logs.find('==', logs.find('==') + 1) + 2
pid_token = logs[:pid_token_end]
leaks_start = logs.find('are definitely lost')
first_leak_line = logs.rfind('\n', 0, leaks_start)
if leaks_start == -1 or first_leak_line == -1:
# No leaks in log
return []
end_of_leaks = logs.find(f"{pid_token} LEAK SUMMARY:")
if end_of_leaks == -1:
print(
f"\n***\n*** Test Failed for {log_file}: Malformed Valgrind log.\n***"
)
sys.exit(1)
leak_records_section = logs[first_leak_line + 1:end_of_leaks]
# Each leak record is separated by a line containing '==<pid>== \n'
record_separator = f"{pid_token} \n"
leak_records = leak_records_section.split(record_separator)
# Check each leak against whitelist
filtered_leak_records = []
for leak in leak_records:
for token in LEAK_WHITE_LIST:
if not leak or leak.find(token) != -1:
break
else:
filtered_leak_records.append(leak)
return filtered_leak_records
|
[
"def",
"check_valgrind_log",
"(",
"log_file",
")",
":",
"with",
"open",
"(",
"args",
".",
"input_log_file",
",",
"'r'",
")",
"as",
"f",
":",
"logs",
"=",
"f",
".",
"read",
"(",
")",
"# Find the pid and start and end of definite leak reports",
"pid_token_end",
"=",
"logs",
".",
"find",
"(",
"'=='",
",",
"logs",
".",
"find",
"(",
"'=='",
")",
"+",
"1",
")",
"+",
"2",
"pid_token",
"=",
"logs",
"[",
":",
"pid_token_end",
"]",
"leaks_start",
"=",
"logs",
".",
"find",
"(",
"'are definitely lost'",
")",
"first_leak_line",
"=",
"logs",
".",
"rfind",
"(",
"'\\n'",
",",
"0",
",",
"leaks_start",
")",
"if",
"leaks_start",
"==",
"-",
"1",
"or",
"first_leak_line",
"==",
"-",
"1",
":",
"# No leaks in log",
"return",
"[",
"]",
"end_of_leaks",
"=",
"logs",
".",
"find",
"(",
"f\"{pid_token} LEAK SUMMARY:\"",
")",
"if",
"end_of_leaks",
"==",
"-",
"1",
":",
"print",
"(",
"f\"\\n***\\n*** Test Failed for {log_file}: Malformed Valgrind log.\\n***\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"leak_records_section",
"=",
"logs",
"[",
"first_leak_line",
"+",
"1",
":",
"end_of_leaks",
"]",
"# Each leak record is separated by a line containing '==<pid>== \\n'",
"record_separator",
"=",
"f\"{pid_token} \\n\"",
"leak_records",
"=",
"leak_records_section",
".",
"split",
"(",
"record_separator",
")",
"# Check each leak against whitelist",
"filtered_leak_records",
"=",
"[",
"]",
"for",
"leak",
"in",
"leak_records",
":",
"for",
"token",
"in",
"LEAK_WHITE_LIST",
":",
"if",
"not",
"leak",
"or",
"leak",
".",
"find",
"(",
"token",
")",
"!=",
"-",
"1",
":",
"break",
"else",
":",
"filtered_leak_records",
".",
"append",
"(",
"leak",
")",
"return",
"filtered_leak_records"
] |
https://github.com/triton-inference-server/server/blob/11a11d9cb1e9734ed9fd305e752da70f07d1992f/qa/common/check_valgrind_log.py#L45-L94
|
|
microsoft/onnxruntime
|
f92e47e95b13a240e37caf7b36577983544f98fc
|
onnxruntime/python/tools/transformers/shape_optimizer.py
|
python
|
BertOnnxModelShapeOptimizer.add_shape_initializer
|
(self, shape)
|
return tensor
|
Add an initializer for constant shape.
|
Add an initializer for constant shape.
|
[
"Add",
"an",
"initializer",
"for",
"constant",
"shape",
"."
] |
def add_shape_initializer(self, shape):
"""
Add an initializer for constant shape.
"""
shape_value = np.asarray(shape, dtype=np.int64)
constant_shape_name = self.create_node_name('Constant', CONSTANT_SHAPE_NAME_PREFIX)
tensor = onnx.helper.make_tensor(name=constant_shape_name,
data_type=TensorProto.INT64,
dims=shape_value.shape,
vals=shape_value)
self.add_initializer(tensor)
return tensor
|
[
"def",
"add_shape_initializer",
"(",
"self",
",",
"shape",
")",
":",
"shape_value",
"=",
"np",
".",
"asarray",
"(",
"shape",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"constant_shape_name",
"=",
"self",
".",
"create_node_name",
"(",
"'Constant'",
",",
"CONSTANT_SHAPE_NAME_PREFIX",
")",
"tensor",
"=",
"onnx",
".",
"helper",
".",
"make_tensor",
"(",
"name",
"=",
"constant_shape_name",
",",
"data_type",
"=",
"TensorProto",
".",
"INT64",
",",
"dims",
"=",
"shape_value",
".",
"shape",
",",
"vals",
"=",
"shape_value",
")",
"self",
".",
"add_initializer",
"(",
"tensor",
")",
"return",
"tensor"
] |
https://github.com/microsoft/onnxruntime/blob/f92e47e95b13a240e37caf7b36577983544f98fc/onnxruntime/python/tools/transformers/shape_optimizer.py#L40-L51
|
|
OpenGenus/quark
|
225ad96efdfcc66cb6584a756c17eb3871e6eb62
|
code/code/cellular_automaton/src/genetic_algorithm/genetic_algorithm.py
|
python
|
weighted_choice
|
(items)
|
return item
|
Chooses a random element from items, where items is a list of tuples in
the form (item, weight). weight determines the probability of choosing its
respective item. Note: this function is borrowed from ActiveState Recipes.
|
Chooses a random element from items, where items is a list of tuples in
the form (item, weight). weight determines the probability of choosing its
respective item. Note: this function is borrowed from ActiveState Recipes.
|
[
"Chooses",
"a",
"random",
"element",
"from",
"items",
"where",
"items",
"is",
"a",
"list",
"of",
"tuples",
"in",
"the",
"form",
"(",
"item",
"weight",
")",
".",
"weight",
"determines",
"the",
"probability",
"of",
"choosing",
"its",
"respective",
"item",
".",
"Note",
":",
"this",
"function",
"is",
"borrowed",
"from",
"ActiveState",
"Recipes",
"."
] |
def weighted_choice(items):
"""
Chooses a random element from items, where items is a list of tuples in
the form (item, weight). weight determines the probability of choosing its
respective item. Note: this function is borrowed from ActiveState Recipes.
"""
weight_total = sum((item[1] for item in items))
n = random.uniform(0, weight_total)
for item, weight in items:
if n < weight:
return item
n = n - weight
return item
|
[
"def",
"weighted_choice",
"(",
"items",
")",
":",
"weight_total",
"=",
"sum",
"(",
"(",
"item",
"[",
"1",
"]",
"for",
"item",
"in",
"items",
")",
")",
"n",
"=",
"random",
".",
"uniform",
"(",
"0",
",",
"weight_total",
")",
"for",
"item",
",",
"weight",
"in",
"items",
":",
"if",
"n",
"<",
"weight",
":",
"return",
"item",
"n",
"=",
"n",
"-",
"weight",
"return",
"item"
] |
https://github.com/OpenGenus/quark/blob/225ad96efdfcc66cb6584a756c17eb3871e6eb62/code/code/cellular_automaton/src/genetic_algorithm/genetic_algorithm.py#L18-L30
|
|
lmnt-com/haste
|
5f704f6132c4aacf2310120b7a1c8d0eea441ab9
|
frameworks/pytorch/layer_norm_gru.py
|
python
|
LayerNormGRU.reset_parameters
|
(self)
|
Resets this layer's parameters to their initial values.
|
Resets this layer's parameters to their initial values.
|
[
"Resets",
"this",
"layer",
"s",
"parameters",
"to",
"their",
"initial",
"values",
"."
] |
def reset_parameters(self):
"""Resets this layer's parameters to their initial values."""
hidden_size = self.hidden_size
for i in range(3):
nn.init.xavier_uniform_(self.kernel[:, i*hidden_size:(i+1)*hidden_size])
nn.init.orthogonal_(self.recurrent_kernel[:, i*hidden_size:(i+1)*hidden_size])
nn.init.zeros_(self.bias)
nn.init.zeros_(self.recurrent_bias)
nn.init.ones_(self.gamma)
|
[
"def",
"reset_parameters",
"(",
"self",
")",
":",
"hidden_size",
"=",
"self",
".",
"hidden_size",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"nn",
".",
"init",
".",
"xavier_uniform_",
"(",
"self",
".",
"kernel",
"[",
":",
",",
"i",
"*",
"hidden_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"hidden_size",
"]",
")",
"nn",
".",
"init",
".",
"orthogonal_",
"(",
"self",
".",
"recurrent_kernel",
"[",
":",
",",
"i",
"*",
"hidden_size",
":",
"(",
"i",
"+",
"1",
")",
"*",
"hidden_size",
"]",
")",
"nn",
".",
"init",
".",
"zeros_",
"(",
"self",
".",
"bias",
")",
"nn",
".",
"init",
".",
"zeros_",
"(",
"self",
".",
"recurrent_bias",
")",
"nn",
".",
"init",
".",
"ones_",
"(",
"self",
".",
"gamma",
")"
] |
https://github.com/lmnt-com/haste/blob/5f704f6132c4aacf2310120b7a1c8d0eea441ab9/frameworks/pytorch/layer_norm_gru.py#L164-L172
|
||
PlatformLab/RAMCloud
|
b1866af19124325a6dfd8cbc267e2e3ef1f965d1
|
bindings/python/retries.py
|
python
|
ExponentialBackoff.__init__
|
(self, start=0.1, scale=2.0, limit=30.0)
|
@type start: C{float}
@param start: the time to sleep after the first iteration
@type scale: C{float}
@param scale: the factor to scale the time by
@type limit: C{float}
@param limit: the maximum time to sleep for between iterations
|
@type start: C{float}
@param start: the time to sleep after the first iteration
|
[
"@type",
"start",
":",
"C",
"{",
"float",
"}",
"@param",
"start",
":",
"the",
"time",
"to",
"sleep",
"after",
"the",
"first",
"iteration"
] |
def __init__(self, start=0.1, scale=2.0, limit=30.0):
"""
@type start: C{float}
@param start: the time to sleep after the first iteration
@type scale: C{float}
@param scale: the factor to scale the time by
@type limit: C{float}
@param limit: the maximum time to sleep for between iterations
"""
def wait_time_gen():
time = start
while time < limit:
yield time
time *= scale
yield limit
BackoffRetry.__init__(self, wait_time_gen())
|
[
"def",
"__init__",
"(",
"self",
",",
"start",
"=",
"0.1",
",",
"scale",
"=",
"2.0",
",",
"limit",
"=",
"30.0",
")",
":",
"def",
"wait_time_gen",
"(",
")",
":",
"time",
"=",
"start",
"while",
"time",
"<",
"limit",
":",
"yield",
"time",
"time",
"*=",
"scale",
"yield",
"limit",
"BackoffRetry",
".",
"__init__",
"(",
"self",
",",
"wait_time_gen",
"(",
")",
")"
] |
https://github.com/PlatformLab/RAMCloud/blob/b1866af19124325a6dfd8cbc267e2e3ef1f965d1/bindings/python/retries.py#L168-L186
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_misc.py
|
python
|
DateTime.GetMillisecond
|
(*args, **kwargs)
|
return _misc_.DateTime_GetMillisecond(*args, **kwargs)
|
GetMillisecond(self, wxDateTime::TimeZone tz=LOCAL_TZ) -> int
|
GetMillisecond(self, wxDateTime::TimeZone tz=LOCAL_TZ) -> int
|
[
"GetMillisecond",
"(",
"self",
"wxDateTime",
"::",
"TimeZone",
"tz",
"=",
"LOCAL_TZ",
")",
"-",
">",
"int"
] |
def GetMillisecond(*args, **kwargs):
"""GetMillisecond(self, wxDateTime::TimeZone tz=LOCAL_TZ) -> int"""
return _misc_.DateTime_GetMillisecond(*args, **kwargs)
|
[
"def",
"GetMillisecond",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime_GetMillisecond",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L4005-L4007
|
|
rizonesoft/Notepad3
|
33cbe20f7ce563541d2a6ceaf22cabeffc826542
|
src/uchardet/uchardet/script/charsets/db.py
|
python
|
load
|
(charset_names)
|
return charsets
|
Load a list of charsets.
This function will return a dictionary of charsets from our
charset database.
:param charset_names: a list of supported charset names.
:return: a dictionary with all the loaded charsets.
:rtype: dict
|
Load a list of charsets.
|
[
"Load",
"a",
"list",
"of",
"charsets",
"."
] |
def load(charset_names):
'''
Load a list of charsets.
This function will return a dictionary of charsets from our
charset database.
:param charset_names: a list of supported charset names.
:return: a dictionary with all the loaded charsets.
:rtype: dict
'''
charsets = {}
# Temporarily change the search path for modules.
sys_path_backup = sys.path
current_dir = os.path.dirname(os.path.realpath(__file__))
sys.path = [current_dir + '/../charsets']
for name in charset_names:
try:
charset = importlib.import_module(name.lower())
except ImportError:
print('Unknown charset "{}": '
'file "charsets/{}.py" does not exist.'.format(name, name.lower()))
exit(1)
charsets[charset.name] = charset
# Set back the default module paths.
sys.path = sys_path_backup
return charsets
|
[
"def",
"load",
"(",
"charset_names",
")",
":",
"charsets",
"=",
"{",
"}",
"# Temporarily change the search path for modules.",
"sys_path_backup",
"=",
"sys",
".",
"path",
"current_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"sys",
".",
"path",
"=",
"[",
"current_dir",
"+",
"'/../charsets'",
"]",
"for",
"name",
"in",
"charset_names",
":",
"try",
":",
"charset",
"=",
"importlib",
".",
"import_module",
"(",
"name",
".",
"lower",
"(",
")",
")",
"except",
"ImportError",
":",
"print",
"(",
"'Unknown charset \"{}\": '",
"'file \"charsets/{}.py\" does not exist.'",
".",
"format",
"(",
"name",
",",
"name",
".",
"lower",
"(",
")",
")",
")",
"exit",
"(",
"1",
")",
"charsets",
"[",
"charset",
".",
"name",
"]",
"=",
"charset",
"# Set back the default module paths.",
"sys",
".",
"path",
"=",
"sys_path_backup",
"return",
"charsets"
] |
https://github.com/rizonesoft/Notepad3/blob/33cbe20f7ce563541d2a6ceaf22cabeffc826542/src/uchardet/uchardet/script/charsets/db.py#L45-L73
|
|
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/autograph/impl/api.py
|
python
|
do_not_convert_internal
|
(f)
|
return f
|
Decorator that marks internal functions which do not need conversion.
|
Decorator that marks internal functions which do not need conversion.
|
[
"Decorator",
"that",
"marks",
"internal",
"functions",
"which",
"do",
"not",
"need",
"conversion",
"."
] |
def do_not_convert_internal(f):
"""Decorator that marks internal functions which do not need conversion."""
setattr(f, '__ag_compiled', True)
return f
|
[
"def",
"do_not_convert_internal",
"(",
"f",
")",
":",
"setattr",
"(",
"f",
",",
"'__ag_compiled'",
",",
"True",
")",
"return",
"f"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/autograph/impl/api.py#L267-L270
|
|
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/corrections_tab_widget/background_corrections_presenter.py
|
python
|
BackgroundCorrectionsPresenter._selected_runs_and_groups
|
(self)
|
return self.model.all_runs_and_groups() if apply_to_all else self.view.selected_run_and_group()
|
Returns the runs and groups to apply the parameter changes to.
|
Returns the runs and groups to apply the parameter changes to.
|
[
"Returns",
"the",
"runs",
"and",
"groups",
"to",
"apply",
"the",
"parameter",
"changes",
"to",
"."
] |
def _selected_runs_and_groups(self) -> tuple:
"""Returns the runs and groups to apply the parameter changes to."""
apply_to_all = self.view.apply_table_changes_to_all()
return self.model.all_runs_and_groups() if apply_to_all else self.view.selected_run_and_group()
|
[
"def",
"_selected_runs_and_groups",
"(",
"self",
")",
"->",
"tuple",
":",
"apply_to_all",
"=",
"self",
".",
"view",
".",
"apply_table_changes_to_all",
"(",
")",
"return",
"self",
".",
"model",
".",
"all_runs_and_groups",
"(",
")",
"if",
"apply_to_all",
"else",
"self",
".",
"view",
".",
"selected_run_and_group",
"(",
")"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/corrections_tab_widget/background_corrections_presenter.py#L208-L211
|
|
bigartm/bigartm
|
47e37f982de87aa67bfd475ff1f39da696b181b3
|
3rdparty/protobuf-3.0.0/python/google/protobuf/text_format.py
|
python
|
_SkipFieldContents
|
(tokenizer)
|
Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
|
Skips over contents (value or message) of a field.
|
[
"Skips",
"over",
"contents",
"(",
"value",
"or",
"message",
")",
"of",
"a",
"field",
"."
] |
def _SkipFieldContents(tokenizer):
"""Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
# Try to guess the type of this field.
# If this field is not a message, there should be a ":" between the
# field name and the field value and also the field value should not
# start with "{" or "<" which indicates the beginning of a message body.
# If there is no ":" or there is a "{" or "<" after ":", this field has
# to be a message or the input is ill-formed.
if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
'{') and not tokenizer.LookingAt('<'):
_SkipFieldValue(tokenizer)
else:
_SkipFieldMessage(tokenizer)
|
[
"def",
"_SkipFieldContents",
"(",
"tokenizer",
")",
":",
"# Try to guess the type of this field.",
"# If this field is not a message, there should be a \":\" between the",
"# field name and the field value and also the field value should not",
"# start with \"{\" or \"<\" which indicates the beginning of a message body.",
"# If there is no \":\" or there is a \"{\" or \"<\" after \":\", this field has",
"# to be a message or the input is ill-formed.",
"if",
"tokenizer",
".",
"TryConsume",
"(",
"':'",
")",
"and",
"not",
"tokenizer",
".",
"LookingAt",
"(",
"'{'",
")",
"and",
"not",
"tokenizer",
".",
"LookingAt",
"(",
"'<'",
")",
":",
"_SkipFieldValue",
"(",
"tokenizer",
")",
"else",
":",
"_SkipFieldMessage",
"(",
"tokenizer",
")"
] |
https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/3rdparty/protobuf-3.0.0/python/google/protobuf/text_format.py#L841-L857
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/ticker.py
|
python
|
Ticker.IsTicking
|
(self)
|
return self.timer.IsRunning()
|
Is the ticker ticking? ie, is the text moving?
|
Is the ticker ticking? ie, is the text moving?
|
[
"Is",
"the",
"ticker",
"ticking?",
"ie",
"is",
"the",
"text",
"moving?"
] |
def IsTicking(self):
"""Is the ticker ticking? ie, is the text moving?"""
return self.timer.IsRunning()
|
[
"def",
"IsTicking",
"(",
"self",
")",
":",
"return",
"self",
".",
"timer",
".",
"IsRunning",
"(",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/ticker.py#L74-L76
|
|
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSILLAutoProcess.py
|
python
|
needs_processing
|
(property_value, process_reduction_type)
|
return [do_process, ws_name]
|
Checks whether a given unary reduction needs processing or is already cached
in ADS with expected name.
@param property_value: the string value of the corresponding MultipleFile
input property
@param process_reduction_type: the reduction_type of process
|
Checks whether a given unary reduction needs processing or is already cached
in ADS with expected name.
|
[
"Checks",
"whether",
"a",
"given",
"unary",
"reduction",
"needs",
"processing",
"or",
"is",
"already",
"cached",
"in",
"ADS",
"with",
"expected",
"name",
"."
] |
def needs_processing(property_value, process_reduction_type):
"""
Checks whether a given unary reduction needs processing or is already cached
in ADS with expected name.
@param property_value: the string value of the corresponding MultipleFile
input property
@param process_reduction_type: the reduction_type of process
"""
do_process = False
ws_name = ''
if property_value:
run_number = get_run_number(property_value)
ws_name = run_number + '_' + process_reduction_type
if mtd.doesExist(ws_name):
if isinstance(mtd[ws_name], WorkspaceGroup):
run = mtd[ws_name][0].getRun()
else:
run = mtd[ws_name].getRun()
if run.hasProperty('ProcessedAs'):
process = run.getLogData('ProcessedAs').value
if process == process_reduction_type:
logger.notice('Reusing {0} workspace: {1}'
.format(process_reduction_type, ws_name))
else:
logger.warning('{0} workspace found, but processed '
'differently: {1}'
.format(process_reduction_type, ws_name))
do_process = True
else:
logger.warning('{0} workspace found, but missing the '
'ProcessedAs flag: {1}'
.format(process_reduction_type, ws_name))
do_process = True
else:
do_process = True
return [do_process, ws_name]
|
[
"def",
"needs_processing",
"(",
"property_value",
",",
"process_reduction_type",
")",
":",
"do_process",
"=",
"False",
"ws_name",
"=",
"''",
"if",
"property_value",
":",
"run_number",
"=",
"get_run_number",
"(",
"property_value",
")",
"ws_name",
"=",
"run_number",
"+",
"'_'",
"+",
"process_reduction_type",
"if",
"mtd",
".",
"doesExist",
"(",
"ws_name",
")",
":",
"if",
"isinstance",
"(",
"mtd",
"[",
"ws_name",
"]",
",",
"WorkspaceGroup",
")",
":",
"run",
"=",
"mtd",
"[",
"ws_name",
"]",
"[",
"0",
"]",
".",
"getRun",
"(",
")",
"else",
":",
"run",
"=",
"mtd",
"[",
"ws_name",
"]",
".",
"getRun",
"(",
")",
"if",
"run",
".",
"hasProperty",
"(",
"'ProcessedAs'",
")",
":",
"process",
"=",
"run",
".",
"getLogData",
"(",
"'ProcessedAs'",
")",
".",
"value",
"if",
"process",
"==",
"process_reduction_type",
":",
"logger",
".",
"notice",
"(",
"'Reusing {0} workspace: {1}'",
".",
"format",
"(",
"process_reduction_type",
",",
"ws_name",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"'{0} workspace found, but processed '",
"'differently: {1}'",
".",
"format",
"(",
"process_reduction_type",
",",
"ws_name",
")",
")",
"do_process",
"=",
"True",
"else",
":",
"logger",
".",
"warning",
"(",
"'{0} workspace found, but missing the '",
"'ProcessedAs flag: {1}'",
".",
"format",
"(",
"process_reduction_type",
",",
"ws_name",
")",
")",
"do_process",
"=",
"True",
"else",
":",
"do_process",
"=",
"True",
"return",
"[",
"do_process",
",",
"ws_name",
"]"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSILLAutoProcess.py#L25-L60
|
|
miyosuda/TensorFlowAndroidDemo
|
35903e0221aa5f109ea2dbef27f20b52e317f42d
|
jni-build/jni/include/tensorflow/python/ops/math_ops.py
|
python
|
truediv
|
(x, y, name=None)
|
Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
|
Divides x / y elementwise, always producing floating point results.
|
[
"Divides",
"x",
"/",
"y",
"elementwise",
"always",
"producing",
"floating",
"point",
"results",
"."
] |
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.op_scope([x, y], name, "truediv") as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.div(x, y, name=name)
|
[
"def",
"truediv",
"(",
"x",
",",
"y",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"x",
",",
"y",
"]",
",",
"name",
",",
"\"truediv\"",
")",
"as",
"name",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"\"x\"",
")",
"y",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"y",
",",
"name",
"=",
"\"y\"",
")",
"x_dtype",
"=",
"x",
".",
"dtype",
".",
"base_dtype",
"y_dtype",
"=",
"y",
".",
"dtype",
".",
"base_dtype",
"if",
"x_dtype",
"!=",
"y_dtype",
":",
"raise",
"TypeError",
"(",
"\"x and y must have the same dtype, got %r != %r\"",
"%",
"(",
"x_dtype",
",",
"y_dtype",
")",
")",
"try",
":",
"dtype",
"=",
"_TRUEDIV_TABLE",
"[",
"x_dtype",
"]",
"except",
"KeyError",
":",
"raise",
"TypeError",
"(",
"\"Invalid dtype %r in __truediv__\"",
"%",
"x_dtype",
")",
"if",
"dtype",
"is",
"not",
"None",
":",
"x",
"=",
"cast",
"(",
"x",
",",
"dtype",
")",
"y",
"=",
"cast",
"(",
"y",
",",
"dtype",
")",
"return",
"gen_math_ops",
".",
"div",
"(",
"x",
",",
"y",
",",
"name",
"=",
"name",
")"
] |
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/math_ops.py#L825-L865
|
||
RobotLocomotion/drake
|
0e18a34604c45ed65bc9018a54f7610f91cdad5b
|
common/proto/call_python_client.py
|
python
|
CallPythonClient.execute_messages
|
(self, msgs)
|
Executes a set of recorded messages.
|
Executes a set of recorded messages.
|
[
"Executes",
"a",
"set",
"of",
"recorded",
"messages",
"."
] |
def execute_messages(self, msgs):
"""Executes a set of recorded messages."""
for msg in msgs:
self._execute_message(msg)
|
[
"def",
"execute_messages",
"(",
"self",
",",
"msgs",
")",
":",
"for",
"msg",
"in",
"msgs",
":",
"self",
".",
"_execute_message",
"(",
"msg",
")"
] |
https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/common/proto/call_python_client.py#L510-L513
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/extern/aui/auibar.py
|
python
|
AuiToolBar.FindTool
|
(self, tool_id)
|
return None
|
Finds a tool for the given tool id.
:param integer `tool_id`: the :class:`AuiToolBarItem` identifier.
|
Finds a tool for the given tool id.
|
[
"Finds",
"a",
"tool",
"for",
"the",
"given",
"tool",
"id",
"."
] |
def FindTool(self, tool_id):
"""
Finds a tool for the given tool id.
:param integer `tool_id`: the :class:`AuiToolBarItem` identifier.
"""
for item in self._items:
if item.id == tool_id:
return item
return None
|
[
"def",
"FindTool",
"(",
"self",
",",
"tool_id",
")",
":",
"for",
"item",
"in",
"self",
".",
"_items",
":",
"if",
"item",
".",
"id",
"==",
"tool_id",
":",
"return",
"item",
"return",
"None"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/aui/auibar.py#L2078-L2089
|
|
trilinos/Trilinos
|
6168be6dd51e35e1cd681e9c4b24433e709df140
|
packages/seacas/scripts/exodus3.in.py
|
python
|
exodus.title
|
(self)
|
return self.Title.value.decode('utf8')
|
get the database title
>>> title = exo.title()
Returns
-------
title : string
|
get the database title
|
[
"get",
"the",
"database",
"title"
] |
def title(self):
"""
get the database title
>>> title = exo.title()
Returns
-------
title : string
"""
return self.Title.value.decode('utf8')
|
[
"def",
"title",
"(",
"self",
")",
":",
"return",
"self",
".",
"Title",
".",
"value",
".",
"decode",
"(",
"'utf8'",
")"
] |
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/scripts/exodus3.in.py#L853-L863
|
|
kamyu104/LeetCode-Solutions
|
77605708a927ea3b85aee5a479db733938c7c211
|
Python/tuple-with-same-product.py
|
python
|
Solution.tupleSameProduct
|
(self, nums)
|
return 8*result
|
:type nums: List[int]
:rtype: int
|
:type nums: List[int]
:rtype: int
|
[
":",
"type",
"nums",
":",
"List",
"[",
"int",
"]",
":",
"rtype",
":",
"int"
] |
def tupleSameProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
count = collections.Counter()
for i in xrange(len(nums)):
for j in xrange(i+1, len(nums)):
result += count[nums[i]*nums[j]]
count[nums[i]*nums[j]] += 1
return 8*result
|
[
"def",
"tupleSameProduct",
"(",
"self",
",",
"nums",
")",
":",
"result",
"=",
"0",
"count",
"=",
"collections",
".",
"Counter",
"(",
")",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"nums",
")",
")",
":",
"for",
"j",
"in",
"xrange",
"(",
"i",
"+",
"1",
",",
"len",
"(",
"nums",
")",
")",
":",
"result",
"+=",
"count",
"[",
"nums",
"[",
"i",
"]",
"*",
"nums",
"[",
"j",
"]",
"]",
"count",
"[",
"nums",
"[",
"i",
"]",
"*",
"nums",
"[",
"j",
"]",
"]",
"+=",
"1",
"return",
"8",
"*",
"result"
] |
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/tuple-with-same-product.py#L8-L19
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/base.py
|
python
|
IndexOpsMixin.hasnans
|
(self)
|
return bool(isna(self).any())
|
Return if I have any nans; enables various perf speedups.
|
Return if I have any nans; enables various perf speedups.
|
[
"Return",
"if",
"I",
"have",
"any",
"nans",
";",
"enables",
"various",
"perf",
"speedups",
"."
] |
def hasnans(self):
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
|
[
"def",
"hasnans",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"isna",
"(",
"self",
")",
".",
"any",
"(",
")",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/base.py#L1047-L1051
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_misc.py
|
python
|
GetSingleChoiceIndex
|
(*args, **kwargs)
|
return _misc_.GetSingleChoiceIndex(*args, **kwargs)
|
GetSingleChoiceIndex(String message, String caption, int choices, Window parent=None,
int x=-1, int y=-1, bool centre=True,
int width=150, int height=200) -> int
|
GetSingleChoiceIndex(String message, String caption, int choices, Window parent=None,
int x=-1, int y=-1, bool centre=True,
int width=150, int height=200) -> int
|
[
"GetSingleChoiceIndex",
"(",
"String",
"message",
"String",
"caption",
"int",
"choices",
"Window",
"parent",
"=",
"None",
"int",
"x",
"=",
"-",
"1",
"int",
"y",
"=",
"-",
"1",
"bool",
"centre",
"=",
"True",
"int",
"width",
"=",
"150",
"int",
"height",
"=",
"200",
")",
"-",
">",
"int"
] |
def GetSingleChoiceIndex(*args, **kwargs):
"""
GetSingleChoiceIndex(String message, String caption, int choices, Window parent=None,
int x=-1, int y=-1, bool centre=True,
int width=150, int height=200) -> int
"""
return _misc_.GetSingleChoiceIndex(*args, **kwargs)
|
[
"def",
"GetSingleChoiceIndex",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"GetSingleChoiceIndex",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L477-L483
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_windows.py
|
python
|
VScrolledWindow.EstimateTotalHeight
|
(*args, **kwargs)
|
return _windows_.VScrolledWindow_EstimateTotalHeight(*args, **kwargs)
|
EstimateTotalHeight(self) -> int
|
EstimateTotalHeight(self) -> int
|
[
"EstimateTotalHeight",
"(",
"self",
")",
"-",
">",
"int"
] |
def EstimateTotalHeight(*args, **kwargs):
"""EstimateTotalHeight(self) -> int"""
return _windows_.VScrolledWindow_EstimateTotalHeight(*args, **kwargs)
|
[
"def",
"EstimateTotalHeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"VScrolledWindow_EstimateTotalHeight",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L2442-L2444
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/ribbon/panel.py
|
python
|
RibbonPanel.GetMinimisedIcon
|
(self)
|
return self._minimised_icon
|
Get the bitmap to be used in place of the panel children when it is minimised.
|
Get the bitmap to be used in place of the panel children when it is minimised.
|
[
"Get",
"the",
"bitmap",
"to",
"be",
"used",
"in",
"place",
"of",
"the",
"panel",
"children",
"when",
"it",
"is",
"minimised",
"."
] |
def GetMinimisedIcon(self):
"""
Get the bitmap to be used in place of the panel children when it is minimised.
"""
return self._minimised_icon
|
[
"def",
"GetMinimisedIcon",
"(",
"self",
")",
":",
"return",
"self",
".",
"_minimised_icon"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ribbon/panel.py#L1139-L1144
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/stc.py
|
python
|
StyledTextCtrl.SetCaretPeriod
|
(*args, **kwargs)
|
return _stc.StyledTextCtrl_SetCaretPeriod(*args, **kwargs)
|
SetCaretPeriod(self, int periodMilliseconds)
Get the time in milliseconds that the caret is on and off. 0 = steady on.
|
SetCaretPeriod(self, int periodMilliseconds)
|
[
"SetCaretPeriod",
"(",
"self",
"int",
"periodMilliseconds",
")"
] |
def SetCaretPeriod(*args, **kwargs):
"""
SetCaretPeriod(self, int periodMilliseconds)
Get the time in milliseconds that the caret is on and off. 0 = steady on.
"""
return _stc.StyledTextCtrl_SetCaretPeriod(*args, **kwargs)
|
[
"def",
"SetCaretPeriod",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetCaretPeriod",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L2827-L2833
|
|
kungfu-origin/kungfu
|
90c84b2b590855654cb9a6395ed050e0f7763512
|
core/deps/SQLiteCpp-2.3.0/cpplint.py
|
python
|
CheckSpacingForFunctionCall
|
(filename, line, linenum, error)
|
Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
Checks for the correctness of various spacing around function calls.
|
[
"Checks",
"for",
"the",
"correctness",
"of",
"various",
"spacing",
"around",
"function",
"calls",
"."
] |
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
|
[
"def",
"CheckSpacingForFunctionCall",
"(",
"filename",
",",
"line",
",",
"linenum",
",",
"error",
")",
":",
"# Since function calls often occur inside if/for/while/switch",
"# expressions - which have their own, more liberal conventions - we",
"# first see if we should be looking inside such an expression for a",
"# function call, to which we can apply more strict standards.",
"fncall",
"=",
"line",
"# if there's no control flow construct, look at whole line",
"for",
"pattern",
"in",
"(",
"r'\\bif\\s*\\((.*)\\)\\s*{'",
",",
"r'\\bfor\\s*\\((.*)\\)\\s*{'",
",",
"r'\\bwhile\\s*\\((.*)\\)\\s*[{;]'",
",",
"r'\\bswitch\\s*\\((.*)\\)\\s*{'",
")",
":",
"match",
"=",
"Search",
"(",
"pattern",
",",
"line",
")",
"if",
"match",
":",
"fncall",
"=",
"match",
".",
"group",
"(",
"1",
")",
"# look inside the parens for function calls",
"break",
"# Except in if/for/while/switch, there should never be space",
"# immediately inside parens (eg \"f( 3, 4 )\"). We make an exception",
"# for nested parens ( (a+b) + c ). Likewise, there should never be",
"# a space before a ( when it's a function argument. I assume it's a",
"# function argument when the char before the whitespace is legal in",
"# a function name (alnum + _) and we're not starting a macro. Also ignore",
"# pointers and references to arrays and functions coz they're too tricky:",
"# we use a very simple way to recognize these:",
"# \" (something)(maybe-something)\" or",
"# \" (something)(maybe-something,\" or",
"# \" (something)[something]\"",
"# Note that we assume the contents of [] to be short enough that",
"# they'll never need to wrap.",
"if",
"(",
"# Ignore control structures.",
"not",
"Search",
"(",
"r'\\b(if|for|while|switch|return|new|delete|catch|sizeof)\\b'",
",",
"fncall",
")",
"and",
"# Ignore pointers/references to functions.",
"not",
"Search",
"(",
"r' \\([^)]+\\)\\([^)]*(\\)|,$)'",
",",
"fncall",
")",
"and",
"# Ignore pointers/references to arrays.",
"not",
"Search",
"(",
"r' \\([^)]+\\)\\[[^\\]]+\\]'",
",",
"fncall",
")",
")",
":",
"if",
"Search",
"(",
"r'\\w\\s*\\(\\s(?!\\s*\\\\$)'",
",",
"fncall",
")",
":",
"# a ( used for a fn call",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"4",
",",
"'Extra space after ( in function call'",
")",
"elif",
"Search",
"(",
"r'\\(\\s+(?!(\\s*\\\\)|\\()'",
",",
"fncall",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"2",
",",
"'Extra space after ('",
")",
"if",
"(",
"Search",
"(",
"r'\\w\\s+\\('",
",",
"fncall",
")",
"and",
"not",
"Search",
"(",
"r'#\\s*define|typedef'",
",",
"fncall",
")",
"and",
"not",
"Search",
"(",
"r'\\w\\s+\\((\\w+::)*\\*\\w+\\)\\('",
",",
"fncall",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"4",
",",
"'Extra space before ( in function call'",
")",
"# If the ) is followed only by a newline or a { + newline, assume it's",
"# part of a control statement (if/while/etc), and don't complain",
"if",
"Search",
"(",
"r'[^)]\\s+\\)\\s*[^{\\s]'",
",",
"fncall",
")",
":",
"# If the closing parenthesis is preceded by only whitespaces,",
"# try to give a more descriptive error message.",
"if",
"Search",
"(",
"r'^\\s+\\)'",
",",
"fncall",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"2",
",",
"'Closing ) should be moved to the previous line'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"2",
",",
"'Extra space before )'",
")"
] |
https://github.com/kungfu-origin/kungfu/blob/90c84b2b590855654cb9a6395ed050e0f7763512/core/deps/SQLiteCpp-2.3.0/cpplint.py#L2230-L2295
|
||
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/beautifulsoup4/bs4/element.py
|
python
|
PageElement.find_next_sibling
|
(self, name=None, attrs={}, text=None, **kwargs)
|
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
|
Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document.
|
Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document.
|
[
"Returns",
"the",
"closest",
"sibling",
"to",
"this",
"Tag",
"that",
"matches",
"the",
"given",
"criteria",
"and",
"appears",
"after",
"this",
"Tag",
"in",
"the",
"document",
"."
] |
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
|
[
"def",
"find_next_sibling",
"(",
"self",
",",
"name",
"=",
"None",
",",
"attrs",
"=",
"{",
"}",
",",
"text",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_find_one",
"(",
"self",
".",
"find_next_siblings",
",",
"name",
",",
"attrs",
",",
"text",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/beautifulsoup4/bs4/element.py#L392-L396
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_core.py
|
python
|
Window.GetConstraints
|
(*args, **kwargs)
|
return _core_.Window_GetConstraints(*args, **kwargs)
|
GetConstraints(self) -> LayoutConstraints
Returns a pointer to the window's layout constraints, or None if there
are none.
|
GetConstraints(self) -> LayoutConstraints
|
[
"GetConstraints",
"(",
"self",
")",
"-",
">",
"LayoutConstraints"
] |
def GetConstraints(*args, **kwargs):
"""
GetConstraints(self) -> LayoutConstraints
Returns a pointer to the window's layout constraints, or None if there
are none.
"""
return _core_.Window_GetConstraints(*args, **kwargs)
|
[
"def",
"GetConstraints",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Window_GetConstraints",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L11459-L11466
|
|
zcash/zcash
|
944453065b40f6bed6bd59c4ff01c4d123c6cdb7
|
contrib/linearize/linearize-data.py
|
python
|
BlockDataCopier.copyOneBlock
|
(self)
|
Find the next block to be written in the input, and copy it to the output.
|
Find the next block to be written in the input, and copy it to the output.
|
[
"Find",
"the",
"next",
"block",
"to",
"be",
"written",
"in",
"the",
"input",
"and",
"copy",
"it",
"to",
"the",
"output",
"."
] |
def copyOneBlock(self):
'''Find the next block to be written in the input, and copy it to the output.'''
extent = self.blockExtents.pop(self.blkCountOut)
if self.blkCountOut in self.outOfOrderData:
# If the data is cached, use it from memory and remove from the cache
rawblock = self.outOfOrderData.pop(self.blkCountOut)
self.outOfOrderSize -= len(rawblock)
else: # Otherwise look up data on disk
rawblock = self.fetchBlock(extent)
self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)
|
[
"def",
"copyOneBlock",
"(",
"self",
")",
":",
"extent",
"=",
"self",
".",
"blockExtents",
".",
"pop",
"(",
"self",
".",
"blkCountOut",
")",
"if",
"self",
".",
"blkCountOut",
"in",
"self",
".",
"outOfOrderData",
":",
"# If the data is cached, use it from memory and remove from the cache",
"rawblock",
"=",
"self",
".",
"outOfOrderData",
".",
"pop",
"(",
"self",
".",
"blkCountOut",
")",
"self",
".",
"outOfOrderSize",
"-=",
"len",
"(",
"rawblock",
")",
"else",
":",
"# Otherwise look up data on disk",
"rawblock",
"=",
"self",
".",
"fetchBlock",
"(",
"extent",
")",
"self",
".",
"writeBlock",
"(",
"extent",
".",
"inhdr",
",",
"extent",
".",
"blkhdr",
",",
"rawblock",
")"
] |
https://github.com/zcash/zcash/blob/944453065b40f6bed6bd59c4ff01c4d123c6cdb7/contrib/linearize/linearize-data.py#L178-L188
|
||
esphome/esphome
|
40e06c9819f17409615d4f4eec5cfe4dc9a3776d
|
esphome/cpp_generator.py
|
python
|
add_build_flag
|
(build_flag: str)
|
Add a global build flag to the compiler flags.
|
Add a global build flag to the compiler flags.
|
[
"Add",
"a",
"global",
"build",
"flag",
"to",
"the",
"compiler",
"flags",
"."
] |
def add_build_flag(build_flag: str):
"""Add a global build flag to the compiler flags."""
CORE.add_build_flag(build_flag)
|
[
"def",
"add_build_flag",
"(",
"build_flag",
":",
"str",
")",
":",
"CORE",
".",
"add_build_flag",
"(",
"build_flag",
")"
] |
https://github.com/esphome/esphome/blob/40e06c9819f17409615d4f4eec5cfe4dc9a3776d/esphome/cpp_generator.py#L577-L579
|
||
gem5/gem5
|
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
|
ext/ply/example/GardenSnake/GardenSnake.py
|
python
|
p_argument
|
(p)
|
argument : test
|
argument : test
|
[
"argument",
":",
"test"
] |
def p_argument(p):
"argument : test"
p[0] = p[1]
|
[
"def",
"p_argument",
"(",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] |
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/ext/ply/example/GardenSnake/GardenSnake.py#L622-L624
|
||
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
Framework/PythonInterface/mantid/plots/mantidaxes.py
|
python
|
MantidAxes.errorbar
|
(self, *args, **kwargs)
|
If the **mantid** projection is chosen, it can be
used the same as :py:meth:`matplotlib.axes.Axes.errorbar` for arrays,
or it can be used to plot :class:`mantid.api.MatrixWorkspace`
or :class:`mantid.api.IMDHistoWorkspace`. You can have something like::
import matplotlib.pyplot as plt
from mantid import plots
...
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
ax.errorbar(workspace,'rs',specNum=1) #for workspaces
ax.errorbar(x,y,yerr,'bo') #for arrays
fig.show()
For keywords related to workspaces, see :func:`plotfunctions.errorbar`
|
If the **mantid** projection is chosen, it can be
used the same as :py:meth:`matplotlib.axes.Axes.errorbar` for arrays,
or it can be used to plot :class:`mantid.api.MatrixWorkspace`
or :class:`mantid.api.IMDHistoWorkspace`. You can have something like::
|
[
"If",
"the",
"**",
"mantid",
"**",
"projection",
"is",
"chosen",
"it",
"can",
"be",
"used",
"the",
"same",
"as",
":",
"py",
":",
"meth",
":",
"matplotlib",
".",
"axes",
".",
"Axes",
".",
"errorbar",
"for",
"arrays",
"or",
"it",
"can",
"be",
"used",
"to",
"plot",
":",
"class",
":",
"mantid",
".",
"api",
".",
"MatrixWorkspace",
"or",
":",
"class",
":",
"mantid",
".",
"api",
".",
"IMDHistoWorkspace",
".",
"You",
"can",
"have",
"something",
"like",
"::"
] |
def errorbar(self, *args, **kwargs):
"""
If the **mantid** projection is chosen, it can be
used the same as :py:meth:`matplotlib.axes.Axes.errorbar` for arrays,
or it can be used to plot :class:`mantid.api.MatrixWorkspace`
or :class:`mantid.api.IMDHistoWorkspace`. You can have something like::
import matplotlib.pyplot as plt
from mantid import plots
...
fig, ax = plt.subplots(subplot_kw={'projection':'mantid'})
ax.errorbar(workspace,'rs',specNum=1) #for workspaces
ax.errorbar(x,y,yerr,'bo') #for arrays
fig.show()
For keywords related to workspaces, see :func:`plotfunctions.errorbar`
"""
if datafunctions.validate_args(*args):
logger.debug('using plotfunctions')
autoscale_on = kwargs.pop("autoscale_on_update", self.get_autoscale_on())
def _data_update(artists, workspace, new_kwargs=None):
if new_kwargs:
_autoscale_on = new_kwargs.pop("autoscale_on_update", self.get_autoscale_on())
else:
_autoscale_on = self.get_autoscale_on()
# errorbar with workspaces can only return a single container
container_orig = artists[0]
# It is not possible to simply reset the error bars so
# we have to plot new lines but ensure we don't reorder them on the plot!
orig_idx = self.containers.index(container_orig)
container_orig.remove()
# The container does not remove itself from the containers list
# but protect this just in case matplotlib starts doing this
try:
self.containers.remove(container_orig)
except ValueError:
pass
# this gets pushed back onto the containers list
try:
with autoscale_on_update(self, _autoscale_on):
# this gets pushed back onto the containers list
if new_kwargs:
container_new = axesfunctions.errorbar(self, workspace, **new_kwargs)
else:
container_new = axesfunctions.errorbar(self, workspace, **kwargs)
self.containers.insert(orig_idx, container_new)
self.containers.pop()
# Update joining line
if container_new[0] and container_orig[0]:
container_new[0].update_from(container_orig[0])
# Update caps
for orig_caps, new_caps in zip(container_orig[1], container_new[1]):
new_caps.update_from(orig_caps)
# Update bars
for orig_bars, new_bars in zip(container_orig[2], container_new[2]):
new_bars.update_from(orig_bars)
# Re-plotting in the config dialog will assign this attr
if hasattr(container_orig, 'errorevery'):
setattr(container_new, 'errorevery', container_orig.errorevery)
# ax.relim does not support collections...
self._update_line_limits(container_new[0])
except RuntimeError as ex:
logger.information('Error bar not plotted: {0}'.format(ex.args[0]))
container_new = []
# also remove the curve from the legend
if (not self.is_empty(self)) and self.legend_ is not None:
legend_set_draggable(self.legend(), True)
return container_new
workspace = args[0]
spec_num = self.get_spec_number_or_bin(workspace, kwargs)
normalize_by_bin_width, kwargs = get_normalize_by_bin_width(workspace, self, **kwargs)
is_normalized = normalize_by_bin_width or \
(hasattr(workspace, 'isDistribution') and workspace.isDistribution())
with autoscale_on_update(self, autoscale_on):
artist = self.track_workspace_artist(workspace,
axesfunctions.errorbar(self, normalize_by_bin_width = is_normalized,
*args, **kwargs),
_data_update, spec_num, is_normalized,
MantidAxes.is_axis_of_type(MantidAxType.SPECTRUM, kwargs))
return artist
else:
return Axes.errorbar(self, *args, **kwargs)
|
[
"def",
"errorbar",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"datafunctions",
".",
"validate_args",
"(",
"*",
"args",
")",
":",
"logger",
".",
"debug",
"(",
"'using plotfunctions'",
")",
"autoscale_on",
"=",
"kwargs",
".",
"pop",
"(",
"\"autoscale_on_update\"",
",",
"self",
".",
"get_autoscale_on",
"(",
")",
")",
"def",
"_data_update",
"(",
"artists",
",",
"workspace",
",",
"new_kwargs",
"=",
"None",
")",
":",
"if",
"new_kwargs",
":",
"_autoscale_on",
"=",
"new_kwargs",
".",
"pop",
"(",
"\"autoscale_on_update\"",
",",
"self",
".",
"get_autoscale_on",
"(",
")",
")",
"else",
":",
"_autoscale_on",
"=",
"self",
".",
"get_autoscale_on",
"(",
")",
"# errorbar with workspaces can only return a single container",
"container_orig",
"=",
"artists",
"[",
"0",
"]",
"# It is not possible to simply reset the error bars so",
"# we have to plot new lines but ensure we don't reorder them on the plot!",
"orig_idx",
"=",
"self",
".",
"containers",
".",
"index",
"(",
"container_orig",
")",
"container_orig",
".",
"remove",
"(",
")",
"# The container does not remove itself from the containers list",
"# but protect this just in case matplotlib starts doing this",
"try",
":",
"self",
".",
"containers",
".",
"remove",
"(",
"container_orig",
")",
"except",
"ValueError",
":",
"pass",
"# this gets pushed back onto the containers list",
"try",
":",
"with",
"autoscale_on_update",
"(",
"self",
",",
"_autoscale_on",
")",
":",
"# this gets pushed back onto the containers list",
"if",
"new_kwargs",
":",
"container_new",
"=",
"axesfunctions",
".",
"errorbar",
"(",
"self",
",",
"workspace",
",",
"*",
"*",
"new_kwargs",
")",
"else",
":",
"container_new",
"=",
"axesfunctions",
".",
"errorbar",
"(",
"self",
",",
"workspace",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"containers",
".",
"insert",
"(",
"orig_idx",
",",
"container_new",
")",
"self",
".",
"containers",
".",
"pop",
"(",
")",
"# Update joining line",
"if",
"container_new",
"[",
"0",
"]",
"and",
"container_orig",
"[",
"0",
"]",
":",
"container_new",
"[",
"0",
"]",
".",
"update_from",
"(",
"container_orig",
"[",
"0",
"]",
")",
"# Update caps",
"for",
"orig_caps",
",",
"new_caps",
"in",
"zip",
"(",
"container_orig",
"[",
"1",
"]",
",",
"container_new",
"[",
"1",
"]",
")",
":",
"new_caps",
".",
"update_from",
"(",
"orig_caps",
")",
"# Update bars",
"for",
"orig_bars",
",",
"new_bars",
"in",
"zip",
"(",
"container_orig",
"[",
"2",
"]",
",",
"container_new",
"[",
"2",
"]",
")",
":",
"new_bars",
".",
"update_from",
"(",
"orig_bars",
")",
"# Re-plotting in the config dialog will assign this attr",
"if",
"hasattr",
"(",
"container_orig",
",",
"'errorevery'",
")",
":",
"setattr",
"(",
"container_new",
",",
"'errorevery'",
",",
"container_orig",
".",
"errorevery",
")",
"# ax.relim does not support collections...",
"self",
".",
"_update_line_limits",
"(",
"container_new",
"[",
"0",
"]",
")",
"except",
"RuntimeError",
"as",
"ex",
":",
"logger",
".",
"information",
"(",
"'Error bar not plotted: {0}'",
".",
"format",
"(",
"ex",
".",
"args",
"[",
"0",
"]",
")",
")",
"container_new",
"=",
"[",
"]",
"# also remove the curve from the legend",
"if",
"(",
"not",
"self",
".",
"is_empty",
"(",
"self",
")",
")",
"and",
"self",
".",
"legend_",
"is",
"not",
"None",
":",
"legend_set_draggable",
"(",
"self",
".",
"legend",
"(",
")",
",",
"True",
")",
"return",
"container_new",
"workspace",
"=",
"args",
"[",
"0",
"]",
"spec_num",
"=",
"self",
".",
"get_spec_number_or_bin",
"(",
"workspace",
",",
"kwargs",
")",
"normalize_by_bin_width",
",",
"kwargs",
"=",
"get_normalize_by_bin_width",
"(",
"workspace",
",",
"self",
",",
"*",
"*",
"kwargs",
")",
"is_normalized",
"=",
"normalize_by_bin_width",
"or",
"(",
"hasattr",
"(",
"workspace",
",",
"'isDistribution'",
")",
"and",
"workspace",
".",
"isDistribution",
"(",
")",
")",
"with",
"autoscale_on_update",
"(",
"self",
",",
"autoscale_on",
")",
":",
"artist",
"=",
"self",
".",
"track_workspace_artist",
"(",
"workspace",
",",
"axesfunctions",
".",
"errorbar",
"(",
"self",
",",
"normalize_by_bin_width",
"=",
"is_normalized",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"_data_update",
",",
"spec_num",
",",
"is_normalized",
",",
"MantidAxes",
".",
"is_axis_of_type",
"(",
"MantidAxType",
".",
"SPECTRUM",
",",
"kwargs",
")",
")",
"return",
"artist",
"else",
":",
"return",
"Axes",
".",
"errorbar",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/mantid/plots/mantidaxes.py#L711-L801
|
||
intel/llvm
|
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
|
lldb/utils/lui/lldbutil.py
|
python
|
bytearray_to_int
|
(bytes, bytesize)
|
return unpacked[0]
|
Utility function to convert a bytearray into an integer.
It interprets the bytearray in the little endian format. For a big endian
bytearray, just do ba.reverse() on the object before passing it in.
|
Utility function to convert a bytearray into an integer.
|
[
"Utility",
"function",
"to",
"convert",
"a",
"bytearray",
"into",
"an",
"integer",
"."
] |
def bytearray_to_int(bytes, bytesize):
"""Utility function to convert a bytearray into an integer.
It interprets the bytearray in the little endian format. For a big endian
bytearray, just do ba.reverse() on the object before passing it in.
"""
import struct
if bytesize == 1:
return bytes[0]
# Little endian followed by a format character.
template = "<%c"
if bytesize == 2:
fmt = template % 'h'
elif bytesize == 4:
fmt = template % 'i'
elif bytesize == 4:
fmt = template % 'q'
else:
return None
unpacked = struct.unpack(fmt, str(bytes))
return unpacked[0]
|
[
"def",
"bytearray_to_int",
"(",
"bytes",
",",
"bytesize",
")",
":",
"import",
"struct",
"if",
"bytesize",
"==",
"1",
":",
"return",
"bytes",
"[",
"0",
"]",
"# Little endian followed by a format character.",
"template",
"=",
"\"<%c\"",
"if",
"bytesize",
"==",
"2",
":",
"fmt",
"=",
"template",
"%",
"'h'",
"elif",
"bytesize",
"==",
"4",
":",
"fmt",
"=",
"template",
"%",
"'i'",
"elif",
"bytesize",
"==",
"4",
":",
"fmt",
"=",
"template",
"%",
"'q'",
"else",
":",
"return",
"None",
"unpacked",
"=",
"struct",
".",
"unpack",
"(",
"fmt",
",",
"str",
"(",
"bytes",
")",
")",
"return",
"unpacked",
"[",
"0",
"]"
] |
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/utils/lui/lldbutil.py#L92-L115
|
|
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/locations.py
|
python
|
virtualenv_no_global
|
()
|
Return True if in a venv and no system site packages.
|
Return True if in a venv and no system site packages.
|
[
"Return",
"True",
"if",
"in",
"a",
"venv",
"and",
"no",
"system",
"site",
"packages",
"."
] |
def virtualenv_no_global():
"""
Return True if in a venv and no system site packages.
"""
#this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
|
[
"def",
"virtualenv_no_global",
"(",
")",
":",
"#this mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file",
"site_mod_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"site",
".",
"__file__",
")",
")",
"no_global_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"site_mod_dir",
",",
"'no-global-site-packages.txt'",
")",
"if",
"running_under_virtualenv",
"(",
")",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"no_global_file",
")",
":",
"return",
"True"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/locations.py#L41-L49
|
||
OSGeo/gdal
|
3748fc4ba4fba727492774b2b908a2130c864a83
|
swig/python/gdal-utils/osgeo_utils/gdal2xyz.py
|
python
|
gdal2xyz
|
(srcfile: PathOrDS, dstfile: PathLikeOrStr = None,
srcwin: Optional[Sequence[int]] = None,
skip: Union[int, Sequence[int]] = 1,
band_nums: Optional[Sequence[int]] = None, delim: str = ' ',
skip_nodata: bool = False,
src_nodata: Optional[Union[Sequence, Number]] = None, dst_nodata: Optional[Union[Sequence, Number]] = None,
return_np_arrays: bool = False, pre_allocate_np_arrays: bool = True,
progress_callback: OptionalProgressCallback = ...)
|
return result
|
translates a raster file (or dataset) into xyz format
skip - how many rows/cols to skip each iteration
srcwin (xoff, yoff, xsize, ysize) - Selects a subwindow from the source image for copying based on pixel/line location.
band_nums - selected input bands to process, None to process all.
delim - the delimiter to use between values in a line
skip_nodata - Exclude the output lines with nodata value (as determined by srcnodata)
src_nodata - The nodata value of the dataset (for skipping or replacing)
default (`None`) - Use the dataset NoDataValue;
`Sequence`/`Number` - use the given nodata value (per band or per dataset).
dst_nodata - Replace source nodata with a given nodata. Has an effect only if not setting `-skipnodata`
default(`None`) - use srcnodata, no replacement;
`Sequence`/`Number` - replace the `srcnodata` with the given nodata value (per band or per dataset).
srcfile - The source dataset filename or dataset object
dstfile - The output dataset filename; for dstfile=None - if return_np_arrays=False then output will be printed to stdout
return_np_arrays - return numpy arrays of the result, otherwise returns None
pre_allocate_np_arrays - pre-allocated result arrays.
Should be faster unless skip_nodata and the input is very sparse thus most data points will be skipped.
progress_callback - progress callback function. use None for quiet or Ellipsis for using the default callback
|
translates a raster file (or dataset) into xyz format
|
[
"translates",
"a",
"raster",
"file",
"(",
"or",
"dataset",
")",
"into",
"xyz",
"format"
] |
def gdal2xyz(srcfile: PathOrDS, dstfile: PathLikeOrStr = None,
srcwin: Optional[Sequence[int]] = None,
skip: Union[int, Sequence[int]] = 1,
band_nums: Optional[Sequence[int]] = None, delim: str = ' ',
skip_nodata: bool = False,
src_nodata: Optional[Union[Sequence, Number]] = None, dst_nodata: Optional[Union[Sequence, Number]] = None,
return_np_arrays: bool = False, pre_allocate_np_arrays: bool = True,
progress_callback: OptionalProgressCallback = ...) -> Optional[Tuple]:
"""
translates a raster file (or dataset) into xyz format
skip - how many rows/cols to skip each iteration
srcwin (xoff, yoff, xsize, ysize) - Selects a subwindow from the source image for copying based on pixel/line location.
band_nums - selected input bands to process, None to process all.
delim - the delimiter to use between values in a line
skip_nodata - Exclude the output lines with nodata value (as determined by srcnodata)
src_nodata - The nodata value of the dataset (for skipping or replacing)
default (`None`) - Use the dataset NoDataValue;
`Sequence`/`Number` - use the given nodata value (per band or per dataset).
dst_nodata - Replace source nodata with a given nodata. Has an effect only if not setting `-skipnodata`
default(`None`) - use srcnodata, no replacement;
`Sequence`/`Number` - replace the `srcnodata` with the given nodata value (per band or per dataset).
srcfile - The source dataset filename or dataset object
dstfile - The output dataset filename; for dstfile=None - if return_np_arrays=False then output will be printed to stdout
return_np_arrays - return numpy arrays of the result, otherwise returns None
pre_allocate_np_arrays - pre-allocated result arrays.
Should be faster unless skip_nodata and the input is very sparse thus most data points will be skipped.
progress_callback - progress callback function. use None for quiet or Ellipsis for using the default callback
"""
result = None
progress_callback = get_progress_callback(progress_callback)
# Open source file.
ds = open_ds(srcfile, access_mode=gdal.GA_ReadOnly)
if ds is None:
raise Exception(f'Could not open {srcfile}.')
bands = get_bands(ds, band_nums)
band_count = len(bands)
gt = ds.GetGeoTransform()
# Collect information on all the source files.
if srcwin is None:
srcwin = (0, 0, ds.RasterXSize, ds.RasterYSize)
dt, np_dt = GDALTypeCodeAndNumericTypeCodeFromDataSet(ds)
# Open the output file.
if dstfile is not None:
dst_fh = open(dstfile, 'wt')
elif return_np_arrays:
dst_fh = None
else:
dst_fh = sys.stdout
if dst_fh:
if dt == gdal.GDT_Int32 or dt == gdal.GDT_UInt32:
band_format = (("%d" + delim) * len(bands)).rstrip(delim) + '\n'
else:
band_format = (("%g" + delim) * len(bands)).rstrip(delim) + '\n'
# Setup an appropriate print format.
if abs(gt[0]) < 180 and abs(gt[3]) < 180 \
and abs(ds.RasterXSize * gt[1]) < 180 \
and abs(ds.RasterYSize * gt[5]) < 180:
frmt = '%.10g' + delim + '%.10g' + delim + '%s'
else:
frmt = '%.3f' + delim + '%.3f' + delim + '%s'
if isinstance(src_nodata, Number):
src_nodata = [src_nodata] * band_count
elif src_nodata is None:
src_nodata = list(band.GetNoDataValue() for band in bands)
if None in src_nodata:
src_nodata = None
if src_nodata is not None:
src_nodata = np.asarray(src_nodata, dtype=np_dt)
if isinstance(dst_nodata, Number):
dst_nodata = [dst_nodata] * band_count
if (dst_nodata is None) or (None in dst_nodata) or (src_nodata is None):
dst_nodata = None
if dst_nodata is not None:
dst_nodata = np.asarray(dst_nodata, dtype=np_dt)
skip_nodata = skip_nodata and (src_nodata is not None)
replace_nodata = (not skip_nodata) and (dst_nodata is not None)
process_nodata = skip_nodata or replace_nodata
if isinstance(skip, Sequence):
x_skip, y_skip = skip
else:
x_skip = y_skip = skip
x_off, y_off, x_size, y_size = srcwin
bands_count = len(bands)
nXBlocks = (x_size - x_off) // x_skip
nYBlocks = (y_size - y_off) // y_skip
progress_end = nXBlocks * nYBlocks
progress_curr = 0
progress_prev = -1
progress_parts = 100
if return_np_arrays:
size = progress_end if pre_allocate_np_arrays else 0
all_geo_x = np.empty(size)
all_geo_y = np.empty(size)
all_data = np.empty((size, band_count), dtype=np_dt)
# Loop emitting data.
idx = 0
for y in range(y_off, y_off + y_size, y_skip):
size = bands_count if pre_allocate_np_arrays else 0
data = np.empty((size, x_size), dtype=np_dt) # dims: (bands_count, x_size)
for i_bnd, band in enumerate(bands):
band_data = band.ReadAsArray(x_off, y, x_size, 1) # read one band line
if pre_allocate_np_arrays:
data[i_bnd] = band_data[0]
else:
data = np.append(data, band_data, axis=0)
for x_i in range(0, x_size, x_skip):
progress_curr += 1
if progress_callback:
progress_frac = progress_curr / progress_end
progress = int(progress_frac * progress_parts)
if progress > progress_prev:
progress_prev = progress
progress_callback(progress_frac)
x_i_data = data[:, x_i] # single pixel, dims: (bands)
if process_nodata and np.array_equal(src_nodata, x_i_data):
if skip_nodata:
continue
elif replace_nodata:
x_i_data = dst_nodata
x = x_i + x_off
geo_x = gt[0] + (x + 0.5) * gt[1] + (y + 0.5) * gt[2]
geo_y = gt[3] + (x + 0.5) * gt[4] + (y + 0.5) * gt[5]
if dst_fh:
band_str = band_format % tuple(x_i_data)
line = frmt % (float(geo_x), float(geo_y), band_str)
dst_fh.write(line)
if return_np_arrays:
if pre_allocate_np_arrays:
all_geo_x[idx] = geo_x
all_geo_y[idx] = geo_y
all_data[idx] = x_i_data
else:
all_geo_x = np.append(all_geo_x, geo_x)
all_geo_y = np.append(all_geo_y, geo_y)
all_data = np.append(all_data, [x_i_data], axis=0)
idx += 1
if return_np_arrays:
nodata = None if skip_nodata else dst_nodata if replace_nodata else src_nodata
if idx != progress_curr:
all_geo_x = all_geo_x[:idx]
all_geo_y = all_geo_y[:idx]
all_data = all_data[:idx, :]
result = all_geo_x, all_geo_y, all_data.transpose(), nodata
return result
|
[
"def",
"gdal2xyz",
"(",
"srcfile",
":",
"PathOrDS",
",",
"dstfile",
":",
"PathLikeOrStr",
"=",
"None",
",",
"srcwin",
":",
"Optional",
"[",
"Sequence",
"[",
"int",
"]",
"]",
"=",
"None",
",",
"skip",
":",
"Union",
"[",
"int",
",",
"Sequence",
"[",
"int",
"]",
"]",
"=",
"1",
",",
"band_nums",
":",
"Optional",
"[",
"Sequence",
"[",
"int",
"]",
"]",
"=",
"None",
",",
"delim",
":",
"str",
"=",
"' '",
",",
"skip_nodata",
":",
"bool",
"=",
"False",
",",
"src_nodata",
":",
"Optional",
"[",
"Union",
"[",
"Sequence",
",",
"Number",
"]",
"]",
"=",
"None",
",",
"dst_nodata",
":",
"Optional",
"[",
"Union",
"[",
"Sequence",
",",
"Number",
"]",
"]",
"=",
"None",
",",
"return_np_arrays",
":",
"bool",
"=",
"False",
",",
"pre_allocate_np_arrays",
":",
"bool",
"=",
"True",
",",
"progress_callback",
":",
"OptionalProgressCallback",
"=",
"...",
")",
"->",
"Optional",
"[",
"Tuple",
"]",
":",
"result",
"=",
"None",
"progress_callback",
"=",
"get_progress_callback",
"(",
"progress_callback",
")",
"# Open source file.",
"ds",
"=",
"open_ds",
"(",
"srcfile",
",",
"access_mode",
"=",
"gdal",
".",
"GA_ReadOnly",
")",
"if",
"ds",
"is",
"None",
":",
"raise",
"Exception",
"(",
"f'Could not open {srcfile}.'",
")",
"bands",
"=",
"get_bands",
"(",
"ds",
",",
"band_nums",
")",
"band_count",
"=",
"len",
"(",
"bands",
")",
"gt",
"=",
"ds",
".",
"GetGeoTransform",
"(",
")",
"# Collect information on all the source files.",
"if",
"srcwin",
"is",
"None",
":",
"srcwin",
"=",
"(",
"0",
",",
"0",
",",
"ds",
".",
"RasterXSize",
",",
"ds",
".",
"RasterYSize",
")",
"dt",
",",
"np_dt",
"=",
"GDALTypeCodeAndNumericTypeCodeFromDataSet",
"(",
"ds",
")",
"# Open the output file.",
"if",
"dstfile",
"is",
"not",
"None",
":",
"dst_fh",
"=",
"open",
"(",
"dstfile",
",",
"'wt'",
")",
"elif",
"return_np_arrays",
":",
"dst_fh",
"=",
"None",
"else",
":",
"dst_fh",
"=",
"sys",
".",
"stdout",
"if",
"dst_fh",
":",
"if",
"dt",
"==",
"gdal",
".",
"GDT_Int32",
"or",
"dt",
"==",
"gdal",
".",
"GDT_UInt32",
":",
"band_format",
"=",
"(",
"(",
"\"%d\"",
"+",
"delim",
")",
"*",
"len",
"(",
"bands",
")",
")",
".",
"rstrip",
"(",
"delim",
")",
"+",
"'\\n'",
"else",
":",
"band_format",
"=",
"(",
"(",
"\"%g\"",
"+",
"delim",
")",
"*",
"len",
"(",
"bands",
")",
")",
".",
"rstrip",
"(",
"delim",
")",
"+",
"'\\n'",
"# Setup an appropriate print format.",
"if",
"abs",
"(",
"gt",
"[",
"0",
"]",
")",
"<",
"180",
"and",
"abs",
"(",
"gt",
"[",
"3",
"]",
")",
"<",
"180",
"and",
"abs",
"(",
"ds",
".",
"RasterXSize",
"*",
"gt",
"[",
"1",
"]",
")",
"<",
"180",
"and",
"abs",
"(",
"ds",
".",
"RasterYSize",
"*",
"gt",
"[",
"5",
"]",
")",
"<",
"180",
":",
"frmt",
"=",
"'%.10g'",
"+",
"delim",
"+",
"'%.10g'",
"+",
"delim",
"+",
"'%s'",
"else",
":",
"frmt",
"=",
"'%.3f'",
"+",
"delim",
"+",
"'%.3f'",
"+",
"delim",
"+",
"'%s'",
"if",
"isinstance",
"(",
"src_nodata",
",",
"Number",
")",
":",
"src_nodata",
"=",
"[",
"src_nodata",
"]",
"*",
"band_count",
"elif",
"src_nodata",
"is",
"None",
":",
"src_nodata",
"=",
"list",
"(",
"band",
".",
"GetNoDataValue",
"(",
")",
"for",
"band",
"in",
"bands",
")",
"if",
"None",
"in",
"src_nodata",
":",
"src_nodata",
"=",
"None",
"if",
"src_nodata",
"is",
"not",
"None",
":",
"src_nodata",
"=",
"np",
".",
"asarray",
"(",
"src_nodata",
",",
"dtype",
"=",
"np_dt",
")",
"if",
"isinstance",
"(",
"dst_nodata",
",",
"Number",
")",
":",
"dst_nodata",
"=",
"[",
"dst_nodata",
"]",
"*",
"band_count",
"if",
"(",
"dst_nodata",
"is",
"None",
")",
"or",
"(",
"None",
"in",
"dst_nodata",
")",
"or",
"(",
"src_nodata",
"is",
"None",
")",
":",
"dst_nodata",
"=",
"None",
"if",
"dst_nodata",
"is",
"not",
"None",
":",
"dst_nodata",
"=",
"np",
".",
"asarray",
"(",
"dst_nodata",
",",
"dtype",
"=",
"np_dt",
")",
"skip_nodata",
"=",
"skip_nodata",
"and",
"(",
"src_nodata",
"is",
"not",
"None",
")",
"replace_nodata",
"=",
"(",
"not",
"skip_nodata",
")",
"and",
"(",
"dst_nodata",
"is",
"not",
"None",
")",
"process_nodata",
"=",
"skip_nodata",
"or",
"replace_nodata",
"if",
"isinstance",
"(",
"skip",
",",
"Sequence",
")",
":",
"x_skip",
",",
"y_skip",
"=",
"skip",
"else",
":",
"x_skip",
"=",
"y_skip",
"=",
"skip",
"x_off",
",",
"y_off",
",",
"x_size",
",",
"y_size",
"=",
"srcwin",
"bands_count",
"=",
"len",
"(",
"bands",
")",
"nXBlocks",
"=",
"(",
"x_size",
"-",
"x_off",
")",
"//",
"x_skip",
"nYBlocks",
"=",
"(",
"y_size",
"-",
"y_off",
")",
"//",
"y_skip",
"progress_end",
"=",
"nXBlocks",
"*",
"nYBlocks",
"progress_curr",
"=",
"0",
"progress_prev",
"=",
"-",
"1",
"progress_parts",
"=",
"100",
"if",
"return_np_arrays",
":",
"size",
"=",
"progress_end",
"if",
"pre_allocate_np_arrays",
"else",
"0",
"all_geo_x",
"=",
"np",
".",
"empty",
"(",
"size",
")",
"all_geo_y",
"=",
"np",
".",
"empty",
"(",
"size",
")",
"all_data",
"=",
"np",
".",
"empty",
"(",
"(",
"size",
",",
"band_count",
")",
",",
"dtype",
"=",
"np_dt",
")",
"# Loop emitting data.",
"idx",
"=",
"0",
"for",
"y",
"in",
"range",
"(",
"y_off",
",",
"y_off",
"+",
"y_size",
",",
"y_skip",
")",
":",
"size",
"=",
"bands_count",
"if",
"pre_allocate_np_arrays",
"else",
"0",
"data",
"=",
"np",
".",
"empty",
"(",
"(",
"size",
",",
"x_size",
")",
",",
"dtype",
"=",
"np_dt",
")",
"# dims: (bands_count, x_size)",
"for",
"i_bnd",
",",
"band",
"in",
"enumerate",
"(",
"bands",
")",
":",
"band_data",
"=",
"band",
".",
"ReadAsArray",
"(",
"x_off",
",",
"y",
",",
"x_size",
",",
"1",
")",
"# read one band line",
"if",
"pre_allocate_np_arrays",
":",
"data",
"[",
"i_bnd",
"]",
"=",
"band_data",
"[",
"0",
"]",
"else",
":",
"data",
"=",
"np",
".",
"append",
"(",
"data",
",",
"band_data",
",",
"axis",
"=",
"0",
")",
"for",
"x_i",
"in",
"range",
"(",
"0",
",",
"x_size",
",",
"x_skip",
")",
":",
"progress_curr",
"+=",
"1",
"if",
"progress_callback",
":",
"progress_frac",
"=",
"progress_curr",
"/",
"progress_end",
"progress",
"=",
"int",
"(",
"progress_frac",
"*",
"progress_parts",
")",
"if",
"progress",
">",
"progress_prev",
":",
"progress_prev",
"=",
"progress",
"progress_callback",
"(",
"progress_frac",
")",
"x_i_data",
"=",
"data",
"[",
":",
",",
"x_i",
"]",
"# single pixel, dims: (bands)",
"if",
"process_nodata",
"and",
"np",
".",
"array_equal",
"(",
"src_nodata",
",",
"x_i_data",
")",
":",
"if",
"skip_nodata",
":",
"continue",
"elif",
"replace_nodata",
":",
"x_i_data",
"=",
"dst_nodata",
"x",
"=",
"x_i",
"+",
"x_off",
"geo_x",
"=",
"gt",
"[",
"0",
"]",
"+",
"(",
"x",
"+",
"0.5",
")",
"*",
"gt",
"[",
"1",
"]",
"+",
"(",
"y",
"+",
"0.5",
")",
"*",
"gt",
"[",
"2",
"]",
"geo_y",
"=",
"gt",
"[",
"3",
"]",
"+",
"(",
"x",
"+",
"0.5",
")",
"*",
"gt",
"[",
"4",
"]",
"+",
"(",
"y",
"+",
"0.5",
")",
"*",
"gt",
"[",
"5",
"]",
"if",
"dst_fh",
":",
"band_str",
"=",
"band_format",
"%",
"tuple",
"(",
"x_i_data",
")",
"line",
"=",
"frmt",
"%",
"(",
"float",
"(",
"geo_x",
")",
",",
"float",
"(",
"geo_y",
")",
",",
"band_str",
")",
"dst_fh",
".",
"write",
"(",
"line",
")",
"if",
"return_np_arrays",
":",
"if",
"pre_allocate_np_arrays",
":",
"all_geo_x",
"[",
"idx",
"]",
"=",
"geo_x",
"all_geo_y",
"[",
"idx",
"]",
"=",
"geo_y",
"all_data",
"[",
"idx",
"]",
"=",
"x_i_data",
"else",
":",
"all_geo_x",
"=",
"np",
".",
"append",
"(",
"all_geo_x",
",",
"geo_x",
")",
"all_geo_y",
"=",
"np",
".",
"append",
"(",
"all_geo_y",
",",
"geo_y",
")",
"all_data",
"=",
"np",
".",
"append",
"(",
"all_data",
",",
"[",
"x_i_data",
"]",
",",
"axis",
"=",
"0",
")",
"idx",
"+=",
"1",
"if",
"return_np_arrays",
":",
"nodata",
"=",
"None",
"if",
"skip_nodata",
"else",
"dst_nodata",
"if",
"replace_nodata",
"else",
"src_nodata",
"if",
"idx",
"!=",
"progress_curr",
":",
"all_geo_x",
"=",
"all_geo_x",
"[",
":",
"idx",
"]",
"all_geo_y",
"=",
"all_geo_y",
"[",
":",
"idx",
"]",
"all_data",
"=",
"all_data",
"[",
":",
"idx",
",",
":",
"]",
"result",
"=",
"all_geo_x",
",",
"all_geo_y",
",",
"all_data",
".",
"transpose",
"(",
")",
",",
"nodata",
"return",
"result"
] |
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/gdal-utils/osgeo_utils/gdal2xyz.py#L46-L217
|
|
fengbingchun/NN_Test
|
d6305825d5273e4569ccd1eda9ffa2a9c72e18d2
|
src/tiny-dnn/third_party/cpplint.py
|
python
|
ProcessFileData
|
(filename, file_extension, lines, error,
extra_check_functions=None)
|
Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
|
Performs lint checks and reports any errors to the given error function.
|
[
"Performs",
"lint",
"checks",
"and",
"reports",
"any",
"errors",
"to",
"the",
"given",
"error",
"function",
"."
] |
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=None):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if file_extension in GetHeaderExtensions():
CheckForHeaderGuard(filename, clean_lines, error)
for line in range(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
|
[
"def",
"ProcessFileData",
"(",
"filename",
",",
"file_extension",
",",
"lines",
",",
"error",
",",
"extra_check_functions",
"=",
"None",
")",
":",
"lines",
"=",
"(",
"[",
"'// marker so line numbers and indices both start at 1'",
"]",
"+",
"lines",
"+",
"[",
"'// marker so line numbers end in a known way'",
"]",
")",
"include_state",
"=",
"_IncludeState",
"(",
")",
"function_state",
"=",
"_FunctionState",
"(",
")",
"nesting_state",
"=",
"NestingState",
"(",
")",
"ResetNolintSuppressions",
"(",
")",
"CheckForCopyright",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"ProcessGlobalSuppresions",
"(",
"lines",
")",
"RemoveMultiLineComments",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"clean_lines",
"=",
"CleansedLines",
"(",
"lines",
")",
"if",
"file_extension",
"in",
"GetHeaderExtensions",
"(",
")",
":",
"CheckForHeaderGuard",
"(",
"filename",
",",
"clean_lines",
",",
"error",
")",
"for",
"line",
"in",
"range",
"(",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
":",
"ProcessLine",
"(",
"filename",
",",
"file_extension",
",",
"clean_lines",
",",
"line",
",",
"include_state",
",",
"function_state",
",",
"nesting_state",
",",
"error",
",",
"extra_check_functions",
")",
"FlagCxx11Features",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"nesting_state",
".",
"CheckCompletedBlocks",
"(",
"filename",
",",
"error",
")",
"CheckForIncludeWhatYouUse",
"(",
"filename",
",",
"clean_lines",
",",
"include_state",
",",
"error",
")",
"# Check that the .cc file has included its header if it exists.",
"if",
"_IsSourceExtension",
"(",
"file_extension",
")",
":",
"CheckHeaderFileIncluded",
"(",
"filename",
",",
"include_state",
",",
"error",
")",
"# We check here rather than inside ProcessLine so that we see raw",
"# lines rather than \"cleaned\" lines.",
"CheckForBadCharacters",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"CheckForNewlineAtEOF",
"(",
"filename",
",",
"lines",
",",
"error",
")"
] |
https://github.com/fengbingchun/NN_Test/blob/d6305825d5273e4569ccd1eda9ffa2a9c72e18d2/src/tiny-dnn/third_party/cpplint.py#L6054-L6103
|
||
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
deps/src/libxml2-2.9.1/python/libxml2.py
|
python
|
readerForFile
|
(filename, encoding, options)
|
return xmlTextReader(_obj=ret)
|
parse an XML file from the filesystem or the network. The
parsing flags @options are a combination of xmlParserOption.
|
parse an XML file from the filesystem or the network. The
parsing flags
|
[
"parse",
"an",
"XML",
"file",
"from",
"the",
"filesystem",
"or",
"the",
"network",
".",
"The",
"parsing",
"flags"
] |
def readerForFile(filename, encoding, options):
"""parse an XML file from the filesystem or the network. The
parsing flags @options are a combination of xmlParserOption. """
ret = libxml2mod.xmlReaderForFile(filename, encoding, options)
if ret is None:raise treeError('xmlReaderForFile() failed')
return xmlTextReader(_obj=ret)
|
[
"def",
"readerForFile",
"(",
"filename",
",",
"encoding",
",",
"options",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlReaderForFile",
"(",
"filename",
",",
"encoding",
",",
"options",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlReaderForFile() failed'",
")",
"return",
"xmlTextReader",
"(",
"_obj",
"=",
"ret",
")"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L1964-L1969
|
|
pytorch/pytorch
|
7176c92687d3cc847cc046bf002269c6949a21c2
|
torch/storage.py
|
python
|
TypedStorage.bool
|
(self)
|
return self._to(torch.bool)
|
Casts this storage to bool type
|
Casts this storage to bool type
|
[
"Casts",
"this",
"storage",
"to",
"bool",
"type"
] |
def bool(self):
"""Casts this storage to bool type"""
return self._to(torch.bool)
|
[
"def",
"bool",
"(",
"self",
")",
":",
"return",
"self",
".",
"_to",
"(",
"torch",
".",
"bool",
")"
] |
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/storage.py#L579-L581
|
|
okex/V3-Open-API-SDK
|
c5abb0db7e2287718e0055e17e57672ce0ec7fd9
|
okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/html5lib/treebuilders/base.py
|
python
|
TreeBuilder._setInsertFromTable
|
(self, value)
|
Switch the function used to insert an element from the
normal one to the misnested table one and back again
|
Switch the function used to insert an element from the
normal one to the misnested table one and back again
|
[
"Switch",
"the",
"function",
"used",
"to",
"insert",
"an",
"element",
"from",
"the",
"normal",
"one",
"to",
"the",
"misnested",
"table",
"one",
"and",
"back",
"again"
] |
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
|
[
"def",
"_setInsertFromTable",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_insertFromTable",
"=",
"value",
"if",
"value",
":",
"self",
".",
"insertElement",
"=",
"self",
".",
"insertElementTable",
"else",
":",
"self",
".",
"insertElement",
"=",
"self",
".",
"insertElementNormal"
] |
https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/html5lib/treebuilders/base.py#L312-L319
|
||
google/llvm-propeller
|
45c226984fe8377ebfb2ad7713c680d652ba678d
|
llvm/utils/lit/lit/util.py
|
python
|
to_string
|
(b)
|
Return the parameter as type 'str', possibly encoding it.
In Python2, the 'str' type is the same as 'bytes'. In Python3, the
'str' type is (essentially) Python2's 'unicode' type, and 'bytes' is
distinct.
|
Return the parameter as type 'str', possibly encoding it.
|
[
"Return",
"the",
"parameter",
"as",
"type",
"str",
"possibly",
"encoding",
"it",
"."
] |
def to_string(b):
"""Return the parameter as type 'str', possibly encoding it.
In Python2, the 'str' type is the same as 'bytes'. In Python3, the
'str' type is (essentially) Python2's 'unicode' type, and 'bytes' is
distinct.
"""
if isinstance(b, str):
# In Python2, this branch is taken for types 'str' and 'bytes'.
# In Python3, this branch is taken only for 'str'.
return b
if isinstance(b, bytes):
# In Python2, this branch is never taken ('bytes' is handled as 'str').
# In Python3, this is true only for 'bytes'.
try:
return b.decode('utf-8')
except UnicodeDecodeError:
# If the value is not valid Unicode, return the default
# repr-line encoding.
return str(b)
# By this point, here's what we *don't* have:
#
# - In Python2:
# - 'str' or 'bytes' (1st branch above)
# - In Python3:
# - 'str' (1st branch above)
# - 'bytes' (2nd branch above)
#
# The last type we might expect is the Python2 'unicode' type. There is no
# 'unicode' type in Python3 (all the Python3 cases were already handled). In
# order to get a 'str' object, we need to encode the 'unicode' object.
try:
return b.encode('utf-8')
except AttributeError:
raise TypeError('not sure how to convert %s to %s' % (type(b), str))
|
[
"def",
"to_string",
"(",
"b",
")",
":",
"if",
"isinstance",
"(",
"b",
",",
"str",
")",
":",
"# In Python2, this branch is taken for types 'str' and 'bytes'.",
"# In Python3, this branch is taken only for 'str'.",
"return",
"b",
"if",
"isinstance",
"(",
"b",
",",
"bytes",
")",
":",
"# In Python2, this branch is never taken ('bytes' is handled as 'str').",
"# In Python3, this is true only for 'bytes'.",
"try",
":",
"return",
"b",
".",
"decode",
"(",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
":",
"# If the value is not valid Unicode, return the default",
"# repr-line encoding.",
"return",
"str",
"(",
"b",
")",
"# By this point, here's what we *don't* have:",
"#",
"# - In Python2:",
"# - 'str' or 'bytes' (1st branch above)",
"# - In Python3:",
"# - 'str' (1st branch above)",
"# - 'bytes' (2nd branch above)",
"#",
"# The last type we might expect is the Python2 'unicode' type. There is no",
"# 'unicode' type in Python3 (all the Python3 cases were already handled). In",
"# order to get a 'str' object, we need to encode the 'unicode' object.",
"try",
":",
"return",
"b",
".",
"encode",
"(",
"'utf-8'",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'not sure how to convert %s to %s'",
"%",
"(",
"type",
"(",
"b",
")",
",",
"str",
")",
")"
] |
https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/llvm/utils/lit/lit/util.py#L59-L95
|
||
krishauser/Klampt
|
972cc83ea5befac3f653c1ba20f80155768ad519
|
Python/klampt/math/vectorops.py
|
python
|
normSquared
|
(a)
|
return sum(ai*ai for ai in a)
|
Returns the norm of a, squared.
|
Returns the norm of a, squared.
|
[
"Returns",
"the",
"norm",
"of",
"a",
"squared",
"."
] |
def normSquared(a):
"""Returns the norm of a, squared."""
return sum(ai*ai for ai in a)
|
[
"def",
"normSquared",
"(",
"a",
")",
":",
"return",
"sum",
"(",
"ai",
"*",
"ai",
"for",
"ai",
"in",
"a",
")"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/math/vectorops.py#L68-L70
|
|
cms-sw/cmssw
|
fd9de012d503d3405420bcbeec0ec879baa57cf2
|
Validation/RecoTrack/python/plotting/ntupleDataFormat.py
|
python
|
_SimHitMatchAdaptor.matchedSimHitInfos
|
(self)
|
Returns a generator for matched SimHits.
The generator returns SimHitMatchInfo objects.
|
Returns a generator for matched SimHits.
|
[
"Returns",
"a",
"generator",
"for",
"matched",
"SimHits",
"."
] |
def matchedSimHitInfos(self):
"""Returns a generator for matched SimHits.
The generator returns SimHitMatchInfo objects.
"""
self._checkIsValid()
for imatch in range(self._nMatchedSimHits()):
yield SimHitMatchInfo(self._tree, self._index, imatch, self._prefix)
|
[
"def",
"matchedSimHitInfos",
"(",
"self",
")",
":",
"self",
".",
"_checkIsValid",
"(",
")",
"for",
"imatch",
"in",
"range",
"(",
"self",
".",
"_nMatchedSimHits",
"(",
")",
")",
":",
"yield",
"SimHitMatchInfo",
"(",
"self",
".",
"_tree",
",",
"self",
".",
"_index",
",",
"imatch",
",",
"self",
".",
"_prefix",
")"
] |
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/Validation/RecoTrack/python/plotting/ntupleDataFormat.py#L288-L295
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/ed_txt.py
|
python
|
EdFile.Clone
|
(self)
|
return fileobj
|
Clone the file object
@return: EdFile
|
Clone the file object
@return: EdFile
|
[
"Clone",
"the",
"file",
"object",
"@return",
":",
"EdFile"
] |
def Clone(self):
"""Clone the file object
@return: EdFile
"""
fileobj = EdFile(self.Path, self.ModTime)
fileobj.SetLastError(self.last_err)
fileobj.SetEncoding(self.encoding)
fileobj.bom = self.bom
fileobj._magic = dict(self._magic)
fileobj._fuzzy_enc = self._fuzzy_enc
for cback in self._mcallback:
fileobj.AddModifiedCallback(cback)
return fileobj
|
[
"def",
"Clone",
"(",
"self",
")",
":",
"fileobj",
"=",
"EdFile",
"(",
"self",
".",
"Path",
",",
"self",
".",
"ModTime",
")",
"fileobj",
".",
"SetLastError",
"(",
"self",
".",
"last_err",
")",
"fileobj",
".",
"SetEncoding",
"(",
"self",
".",
"encoding",
")",
"fileobj",
".",
"bom",
"=",
"self",
".",
"bom",
"fileobj",
".",
"_magic",
"=",
"dict",
"(",
"self",
".",
"_magic",
")",
"fileobj",
".",
"_fuzzy_enc",
"=",
"self",
".",
"_fuzzy_enc",
"for",
"cback",
"in",
"self",
".",
"_mcallback",
":",
"fileobj",
".",
"AddModifiedCallback",
"(",
"cback",
")",
"return",
"fileobj"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_txt.py#L143-L156
|
|
yue/yue
|
619d62c191b13c51c01be451dc48917c34a5aefc
|
building/tools/cpplint.py
|
python
|
CheckStyle
|
(filename, clean_lines, linenum, file_extension, nesting_state,
error)
|
Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
|
Checks rules from the 'C++ style rules' section of cppguide.html.
|
[
"Checks",
"rules",
"from",
"the",
"C",
"++",
"style",
"rules",
"section",
"of",
"cppguide",
".",
"html",
"."
] |
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings,
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
prev = raw_lines[linenum - 1] if linenum > 0 else ''
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
# There are certain situations we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
if (not Search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# Check if the line is a header guard.
is_header_guard = False
if IsHeaderExtension(file_extension):
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developers fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
|
[
"def",
"CheckStyle",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"file_extension",
",",
"nesting_state",
",",
"error",
")",
":",
"# Don't use \"elided\" lines here, otherwise we can't check commented lines.",
"# Don't want to use \"raw\" either, because we don't want to check inside C++11",
"# raw strings,",
"raw_lines",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"raw_lines",
"[",
"linenum",
"]",
"prev",
"=",
"raw_lines",
"[",
"linenum",
"-",
"1",
"]",
"if",
"linenum",
">",
"0",
"else",
"''",
"if",
"line",
".",
"find",
"(",
"'\\t'",
")",
"!=",
"-",
"1",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/tab'",
",",
"1",
",",
"'Tab found; better to use spaces'",
")",
"# One or three blank spaces at the beginning of the line is weird; it's",
"# hard to reconcile that with 2-space indents.",
"# NOTE: here are the conditions rob pike used for his tests. Mine aren't",
"# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces",
"# if(RLENGTH > 20) complain = 0;",
"# if(match($0, \" +(error|private|public|protected):\")) complain = 0;",
"# if(match(prev, \"&& *$\")) complain = 0;",
"# if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;",
"# if(match(prev, \"[\\\",=><] *$\")) complain = 0;",
"# if(match($0, \" <<\")) complain = 0;",
"# if(match(prev, \" +for \\\\(\")) complain = 0;",
"# if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;",
"scope_or_label_pattern",
"=",
"r'\\s*\\w+\\s*:\\s*\\\\?$'",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"initial_spaces",
"=",
"0",
"cleansed_line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"while",
"initial_spaces",
"<",
"len",
"(",
"line",
")",
"and",
"line",
"[",
"initial_spaces",
"]",
"==",
"' '",
":",
"initial_spaces",
"+=",
"1",
"# There are certain situations we allow one space, notably for",
"# section labels, and also lines containing multi-line raw strings.",
"# We also don't check for lines that look like continuation lines",
"# (of lines ending in double quotes, commas, equals, or angle brackets)",
"# because the rules for how to indent those are non-trivial.",
"if",
"(",
"not",
"Search",
"(",
"r'[\",=><] *$'",
",",
"prev",
")",
"and",
"(",
"initial_spaces",
"==",
"1",
"or",
"initial_spaces",
"==",
"3",
")",
"and",
"not",
"Match",
"(",
"scope_or_label_pattern",
",",
"cleansed_line",
")",
"and",
"not",
"(",
"clean_lines",
".",
"raw_lines",
"[",
"linenum",
"]",
"!=",
"line",
"and",
"Match",
"(",
"r'^\\s*\"\"'",
",",
"line",
")",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/indent'",
",",
"3",
",",
"'Weird number of spaces at line-start. '",
"'Are you using a 2-space indent?'",
")",
"if",
"line",
"and",
"line",
"[",
"-",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/end_of_line'",
",",
"4",
",",
"'Line ends in whitespace. Consider deleting these extra spaces.'",
")",
"# Check if the line is a header guard.",
"is_header_guard",
"=",
"False",
"if",
"IsHeaderExtension",
"(",
"file_extension",
")",
":",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"if",
"(",
"line",
".",
"startswith",
"(",
"'#ifndef %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#define %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#endif // %s'",
"%",
"cppvar",
")",
")",
":",
"is_header_guard",
"=",
"True",
"# #include lines and header guards can be long, since there's no clean way to",
"# split them.",
"#",
"# URLs can be long too. It's possible to split these, but it makes them",
"# harder to cut&paste.",
"#",
"# The \"$Id:...$\" comment may also get very long without it being the",
"# developers fault.",
"if",
"(",
"not",
"line",
".",
"startswith",
"(",
"'#include'",
")",
"and",
"not",
"is_header_guard",
"and",
"not",
"Match",
"(",
"r'^\\s*//.*http(s?)://\\S*$'",
",",
"line",
")",
"and",
"not",
"Match",
"(",
"r'^\\s*//\\s*[^\\s]*$'",
",",
"line",
")",
"and",
"not",
"Match",
"(",
"r'^// \\$Id:.*#[0-9]+ \\$$'",
",",
"line",
")",
")",
":",
"line_width",
"=",
"GetLineWidth",
"(",
"line",
")",
"if",
"line_width",
">",
"_line_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"2",
",",
"'Lines should be <= %i characters long'",
"%",
"_line_length",
")",
"if",
"(",
"cleansed_line",
".",
"count",
"(",
"';'",
")",
">",
"1",
"and",
"# for loops are allowed two ;'s (and may run over two lines).",
"cleansed_line",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"and",
"(",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"or",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"';'",
")",
"!=",
"-",
"1",
")",
"and",
"# It's ok to have many commands in a switch case that fits in 1 line",
"not",
"(",
"(",
"cleansed_line",
".",
"find",
"(",
"'case '",
")",
"!=",
"-",
"1",
"or",
"cleansed_line",
".",
"find",
"(",
"'default:'",
")",
"!=",
"-",
"1",
")",
"and",
"cleansed_line",
".",
"find",
"(",
"'break;'",
")",
"!=",
"-",
"1",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"0",
",",
"'More than one command on the same line'",
")",
"# Some more style checks",
"CheckBraces",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckTrailingSemicolon",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckEmptyBlockBody",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckOperatorSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckParenthesisSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckCommaSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckBracesSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckSpacingForFunctionCall",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAltTokens",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"classinfo",
":",
"CheckSectionSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"classinfo",
",",
"linenum",
",",
"error",
")"
] |
https://github.com/yue/yue/blob/619d62c191b13c51c01be451dc48917c34a5aefc/building/tools/cpplint.py#L4295-L4411
|
||
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/contrib/graph_editor/subgraph.py
|
python
|
SubGraphView._remap_outputs
|
(self, new_output_indices)
|
Remap the outputs of the subgraph in-place.
|
Remap the outputs of the subgraph in-place.
|
[
"Remap",
"the",
"outputs",
"of",
"the",
"subgraph",
"in",
"-",
"place",
"."
] |
def _remap_outputs(self, new_output_indices):
"""Remap the outputs of the subgraph in-place."""
new_output_indices = _finalize_indices(new_output_indices, self._output_ts)
_check_within_range(
new_output_indices, len(self._output_ts), repetition=True)
self._output_ts = [self._output_ts[i] for i in new_output_indices]
|
[
"def",
"_remap_outputs",
"(",
"self",
",",
"new_output_indices",
")",
":",
"new_output_indices",
"=",
"_finalize_indices",
"(",
"new_output_indices",
",",
"self",
".",
"_output_ts",
")",
"_check_within_range",
"(",
"new_output_indices",
",",
"len",
"(",
"self",
".",
"_output_ts",
")",
",",
"repetition",
"=",
"True",
")",
"self",
".",
"_output_ts",
"=",
"[",
"self",
".",
"_output_ts",
"[",
"i",
"]",
"for",
"i",
"in",
"new_output_indices",
"]"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/graph_editor/subgraph.py#L303-L308
|
||
ceph/ceph
|
959663007321a369c83218414a29bd9dbc8bda3a
|
src/pybind/mgr/rbd_support/module.py
|
python
|
Module.mirror_snapshot_schedule_remove
|
(self,
level_spec: str,
interval: Optional[str] = None,
start_time: Optional[str] = None)
|
return self.mirror_snapshot_schedule.remove_schedule(spec, interval, start_time)
|
Remove rbd mirror snapshot schedule
|
Remove rbd mirror snapshot schedule
|
[
"Remove",
"rbd",
"mirror",
"snapshot",
"schedule"
] |
def mirror_snapshot_schedule_remove(self,
level_spec: str,
interval: Optional[str] = None,
start_time: Optional[str] = None) -> Tuple[int, str, str]:
"""
Remove rbd mirror snapshot schedule
"""
spec = LevelSpec.from_name(self, level_spec, namespace_validator, image_validator)
return self.mirror_snapshot_schedule.remove_schedule(spec, interval, start_time)
|
[
"def",
"mirror_snapshot_schedule_remove",
"(",
"self",
",",
"level_spec",
":",
"str",
",",
"interval",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"start_time",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"int",
",",
"str",
",",
"str",
"]",
":",
"spec",
"=",
"LevelSpec",
".",
"from_name",
"(",
"self",
",",
"level_spec",
",",
"namespace_validator",
",",
"image_validator",
")",
"return",
"self",
".",
"mirror_snapshot_schedule",
".",
"remove_schedule",
"(",
"spec",
",",
"interval",
",",
"start_time",
")"
] |
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/rbd_support/module.py#L98-L106
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/turtle.py
|
python
|
RawTurtle._newLine
|
(self, usePos=True)
|
Closes current line item and starts a new one.
Remark: if current line became too long, animation
performance (via _drawline) slowed down considerably.
|
Closes current line item and starts a new one.
Remark: if current line became too long, animation
performance (via _drawline) slowed down considerably.
|
[
"Closes",
"current",
"line",
"item",
"and",
"starts",
"a",
"new",
"one",
".",
"Remark",
":",
"if",
"current",
"line",
"became",
"too",
"long",
"animation",
"performance",
"(",
"via",
"_drawline",
")",
"slowed",
"down",
"considerably",
"."
] |
def _newLine(self, usePos=True):
"""Closes current line item and starts a new one.
Remark: if current line became too long, animation
performance (via _drawline) slowed down considerably.
"""
if len(self.currentLine) > 1:
self.screen._drawline(self.currentLineItem, self.currentLine,
self._pencolor, self._pensize)
self.currentLineItem = self.screen._createline()
self.items.append(self.currentLineItem)
else:
self.screen._drawline(self.currentLineItem, top=True)
self.currentLine = []
if usePos:
self.currentLine = [self._position]
|
[
"def",
"_newLine",
"(",
"self",
",",
"usePos",
"=",
"True",
")",
":",
"if",
"len",
"(",
"self",
".",
"currentLine",
")",
">",
"1",
":",
"self",
".",
"screen",
".",
"_drawline",
"(",
"self",
".",
"currentLineItem",
",",
"self",
".",
"currentLine",
",",
"self",
".",
"_pencolor",
",",
"self",
".",
"_pensize",
")",
"self",
".",
"currentLineItem",
"=",
"self",
".",
"screen",
".",
"_createline",
"(",
")",
"self",
".",
"items",
".",
"append",
"(",
"self",
".",
"currentLineItem",
")",
"else",
":",
"self",
".",
"screen",
".",
"_drawline",
"(",
"self",
".",
"currentLineItem",
",",
"top",
"=",
"True",
")",
"self",
".",
"currentLine",
"=",
"[",
"]",
"if",
"usePos",
":",
"self",
".",
"currentLine",
"=",
"[",
"self",
".",
"_position",
"]"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/turtle.py#L3280-L3294
|
||
giuspen/cherrytree
|
84712f206478fcf9acf30174009ad28c648c6344
|
pygtk2/modules/core.py
|
python
|
CherryTree.get_tree_iter_from_node_id_children
|
(self, father_iter, node_id)
|
return None
|
Iterative function searching for Node Id between Children
|
Iterative function searching for Node Id between Children
|
[
"Iterative",
"function",
"searching",
"for",
"Node",
"Id",
"between",
"Children"
] |
def get_tree_iter_from_node_id_children(self, father_iter, node_id):
"""Iterative function searching for Node Id between Children"""
tree_iter = self.treestore.iter_children(father_iter)
while tree_iter != None:
if self.treestore[tree_iter][3] == node_id: return tree_iter
child_tree_iter = self.get_tree_iter_from_node_id_children(tree_iter, node_id)
if child_tree_iter != None: return child_tree_iter
tree_iter = self.treestore.iter_next(tree_iter)
return None
|
[
"def",
"get_tree_iter_from_node_id_children",
"(",
"self",
",",
"father_iter",
",",
"node_id",
")",
":",
"tree_iter",
"=",
"self",
".",
"treestore",
".",
"iter_children",
"(",
"father_iter",
")",
"while",
"tree_iter",
"!=",
"None",
":",
"if",
"self",
".",
"treestore",
"[",
"tree_iter",
"]",
"[",
"3",
"]",
"==",
"node_id",
":",
"return",
"tree_iter",
"child_tree_iter",
"=",
"self",
".",
"get_tree_iter_from_node_id_children",
"(",
"tree_iter",
",",
"node_id",
")",
"if",
"child_tree_iter",
"!=",
"None",
":",
"return",
"child_tree_iter",
"tree_iter",
"=",
"self",
".",
"treestore",
".",
"iter_next",
"(",
"tree_iter",
")",
"return",
"None"
] |
https://github.com/giuspen/cherrytree/blob/84712f206478fcf9acf30174009ad28c648c6344/pygtk2/modules/core.py#L2597-L2605
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/pyparsing.py
|
python
|
unicode_set.alphas
|
(cls)
|
return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
|
all alphabetic characters in this range
|
all alphabetic characters in this range
|
[
"all",
"alphabetic",
"characters",
"in",
"this",
"range"
] |
def alphas(cls):
"all alphabetic characters in this range"
return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
|
[
"def",
"alphas",
"(",
"cls",
")",
":",
"return",
"u''",
".",
"join",
"(",
"filter",
"(",
"unicode",
".",
"isalpha",
",",
"cls",
".",
"_get_chars_for_ranges",
"(",
")",
")",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/pyparsing.py#L13495-L13499
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/tree.py
|
python
|
TreeItem.GetSelectedIconName
|
(self)
|
Return name of icon to be displayed when selected.
|
Return name of icon to be displayed when selected.
|
[
"Return",
"name",
"of",
"icon",
"to",
"be",
"displayed",
"when",
"selected",
"."
] |
def GetSelectedIconName(self):
"""Return name of icon to be displayed when selected."""
|
[
"def",
"GetSelectedIconName",
"(",
"self",
")",
":"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/tree.py#L381-L382
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/jedi/jedi/api/classes.py
|
python
|
BaseDefinition._path
|
(self)
|
return reversed(list(to_reverse()))
|
The path to a module/class/function definition.
|
The path to a module/class/function definition.
|
[
"The",
"path",
"to",
"a",
"module",
"/",
"class",
"/",
"function",
"definition",
"."
] |
def _path(self):
"""The path to a module/class/function definition."""
def to_reverse():
name = self._name
if name.api_type == 'module':
try:
name = list(name.infer())[0].name
except IndexError:
pass
if name.api_type in 'module':
module_contexts = name.infer()
if module_contexts:
module_context, = module_contexts
for n in reversed(module_context.py__name__().split('.')):
yield n
else:
# We don't really know anything about the path here. This
# module is just an import that would lead in an
# ImportError. So simply return the name.
yield name.string_name
return
else:
yield name.string_name
parent_context = name.parent_context
while parent_context is not None:
try:
method = parent_context.py__name__
except AttributeError:
try:
yield parent_context.name.string_name
except AttributeError:
pass
else:
for name in reversed(method().split('.')):
yield name
parent_context = parent_context.parent_context
return reversed(list(to_reverse()))
|
[
"def",
"_path",
"(",
"self",
")",
":",
"def",
"to_reverse",
"(",
")",
":",
"name",
"=",
"self",
".",
"_name",
"if",
"name",
".",
"api_type",
"==",
"'module'",
":",
"try",
":",
"name",
"=",
"list",
"(",
"name",
".",
"infer",
"(",
")",
")",
"[",
"0",
"]",
".",
"name",
"except",
"IndexError",
":",
"pass",
"if",
"name",
".",
"api_type",
"in",
"'module'",
":",
"module_contexts",
"=",
"name",
".",
"infer",
"(",
")",
"if",
"module_contexts",
":",
"module_context",
",",
"=",
"module_contexts",
"for",
"n",
"in",
"reversed",
"(",
"module_context",
".",
"py__name__",
"(",
")",
".",
"split",
"(",
"'.'",
")",
")",
":",
"yield",
"n",
"else",
":",
"# We don't really know anything about the path here. This",
"# module is just an import that would lead in an",
"# ImportError. So simply return the name.",
"yield",
"name",
".",
"string_name",
"return",
"else",
":",
"yield",
"name",
".",
"string_name",
"parent_context",
"=",
"name",
".",
"parent_context",
"while",
"parent_context",
"is",
"not",
"None",
":",
"try",
":",
"method",
"=",
"parent_context",
".",
"py__name__",
"except",
"AttributeError",
":",
"try",
":",
"yield",
"parent_context",
".",
"name",
".",
"string_name",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"for",
"name",
"in",
"reversed",
"(",
"method",
"(",
")",
".",
"split",
"(",
"'.'",
")",
")",
":",
"yield",
"name",
"parent_context",
"=",
"parent_context",
".",
"parent_context",
"return",
"reversed",
"(",
"list",
"(",
"to_reverse",
"(",
")",
")",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/jedi/jedi/api/classes.py#L150-L188
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
samples/ide/activegrid/tool/FindService.py
|
python
|
FindService.GetReplaceString
|
(self)
|
return wx.ConfigBase_Get().Read(FIND_MATCHREPLACE, "")
|
Load the replace pattern from registry
|
Load the replace pattern from registry
|
[
"Load",
"the",
"replace",
"pattern",
"from",
"registry"
] |
def GetReplaceString(self):
""" Load the replace pattern from registry """
return wx.ConfigBase_Get().Read(FIND_MATCHREPLACE, "")
|
[
"def",
"GetReplaceString",
"(",
"self",
")",
":",
"return",
"wx",
".",
"ConfigBase_Get",
"(",
")",
".",
"Read",
"(",
"FIND_MATCHREPLACE",
",",
"\"\"",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/ide/activegrid/tool/FindService.py#L297-L299
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/richtext.py
|
python
|
TextAttrDimension.Apply
|
(*args, **kwargs)
|
return _richtext.TextAttrDimension_Apply(*args, **kwargs)
|
Apply(self, TextAttrDimension dim, TextAttrDimension compareWith=None) -> bool
|
Apply(self, TextAttrDimension dim, TextAttrDimension compareWith=None) -> bool
|
[
"Apply",
"(",
"self",
"TextAttrDimension",
"dim",
"TextAttrDimension",
"compareWith",
"=",
"None",
")",
"-",
">",
"bool"
] |
def Apply(*args, **kwargs):
"""Apply(self, TextAttrDimension dim, TextAttrDimension compareWith=None) -> bool"""
return _richtext.TextAttrDimension_Apply(*args, **kwargs)
|
[
"def",
"Apply",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"TextAttrDimension_Apply",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L133-L135
|
|
pytorch/pytorch
|
7176c92687d3cc847cc046bf002269c6949a21c2
|
caffe2/python/tt_core.py
|
python
|
init_tt_cores
|
(inp_sizes, out_sizes, tt_ranks, seed=1234)
|
return (0.1 / glarot_style) * np.array(cores).astype(np.float32)
|
Initialize randomized orthogonalized TT-cores.
This method should be used when a TT-layer is trained from scratch. The
sizes of each of the cores are specified by the inp_sizes and out_sizes, and
the respective tt_ranks will dictate the ranks of each of the cores. Note
that a larger set of tt_ranks will result in slower computation but will
result in more accurate approximations. The size of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
Note that the following relationships of lengths of each input is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
Args:
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
seed: integer to seed the random number generator
Returns:
cores: One-dimensional list of cores concatentated along an axis
|
Initialize randomized orthogonalized TT-cores.
|
[
"Initialize",
"randomized",
"orthogonalized",
"TT",
"-",
"cores",
"."
] |
def init_tt_cores(inp_sizes, out_sizes, tt_ranks, seed=1234):
"""
Initialize randomized orthogonalized TT-cores.
This method should be used when a TT-layer is trained from scratch. The
sizes of each of the cores are specified by the inp_sizes and out_sizes, and
the respective tt_ranks will dictate the ranks of each of the cores. Note
that a larger set of tt_ranks will result in slower computation but will
result in more accurate approximations. The size of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
Note that the following relationships of lengths of each input is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
Args:
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
seed: integer to seed the random number generator
Returns:
cores: One-dimensional list of cores concatentated along an axis
"""
np.random.seed(seed)
# Assert that the sizes of each input is correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dims (" + \
str(len(out_sizes)) + ")."
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Initialize the cores array
cores_len = np.sum(
inp_sizes * out_sizes * tt_ranks[1:] * tt_ranks[:-1])
cores = np.zeros(cores_len)
cores_idx = 0
rv = 1
# Compute the full list of cores by computing each individual one
for i in range(inp_sizes.shape[0]):
shape = [tt_ranks[i],
inp_sizes[i],
out_sizes[i],
tt_ranks[i + 1]]
# Precompute the shape of each core
tall_shape = (np.prod(shape[:3]), shape[3])
# Randomly initialize the current core using a normal distribution
curr_core = np.dot(rv, np.random.normal(
0, 1, size=(shape[0], np.prod(shape[1:]))))
curr_core = curr_core.reshape(tall_shape)
# Orthogonalize the initialized current core and append to cores list
if i < inp_sizes.shape[0] - 1:
curr_core, rv = np.linalg.qr(curr_core)
cores[cores_idx:cores_idx +
curr_core.size] = curr_core.flatten()
cores_idx += curr_core.size
# Normalize the list of arrays using this Glarot trick
glarot_style = (np.prod(inp_sizes) *
np.prod(tt_ranks))**(1.0 / inp_sizes.shape[0])
return (0.1 / glarot_style) * np.array(cores).astype(np.float32)
|
[
"def",
"init_tt_cores",
"(",
"inp_sizes",
",",
"out_sizes",
",",
"tt_ranks",
",",
"seed",
"=",
"1234",
")",
":",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"# Assert that the sizes of each input is correct",
"assert",
"(",
"len",
"(",
"inp_sizes",
")",
"==",
"len",
"(",
"out_sizes",
")",
")",
",",
"\"The number of input dimensions (\"",
"+",
"str",
"(",
"len",
"(",
"inp_sizes",
")",
")",
"+",
"\") must be equal to the number of output dimensions (\"",
"+",
"str",
"(",
"len",
"(",
"out_sizes",
")",
")",
"+",
"\").\"",
"assert",
"(",
"len",
"(",
"tt_ranks",
")",
"==",
"len",
"(",
"inp_sizes",
")",
"+",
"1",
")",
",",
"\"The number of tt-ranks (\"",
"+",
"str",
"(",
"len",
"(",
"tt_ranks",
")",
")",
"+",
"\") must be \"",
"+",
"\"one more than the number of input and output dims (\"",
"+",
"str",
"(",
"len",
"(",
"out_sizes",
")",
")",
"+",
"\").\"",
"# Convert to numpy arrays",
"inp_sizes",
"=",
"np",
".",
"array",
"(",
"inp_sizes",
")",
"out_sizes",
"=",
"np",
".",
"array",
"(",
"out_sizes",
")",
"tt_ranks",
"=",
"np",
".",
"array",
"(",
"tt_ranks",
")",
"# Initialize the cores array",
"cores_len",
"=",
"np",
".",
"sum",
"(",
"inp_sizes",
"*",
"out_sizes",
"*",
"tt_ranks",
"[",
"1",
":",
"]",
"*",
"tt_ranks",
"[",
":",
"-",
"1",
"]",
")",
"cores",
"=",
"np",
".",
"zeros",
"(",
"cores_len",
")",
"cores_idx",
"=",
"0",
"rv",
"=",
"1",
"# Compute the full list of cores by computing each individual one",
"for",
"i",
"in",
"range",
"(",
"inp_sizes",
".",
"shape",
"[",
"0",
"]",
")",
":",
"shape",
"=",
"[",
"tt_ranks",
"[",
"i",
"]",
",",
"inp_sizes",
"[",
"i",
"]",
",",
"out_sizes",
"[",
"i",
"]",
",",
"tt_ranks",
"[",
"i",
"+",
"1",
"]",
"]",
"# Precompute the shape of each core",
"tall_shape",
"=",
"(",
"np",
".",
"prod",
"(",
"shape",
"[",
":",
"3",
"]",
")",
",",
"shape",
"[",
"3",
"]",
")",
"# Randomly initialize the current core using a normal distribution",
"curr_core",
"=",
"np",
".",
"dot",
"(",
"rv",
",",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"1",
",",
"size",
"=",
"(",
"shape",
"[",
"0",
"]",
",",
"np",
".",
"prod",
"(",
"shape",
"[",
"1",
":",
"]",
")",
")",
")",
")",
"curr_core",
"=",
"curr_core",
".",
"reshape",
"(",
"tall_shape",
")",
"# Orthogonalize the initialized current core and append to cores list",
"if",
"i",
"<",
"inp_sizes",
".",
"shape",
"[",
"0",
"]",
"-",
"1",
":",
"curr_core",
",",
"rv",
"=",
"np",
".",
"linalg",
".",
"qr",
"(",
"curr_core",
")",
"cores",
"[",
"cores_idx",
":",
"cores_idx",
"+",
"curr_core",
".",
"size",
"]",
"=",
"curr_core",
".",
"flatten",
"(",
")",
"cores_idx",
"+=",
"curr_core",
".",
"size",
"# Normalize the list of arrays using this Glarot trick",
"glarot_style",
"=",
"(",
"np",
".",
"prod",
"(",
"inp_sizes",
")",
"*",
"np",
".",
"prod",
"(",
"tt_ranks",
")",
")",
"**",
"(",
"1.0",
"/",
"inp_sizes",
".",
"shape",
"[",
"0",
"]",
")",
"return",
"(",
"0.1",
"/",
"glarot_style",
")",
"*",
"np",
".",
"array",
"(",
"cores",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")"
] |
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/tt_core.py#L21-L97
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_controls.py
|
python
|
TreeCtrl.GetItemTextColour
|
(*args, **kwargs)
|
return _controls_.TreeCtrl_GetItemTextColour(*args, **kwargs)
|
GetItemTextColour(self, TreeItemId item) -> Colour
|
GetItemTextColour(self, TreeItemId item) -> Colour
|
[
"GetItemTextColour",
"(",
"self",
"TreeItemId",
"item",
")",
"-",
">",
"Colour"
] |
def GetItemTextColour(*args, **kwargs):
"""GetItemTextColour(self, TreeItemId item) -> Colour"""
return _controls_.TreeCtrl_GetItemTextColour(*args, **kwargs)
|
[
"def",
"GetItemTextColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TreeCtrl_GetItemTextColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L5270-L5272
|
|
eventql/eventql
|
7ca0dbb2e683b525620ea30dc40540a22d5eb227
|
deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/mozjar.py
|
python
|
JarStruct.size
|
(self)
|
return size
|
Return the size of the data structure, given the current values of all
variable length fields.
|
Return the size of the data structure, given the current values of all
variable length fields.
|
[
"Return",
"the",
"size",
"of",
"the",
"data",
"structure",
"given",
"the",
"current",
"values",
"of",
"all",
"variable",
"length",
"fields",
"."
] |
def size(self):
'''
Return the size of the data structure, given the current values of all
variable length fields.
'''
size = JarStruct.TYPE_MAPPING['uint32'][1]
for name, type in self.STRUCT.iteritems():
if type in JarStruct.TYPE_MAPPING:
size += JarStruct.TYPE_MAPPING[type][1]
else:
size += len(self[name])
return size
|
[
"def",
"size",
"(",
"self",
")",
":",
"size",
"=",
"JarStruct",
".",
"TYPE_MAPPING",
"[",
"'uint32'",
"]",
"[",
"1",
"]",
"for",
"name",
",",
"type",
"in",
"self",
".",
"STRUCT",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"in",
"JarStruct",
".",
"TYPE_MAPPING",
":",
"size",
"+=",
"JarStruct",
".",
"TYPE_MAPPING",
"[",
"type",
"]",
"[",
"1",
"]",
"else",
":",
"size",
"+=",
"len",
"(",
"self",
"[",
"name",
"]",
")",
"return",
"size"
] |
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/mozjar.py#L150-L161
|
|
runtimejs/runtime
|
0a6e84c30823d35a4548d6634166784260ae7b74
|
deps/v8/tools/sanitizers/sancov_formatter.py
|
python
|
executables
|
()
|
Iterates over executable files in the build directory.
|
Iterates over executable files in the build directory.
|
[
"Iterates",
"over",
"executable",
"files",
"in",
"the",
"build",
"directory",
"."
] |
def executables():
"""Iterates over executable files in the build directory."""
for f in os.listdir(BUILD_DIR):
file_path = os.path.join(BUILD_DIR, f)
if (os.path.isfile(file_path) and
os.access(file_path, os.X_OK) and
f not in EXE_BLACKLIST):
yield file_path
|
[
"def",
"executables",
"(",
")",
":",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"BUILD_DIR",
")",
":",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"BUILD_DIR",
",",
"f",
")",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
"and",
"os",
".",
"access",
"(",
"file_path",
",",
"os",
".",
"X_OK",
")",
"and",
"f",
"not",
"in",
"EXE_BLACKLIST",
")",
":",
"yield",
"file_path"
] |
https://github.com/runtimejs/runtime/blob/0a6e84c30823d35a4548d6634166784260ae7b74/deps/v8/tools/sanitizers/sancov_formatter.py#L108-L115
|
||
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
qt/python/mantidqtinterfaces/mantidqtinterfaces/reduction_gui/widgets/sans/hfir_sample_data.py
|
python
|
BeamSpreader.initialize_content
|
(self)
|
Declare the validators and event connections for the
widgets loaded through the .ui file.
|
Declare the validators and event connections for the
widgets loaded through the .ui file.
|
[
"Declare",
"the",
"validators",
"and",
"event",
"connections",
"for",
"the",
"widgets",
"loaded",
"through",
"the",
".",
"ui",
"file",
"."
] |
def initialize_content(self):
"""
Declare the validators and event connections for the
widgets loaded through the .ui file.
"""
# Validators
self._content.spreader_trans_edit.setValidator(QDoubleValidator(self._content.spreader_trans_edit))
self._content.spreader_trans_spread_edit.setValidator(QDoubleValidator(self._content.spreader_trans_spread_edit))
# Connections
self._content.sample_scatt_browse.clicked.connect(self._sample_scatt_browse)
self._content.sample_spread_browse.clicked.connect(self._sample_spread_browse)
self._content.direct_scatt_browse.clicked.connect(self._direct_scatt_browse)
self._content.direct_spread_browse.clicked.connect(self._direct_spread_browse)
self._content.sample_scatt_plot.clicked.connect(self._sample_scatt_plot_clicked)
self._content.sample_spread_plot.clicked.connect(self._sample_spread_plot_clicked)
self._content.direct_scatt_plot.clicked.connect(self._direct_scatt_plot_clicked)
self._content.direct_spread_plot.clicked.connect(self._direct_spread_plot_clicked)
if not self._has_instrument_view:
self._content.sample_scatt_plot.hide()
self._content.sample_spread_plot.hide()
self._content.direct_scatt_plot.hide()
self._content.direct_spread_plot.hide()
|
[
"def",
"initialize_content",
"(",
"self",
")",
":",
"# Validators",
"self",
".",
"_content",
".",
"spreader_trans_edit",
".",
"setValidator",
"(",
"QDoubleValidator",
"(",
"self",
".",
"_content",
".",
"spreader_trans_edit",
")",
")",
"self",
".",
"_content",
".",
"spreader_trans_spread_edit",
".",
"setValidator",
"(",
"QDoubleValidator",
"(",
"self",
".",
"_content",
".",
"spreader_trans_spread_edit",
")",
")",
"# Connections",
"self",
".",
"_content",
".",
"sample_scatt_browse",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_sample_scatt_browse",
")",
"self",
".",
"_content",
".",
"sample_spread_browse",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_sample_spread_browse",
")",
"self",
".",
"_content",
".",
"direct_scatt_browse",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_direct_scatt_browse",
")",
"self",
".",
"_content",
".",
"direct_spread_browse",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_direct_spread_browse",
")",
"self",
".",
"_content",
".",
"sample_scatt_plot",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_sample_scatt_plot_clicked",
")",
"self",
".",
"_content",
".",
"sample_spread_plot",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_sample_spread_plot_clicked",
")",
"self",
".",
"_content",
".",
"direct_scatt_plot",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_direct_scatt_plot_clicked",
")",
"self",
".",
"_content",
".",
"direct_spread_plot",
".",
"clicked",
".",
"connect",
"(",
"self",
".",
"_direct_spread_plot_clicked",
")",
"if",
"not",
"self",
".",
"_has_instrument_view",
":",
"self",
".",
"_content",
".",
"sample_scatt_plot",
".",
"hide",
"(",
")",
"self",
".",
"_content",
".",
"sample_spread_plot",
".",
"hide",
"(",
")",
"self",
".",
"_content",
".",
"direct_scatt_plot",
".",
"hide",
"(",
")",
"self",
".",
"_content",
".",
"direct_spread_plot",
".",
"hide",
"(",
")"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/reduction_gui/widgets/sans/hfir_sample_data.py#L125-L149
|
||
devsisters/libquic
|
8954789a056d8e7d5fcb6452fd1572ca57eb5c4e
|
src/third_party/protobuf/python/google/protobuf/service.py
|
python
|
RpcController.SetFailed
|
(self, reason)
|
Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
|
Sets a failure reason.
|
[
"Sets",
"a",
"failure",
"reason",
"."
] |
def SetFailed(self, reason):
"""Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
"""
raise NotImplementedError
|
[
"def",
"SetFailed",
"(",
"self",
",",
"reason",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/src/third_party/protobuf/python/google/protobuf/service.py#L167-L176
|
||
nnrg/opennero
|
43e12a1bcba6e228639db3886fec1dc47ddc24cb
|
mods/TowerofHanoi/agent.py
|
python
|
TowerAgentNLP.act
|
(self, time, observations, reward)
|
return an action given the reward for the previous
action and the new observations
|
return an action given the reward for the previous
action and the new observations
|
[
"return",
"an",
"action",
"given",
"the",
"reward",
"for",
"the",
"previous",
"action",
"and",
"the",
"new",
"observations"
] |
def act(self, time, observations, reward):
"""
return an action given the reward for the previous
action and the new observations
"""
if len(self.action_list) > 0:
return self.action_list.pop(0)
else:
self.action_list = self.generate_action_list()
return 0
|
[
"def",
"act",
"(",
"self",
",",
"time",
",",
"observations",
",",
"reward",
")",
":",
"if",
"len",
"(",
"self",
".",
"action_list",
")",
">",
"0",
":",
"return",
"self",
".",
"action_list",
".",
"pop",
"(",
"0",
")",
"else",
":",
"self",
".",
"action_list",
"=",
"self",
".",
"generate_action_list",
"(",
")",
"return",
"0"
] |
https://github.com/nnrg/opennero/blob/43e12a1bcba6e228639db3886fec1dc47ddc24cb/mods/TowerofHanoi/agent.py#L552-L561
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.