nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
shoaibrayeen/Programmers-Community | 1d352fb3e6ac5e2e7d9472d90527bdcc8d5ec355 | Data Structure/Array Or Vector/Sort An Array of 0s and 1s/SolutionByEnthusiastDeveloper.py | python | sortArray | (array) | return sorted_array | Sort a given array which should only include 0's and 1's
Raise a value error on the first value which is not 0 or 1, otherwise return a sorted array
Logic:
1. Go over the supplied array once and:
a. verify the input values
b. count how many 1's are in there
2. Fill the sorted array with zeros (length of given array - number of 1's)
3. Fill the rest of the sorted array with ones
4. Return the sorted array | Sort a given array which should only include 0's and 1's
Raise a value error on the first value which is not 0 or 1, otherwise return a sorted array | [
"Sort",
"a",
"given",
"array",
"which",
"should",
"only",
"include",
"0",
"s",
"and",
"1",
"s",
"Raise",
"a",
"value",
"error",
"on",
"the",
"first",
"value",
"which",
"is",
"not",
"0",
"or",
"1",
"otherwise",
"return",
"a",
"sorted",
"array"
] | def sortArray(array):
'''
Sort a given array which should only include 0's and 1's
Raise a value error on the first value which is not 0 or 1, otherwise return a sorted array
Logic:
1. Go over the supplied array once and:
a. verify the input values
b. count how many 1's are in there
2. Fill the sorted array with zeros (length of given array - number of 1's)
3. Fill the rest of the sorted array with ones
4. Return the sorted array
'''
num_of_ones = 0
for x in array:
if x == 0:
pass
elif x == 1:
num_of_ones += 1
else:
raise ValueError
sorted_array = []
for i in range(len(array)-num_of_ones):
sorted_array.append(0)
for i in range(len(array)-num_of_ones, len(array)):
sorted_array.append(1)
return sorted_array | [
"def",
"sortArray",
"(",
"array",
")",
":",
"num_of_ones",
"=",
"0",
"for",
"x",
"in",
"array",
":",
"if",
"x",
"==",
"0",
":",
"pass",
"elif",
"x",
"==",
"1",
":",
"num_of_ones",
"+=",
"1",
"else",
":",
"raise",
"ValueError",
"sorted_array",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"array",
")",
"-",
"num_of_ones",
")",
":",
"sorted_array",
".",
"append",
"(",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"array",
")",
"-",
"num_of_ones",
",",
"len",
"(",
"array",
")",
")",
":",
"sorted_array",
".",
"append",
"(",
"1",
")",
"return",
"sorted_array"
] | https://github.com/shoaibrayeen/Programmers-Community/blob/1d352fb3e6ac5e2e7d9472d90527bdcc8d5ec355/Data Structure/Array Or Vector/Sort An Array of 0s and 1s/SolutionByEnthusiastDeveloper.py#L5-L32 |
|
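The row above stores a counting approach to sorting a binary array: one validation pass counts the 1's, then the result is built as zeros followed by ones. A minimal sketch of the same idea (the name `sort_binary_array` and the error message are illustrative, not part of the row):

```python
def sort_binary_array(array):
    # Single pass: validate every value and count the 1's.
    num_of_ones = 0
    for x in array:
        if x == 1:
            num_of_ones += 1
        elif x != 0:
            raise ValueError(f"expected 0 or 1, got {x!r}")
    # The sorted result is all 0's followed by all 1's.
    return [0] * (len(array) - num_of_ones) + [1] * num_of_ones

assert sort_binary_array([1, 0, 1, 0, 0]) == [0, 0, 0, 1, 1]
```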
KratosMultiphysics/Kratos | 0000833054ed0503424eb28205d6508d9ca6cbbc | applications/FemToDemApplication/python_scripts/MainCouplingPfemFemDemAitken.py | python | KratosPrintInfo | (message) | This function prints info on screen | This function prints info on screen | [
"This",
"function",
"prints",
"info",
"on",
"screen"
] | def KratosPrintInfo(message):
"""This function prints info on screen
"""
KM.Logger.Print("", message)
KM.Logger.Flush() | [
"def",
"KratosPrintInfo",
"(",
"message",
")",
":",
"KM",
".",
"Logger",
".",
"Print",
"(",
"\"\"",
",",
"message",
")",
"KM",
".",
"Logger",
".",
"Flush",
"(",
")"
] | https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/applications/FemToDemApplication/python_scripts/MainCouplingPfemFemDemAitken.py#L13-L17 |
||
GoSSIP-SJTU/Armariris | ad5d868482956b2194a77b39c8d543c7c2318200 | tools/clang/bindings/python/clang/cindex.py | python | Cursor.get_usr | (self) | return conf.lib.clang_getCursorUSR(self) | Return the Unified Symbol Resolution (USR) for the entity referenced
by the given cursor (or None).
A Unified Symbol Resolution (USR) is a string that identifies a
particular entity (function, class, variable, etc.) within a
program. USRs can be compared across translation units to determine,
e.g., when references in one translation refer to an entity defined in
another translation unit. | Return the Unified Symbol Resolution (USR) for the entity referenced
by the given cursor (or None). | [
"Return",
"the",
"Unified",
"Symbol",
"Resolution",
"(",
"USR",
")",
"for",
"the",
"entity",
"referenced",
"by",
"the",
"given",
"cursor",
"(",
"or",
"None",
")",
"."
] | def get_usr(self):
"""Return the Unified Symbol Resolution (USR) for the entity referenced
by the given cursor (or None).
A Unified Symbol Resolution (USR) is a string that identifies a
particular entity (function, class, variable, etc.) within a
program. USRs can be compared across translation units to determine,
e.g., when references in one translation refer to an entity defined in
another translation unit."""
return conf.lib.clang_getCursorUSR(self) | [
"def",
"get_usr",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getCursorUSR",
"(",
"self",
")"
] | https://github.com/GoSSIP-SJTU/Armariris/blob/ad5d868482956b2194a77b39c8d543c7c2318200/tools/clang/bindings/python/clang/cindex.py#L1257-L1266 |
|
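The `get_usr` row documents Unified Symbol Resolutions, which let entities be matched across translation units. A hedged sketch of how the binding above might be driven (assumes libclang and the `clang.cindex` bindings are installed; the in-memory file name `t.c` is illustrative):

```python
import clang.cindex

index = clang.cindex.Index.create()
# Parse a tiny translation unit from memory rather than from disk.
tu = index.parse('t.c', unsaved_files=[('t.c', 'int add(int a, int b);')])
for cursor in tu.cursor.walk_preorder():
    usr = cursor.get_usr()
    if usr:  # many cursors (e.g. the TU root) have no USR
        print(cursor.spelling, usr)
```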
openvinotoolkit/openvino | dedcbeafa8b84cccdc55ca64b8da516682b381c7 | tools/mo/openvino/tools/mo/ops/If.py | python | If.re_numerate_internal_id_and_get_if_id | (if_node) | return if_node.node | This method is called before IR generation. This method sets internal_layer_id.
:param if_node: The If node where is necessary to set internal_layer_id in bodies.
:return: if_node | This method is called before IR generation. This method sets internal_layer_id. | [
"This",
"method",
"is",
"called",
"before",
"IR",
"generation",
".",
"This",
"method",
"sets",
"internal_layer_id",
"."
] | def re_numerate_internal_id_and_get_if_id(if_node):
"""
This method is called before IR generation. This method sets internal_layer_id.
:param if_node: The If node where is necessary to set internal_layer_id in bodies.
:return: if_node
"""
then_graph_nodes = if_node.then_graph.nodes()
for idx in range(len(if_node.then_graph.get_op_nodes())):
then_graph_nodes[idx]['internal_layer_id'] = idx
else_graph_nodes = if_node.else_graph.nodes()
for idx in range(len(if_node.else_graph.get_op_nodes())):
else_graph_nodes[idx]['internal_layer_id'] = idx
return if_node.node | [
"def",
"re_numerate_internal_id_and_get_if_id",
"(",
"if_node",
")",
":",
"then_graph_nodes",
"=",
"if_node",
".",
"then_graph",
".",
"nodes",
"(",
")",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"if_node",
".",
"then_graph",
".",
"get_op_nodes",
"(",
")",
")",
")",
":",
"then_graph_nodes",
"[",
"idx",
"]",
"[",
"'internal_layer_id'",
"]",
"=",
"idx",
"else_graph_nodes",
"=",
"if_node",
".",
"else_graph",
".",
"nodes",
"(",
")",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"if_node",
".",
"else_graph",
".",
"get_op_nodes",
"(",
")",
")",
")",
":",
"else_graph_nodes",
"[",
"idx",
"]",
"[",
"'internal_layer_id'",
"]",
"=",
"idx",
"return",
"if_node",
".",
"node"
] | https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/mo/openvino/tools/mo/ops/If.py#L273-L286 |
|
rapidsai/cudf | d5b2448fc69f17509304d594f029d0df56984962 | python/cudf/cudf/core/df_protocol.py | python | _CuDFBuffer.bufsize | (self) | return self._buf.nbytes | Buffer size in bytes. | Buffer size in bytes. | [
"Buffer",
"size",
"in",
"bytes",
"."
] | def bufsize(self) -> int:
"""
Buffer size in bytes.
"""
return self._buf.nbytes | [
"def",
"bufsize",
"(",
"self",
")",
"->",
"int",
":",
"return",
"self",
".",
"_buf",
".",
"nbytes"
] | https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/df_protocol.py#L79-L83 |
|
facebookarchive/LogDevice | ce7726050edc49a1e15d9160e81c890736b779e2 | logdevice/ops/ldshell/autoload/commands/safety.py | python | location_up_to_scope | (
shard: ShardID,
location_per_scope: Mapping[LocationScope, str],
scope: LocationScope,
) | return ".".join(locs) | Generates the location string up to a given scope. The input
scope is inclusive. | Generates the location string up to a given scope. The input
scope is inclusive. | [
"Generates",
"the",
"location",
"string",
"up",
"to",
"a",
"given",
"scope",
".",
"The",
"input",
"scope",
"is",
"inclusive",
"."
] | def location_up_to_scope(
shard: ShardID,
location_per_scope: Mapping[LocationScope, str],
scope: LocationScope,
) -> str:
"""
Generates the location string up to a given scope. The input
scope is inclusive.
"""
if not location_per_scope:
return "UNKNOWN"
locs = []
# Sort scopes from bigger to smaller (ROOT > REGION > CLUSTER > ...)
for loc_scope in sorted(
location_per_scope.keys(), key=lambda x: x.value, reverse=True
):
if loc_scope.value >= scope.value:
locs.append(location_per_scope[loc_scope])
else:
break
if scope == LocationScope.NODE:
locs.append(str(shard.node.node_index))
return ".".join(locs) | [
"def",
"location_up_to_scope",
"(",
"shard",
":",
"ShardID",
",",
"location_per_scope",
":",
"Mapping",
"[",
"LocationScope",
",",
"str",
"]",
",",
"scope",
":",
"LocationScope",
",",
")",
"->",
"str",
":",
"if",
"not",
"location_per_scope",
":",
"return",
"\"UNKNOWN\"",
"locs",
"=",
"[",
"]",
"# Sort scopes from bigger to smaller (ROOT > REGION > CLUSTER > ...)",
"for",
"loc_scope",
"in",
"sorted",
"(",
"location_per_scope",
".",
"keys",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"value",
",",
"reverse",
"=",
"True",
")",
":",
"if",
"loc_scope",
".",
"value",
">=",
"scope",
".",
"value",
":",
"locs",
".",
"append",
"(",
"location_per_scope",
"[",
"loc_scope",
"]",
")",
"else",
":",
"break",
"if",
"scope",
"==",
"LocationScope",
".",
"NODE",
":",
"locs",
".",
"append",
"(",
"str",
"(",
"shard",
".",
"node",
".",
"node_index",
")",
")",
"return",
"\".\"",
".",
"join",
"(",
"locs",
")"
] | https://github.com/facebookarchive/LogDevice/blob/ce7726050edc49a1e15d9160e81c890736b779e2/logdevice/ops/ldshell/autoload/commands/safety.py#L307-L329 |
|
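The row above builds a dotted location prefix by walking scopes from largest to smallest and keeping those at or above the requested scope. A self-contained analogue (the `Scope` enum, its values, and the sample locations are hypothetical stand-ins for the LogDevice types):

```python
from enum import Enum

class Scope(Enum):
    RACK = 1
    CLUSTER = 2
    REGION = 3

def location_up_to(location_per_scope, scope):
    # Walk scopes from biggest to smallest, keeping those >= the requested one.
    locs = [location_per_scope[s]
            for s in sorted(location_per_scope, key=lambda s: s.value, reverse=True)
            if s.value >= scope.value]
    return ".".join(locs) if locs else "UNKNOWN"

loc = {Scope.REGION: "us-east", Scope.CLUSTER: "c01", Scope.RACK: "r42"}
assert location_up_to(loc, Scope.CLUSTER) == "us-east.c01"
```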
metashell/metashell | f4177e4854ea00c8dbc722cadab26ef413d798ea | 3rd/templight/clang/utils/check_cfc/check_cfc.py | python | get_output_file | (args) | return None | Return the output file specified by this command or None if not
specified. | Return the output file specified by this command or None if not
specified. | [
"Return",
"the",
"output",
"file",
"specified",
"by",
"this",
"command",
"or",
"None",
"if",
"not",
"specified",
"."
] | def get_output_file(args):
"""Return the output file specified by this command or None if not
specified."""
grabnext = False
for arg in args:
if grabnext:
return arg
if arg == '-o':
# Specified as a separate arg
grabnext = True
elif arg.startswith('-o'):
# Specified conjoined with -o
return arg[2:]
assert grabnext == False
return None | [
"def",
"get_output_file",
"(",
"args",
")",
":",
"grabnext",
"=",
"False",
"for",
"arg",
"in",
"args",
":",
"if",
"grabnext",
":",
"return",
"arg",
"if",
"arg",
"==",
"'-o'",
":",
"# Specified as a separate arg",
"grabnext",
"=",
"True",
"elif",
"arg",
".",
"startswith",
"(",
"'-o'",
")",
":",
"# Specified conjoined with -o",
"return",
"arg",
"[",
"2",
":",
"]",
"assert",
"grabnext",
"==",
"False",
"return",
"None"
] | https://github.com/metashell/metashell/blob/f4177e4854ea00c8dbc722cadab26ef413d798ea/3rd/templight/clang/utils/check_cfc/check_cfc.py#L130-L145 |
|
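`get_output_file` above handles both spellings of the `-o` flag: a separate argument and a conjoined one. A short usage sketch (assumes the function from the row is in scope; the argument lists are made up):

```python
assert get_output_file(['clang', '-c', 'foo.c', '-o', 'foo.o']) == 'foo.o'  # separate -o
assert get_output_file(['clang', '-c', 'foo.c', '-ofoo.o']) == 'foo.o'      # conjoined -o
assert get_output_file(['clang', '-c', 'foo.c']) is None                    # no output flag
```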
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py | python | MockMethod.AndReturn | (self, return_value) | return return_value | Set the value to return when this method is called.
Args:
# return_value can be anything. | Set the value to return when this method is called. | [
"Set",
"the",
"value",
"to",
"return",
"when",
"this",
"method",
"is",
"called",
"."
] | def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value | [
"def",
"AndReturn",
"(",
"self",
",",
"return_value",
")",
":",
"self",
".",
"_return_value",
"=",
"return_value",
"return",
"return_value"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L718-L726 |
|
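`AndReturn` above is part of mox's record/replay cycle: calls made before `ReplayAll()` record expectations, and calls after it must match them. A minimal sketch (assumes the bundled `mox` module is importable; the mocked method name `fetch` is illustrative):

```python
import mox

m = mox.Mox()
service = m.CreateMockAnything()
service.fetch('key').AndReturn('value')   # record: fetch('key') shall return 'value'
m.ReplayAll()
assert service.fetch('key') == 'value'    # replay: the recorded call is satisfied
m.VerifyAll()                             # all recorded expectations were met
```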
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/misc.py | python | dist_is_editable | (dist) | return False | Return True if given Distribution is an editable install. | [] | def dist_is_editable(dist):
# type: (Distribution) -> bool
"""
Return True if given Distribution is an editable install.
"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False | [
"def",
"dist_is_editable",
"(",
"dist",
")",
":",
"# type: (Distribution) -> bool",
"for",
"path_item",
"in",
"sys",
".",
"path",
":",
"egg_link",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_item",
",",
"dist",
".",
"project_name",
"+",
"'.egg-link'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"egg_link",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/misc.py#L801-L819 |
||
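The check above relies on the fact that an editable install (`pip install -e`) leaves a `<project>.egg-link` file on `sys.path` instead of copying the package in. A standalone sketch of the same probe (the project name is illustrative):

```python
import os
import sys

def looks_editable(project_name):
    # True if any sys.path entry holds a '<name>.egg-link' marker file.
    return any(
        os.path.isfile(os.path.join(path_item, project_name + '.egg-link'))
        for path_item in sys.path
    )

print(looks_editable('requests'))  # typically False unless installed with -e
```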
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/logging/handlers.py | python | SocketHandler.makeSocket | (self, timeout=1) | return result | A factory method which allows subclasses to define the precise
type of socket they want. | A factory method which allows subclasses to define the precise
type of socket they want. | [
"A",
"factory",
"method",
"which",
"allows",
"subclasses",
"to",
"define",
"the",
"precise",
"type",
"of",
"socket",
"they",
"want",
"."
] | def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result | [
"def",
"makeSocket",
"(",
"self",
",",
"timeout",
"=",
"1",
")",
":",
"if",
"self",
".",
"port",
"is",
"not",
"None",
":",
"result",
"=",
"socket",
".",
"create_connection",
"(",
"self",
".",
"address",
",",
"timeout",
"=",
"timeout",
")",
"else",
":",
"result",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_UNIX",
",",
"socket",
".",
"SOCK_STREAM",
")",
"result",
".",
"settimeout",
"(",
"timeout",
")",
"try",
":",
"result",
".",
"connect",
"(",
"self",
".",
"address",
")",
"except",
"OSError",
":",
"result",
".",
"close",
"(",
")",
"# Issue 19182",
"raise",
"return",
"result"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/logging/handlers.py#L558-L573 |
|
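The factory above picks TCP when a port is given and an `AF_UNIX` socket otherwise. A small sketch of driving it through the standard logging machinery (host, port, and socket path are illustrative; the connection only succeeds if something is listening):

```python
import logging
import logging.handlers

handler = logging.handlers.SocketHandler('localhost', 9020)        # TCP branch
# handler = logging.handlers.SocketHandler('/tmp/log.sock', None)  # AF_UNIX branch
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.error('hello over a socket')  # emit() connects lazily via makeSocket()
```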
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/histograms.py | python | histogram_bin_edges | (a, bins=10, range=None, weights=None) | return bin_edges | r"""
Function to calculate only the edges of the bins used by the `histogram`
function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'fd' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
>>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ]) | r"""
Function to calculate only the edges of the bins used by the `histogram`
function. | [
"r",
"Function",
"to",
"calculate",
"only",
"the",
"edges",
"of",
"the",
"bins",
"used",
"by",
"the",
"histogram",
"function",
"."
] | def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram`
function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'fd' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
>>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges | [
"def",
"histogram_bin_edges",
"(",
"a",
",",
"bins",
"=",
"10",
",",
"range",
"=",
"None",
",",
"weights",
"=",
"None",
")",
":",
"a",
",",
"weights",
"=",
"_ravel_and_check_weights",
"(",
"a",
",",
"weights",
")",
"bin_edges",
",",
"_",
"=",
"_get_bin_edges",
"(",
"a",
",",
"bins",
",",
"range",
",",
"weights",
")",
"return",
"bin_edges"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/histograms.py#L474-L672 |
|
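The notes in the row above give closed forms for the bin estimators. A quick sketch checking the 'sturges' and 'fd' formulas against NumPy itself (random data, seed 0):

```python
import numpy as np

a = np.random.default_rng(0).normal(size=1000)

# Sturges: n_h = log2(n) + 1 bins (NumPy rounds the log up).
n_sturges = int(np.ceil(np.log2(a.size))) + 1

# Freedman-Diaconis: h = 2 * IQR / n**(1/3).
iqr = np.subtract(*np.percentile(a, [75, 25]))
h_fd = 2 * iqr / a.size ** (1 / 3)

edges = np.histogram_bin_edges(a, bins='auto')  # max of the two estimators
print(n_sturges, h_fd, len(edges) - 1)
```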
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/signal/_peak_finding.py | python | _boolrelextrema | (data, comparator, axis=0, order=1, mode='clip') | return results | Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool) | Calculate the relative extrema of `data`. | [
"Calculate",
"the",
"relative",
"extrema",
"of",
"data",
"."
] | def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results | [
"def",
"_boolrelextrema",
"(",
"data",
",",
"comparator",
",",
"axis",
"=",
"0",
",",
"order",
"=",
"1",
",",
"mode",
"=",
"'clip'",
")",
":",
"if",
"(",
"(",
"int",
"(",
"order",
")",
"!=",
"order",
")",
"or",
"(",
"order",
"<",
"1",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Order must be an int >= 1'",
")",
"datalen",
"=",
"data",
".",
"shape",
"[",
"axis",
"]",
"locs",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"datalen",
")",
"results",
"=",
"np",
".",
"ones",
"(",
"data",
".",
"shape",
",",
"dtype",
"=",
"bool",
")",
"main",
"=",
"data",
".",
"take",
"(",
"locs",
",",
"axis",
"=",
"axis",
",",
"mode",
"=",
"mode",
")",
"for",
"shift",
"in",
"xrange",
"(",
"1",
",",
"order",
"+",
"1",
")",
":",
"plus",
"=",
"data",
".",
"take",
"(",
"locs",
"+",
"shift",
",",
"axis",
"=",
"axis",
",",
"mode",
"=",
"mode",
")",
"minus",
"=",
"data",
".",
"take",
"(",
"locs",
"-",
"shift",
",",
"axis",
"=",
"axis",
",",
"mode",
"=",
"mode",
")",
"results",
"&=",
"comparator",
"(",
"main",
",",
"plus",
")",
"results",
"&=",
"comparator",
"(",
"main",
",",
"minus",
")",
"if",
"(",
"~",
"results",
".",
"any",
"(",
")",
")",
":",
"return",
"results",
"return",
"results"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/_peak_finding.py#L16-L72 |
|
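`_boolrelextrema` above is the engine behind SciPy's public `argrelmax`/`argrelmin`. A short demonstration of those wrappers on the docstring's kind of data:

```python
import numpy as np
from scipy.signal import argrelmax, argrelmin

testdata = np.array([1, 2, 3, 2, 1, 0, 1])
print(argrelmax(testdata))  # (array([2]),) -- index of the local peak
print(argrelmin(testdata))  # (array([5]),) -- index of the local trough
```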
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/codecs.py | python | iterencode | (iterator, encoding, errors='strict', **kwargs) | Encoding iterator.
Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor. | Encoding iterator. | [
"Encoding",
"iterator",
"."
] | def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using a IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output | [
"def",
"iterencode",
"(",
"iterator",
",",
"encoding",
",",
"errors",
"=",
"'strict'",
",",
"*",
"*",
"kwargs",
")",
":",
"encoder",
"=",
"getincrementalencoder",
"(",
"encoding",
")",
"(",
"errors",
",",
"*",
"*",
"kwargs",
")",
"for",
"input",
"in",
"iterator",
":",
"output",
"=",
"encoder",
".",
"encode",
"(",
"input",
")",
"if",
"output",
":",
"yield",
"output",
"output",
"=",
"encoder",
".",
"encode",
"(",
"\"\"",
",",
"True",
")",
"if",
"output",
":",
"yield",
"output"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/codecs.py#L996-L1012 |
||
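`iterencode` above feeds an `IncrementalEncoder` chunk by chunk and yields only non-empty output. A usage sketch (Python 3 syntax shown):

```python
import codecs

chunks = ['hello ', 'wörld']
for out in codecs.iterencode(iter(chunks), 'utf-8'):
    print(out)  # b'hello ' then b'w\xc3\xb6rld'
```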
bilibili/libyuv | 2e9f3e5cf5f3c71a4a34893ceb20c5d69689390f | tools/valgrind-libyuv/tsan/PRESUBMIT.py | python | CheckChange | (input_api, output_api) | return suppressions.PresubmitCheck(input_api, output_api) | Checks the TSan suppressions files for bad suppressions. | Checks the TSan suppressions files for bad suppressions. | [
"Checks",
"the",
"TSan",
"suppressions",
"files",
"for",
"bad",
"suppressions",
"."
] | def CheckChange(input_api, output_api):
"""Checks the TSan suppressions files for bad suppressions."""
# Add the path to the Chrome valgrind dir to the import path:
tools_vg_path = os.path.join(input_api.PresubmitLocalPath(), '..', '..',
'valgrind')
sys.path.append(tools_vg_path)
import suppressions
return suppressions.PresubmitCheck(input_api, output_api) | [
"def",
"CheckChange",
"(",
"input_api",
",",
"output_api",
")",
":",
"# Add the path to the Chrome valgrind dir to the import path:",
"tools_vg_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"input_api",
".",
"PresubmitLocalPath",
"(",
")",
",",
"'..'",
",",
"'..'",
",",
"'valgrind'",
")",
"sys",
".",
"path",
".",
"append",
"(",
"tools_vg_path",
")",
"import",
"suppressions",
"return",
"suppressions",
".",
"PresubmitCheck",
"(",
"input_api",
",",
"output_api",
")"
] | https://github.com/bilibili/libyuv/blob/2e9f3e5cf5f3c71a4a34893ceb20c5d69689390f/tools/valgrind-libyuv/tsan/PRESUBMIT.py#L21-L30 |
|
1989Ryan/Semantic_SLAM | 0284b3f832ca431c494f9c134fe46c40ec86ee38 | Third_Part/PSPNet_Keras_tensorflow/caffe-tensorflow/examples/imagenet/validate.py | python | validate | (net, model_path, image_producer, top_k=5) | Compute the top_k classification accuracy for the given network and images. | Compute the top_k classification accuracy for the given network and images. | [
"Compute",
"the",
"top_k",
"classification",
"accuracy",
"for",
"the",
"given",
"network",
"and",
"images",
"."
] | def validate(net, model_path, image_producer, top_k=5):
'''Compute the top_k classification accuracy for the given network and images.'''
# Get the data specifications for given network
spec = models.get_data_spec(model_instance=net)
# Get the input node for feeding in the images
input_node = net.inputs['data']
# Create a placeholder for the ground truth labels
label_node = tf.placeholder(tf.int32)
# Get the output of the network (class probabilities)
probs = net.get_output()
# Create a top_k accuracy node
top_k_op = tf.nn.in_top_k(probs, label_node, top_k)
# The number of images processed
count = 0
# The number of correctly classified images
correct = 0
# The total number of images
total = len(image_producer)
with tf.Session() as sesh:
coordinator = tf.train.Coordinator()
# Load the converted parameters
net.load(data_path=model_path, session=sesh)
# Start the image processing workers
threads = image_producer.start(session=sesh, coordinator=coordinator)
# Iterate over and classify mini-batches
for (labels, images) in image_producer.batches(sesh):
correct += np.sum(sesh.run(top_k_op,
feed_dict={input_node: images,
label_node: labels}))
count += len(labels)
cur_accuracy = float(correct) * 100 / count
print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))
# Stop the worker threads
coordinator.request_stop()
coordinator.join(threads, stop_grace_period_secs=2)
print('Top {} Accuracy: {}'.format(top_k, float(correct) / total)) | [
"def",
"validate",
"(",
"net",
",",
"model_path",
",",
"image_producer",
",",
"top_k",
"=",
"5",
")",
":",
"# Get the data specifications for given network",
"spec",
"=",
"models",
".",
"get_data_spec",
"(",
"model_instance",
"=",
"net",
")",
"# Get the input node for feeding in the images",
"input_node",
"=",
"net",
".",
"inputs",
"[",
"'data'",
"]",
"# Create a placeholder for the ground truth labels",
"label_node",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
")",
"# Get the output of the network (class probabilities)",
"probs",
"=",
"net",
".",
"get_output",
"(",
")",
"# Create a top_k accuracy node",
"top_k_op",
"=",
"tf",
".",
"nn",
".",
"in_top_k",
"(",
"probs",
",",
"label_node",
",",
"top_k",
")",
"# The number of images processed",
"count",
"=",
"0",
"# The number of correctly classified images",
"correct",
"=",
"0",
"# The total number of images",
"total",
"=",
"len",
"(",
"image_producer",
")",
"with",
"tf",
".",
"Session",
"(",
")",
"as",
"sesh",
":",
"coordinator",
"=",
"tf",
".",
"train",
".",
"Coordinator",
"(",
")",
"# Load the converted parameters",
"net",
".",
"load",
"(",
"data_path",
"=",
"model_path",
",",
"session",
"=",
"sesh",
")",
"# Start the image processing workers",
"threads",
"=",
"image_producer",
".",
"start",
"(",
"session",
"=",
"sesh",
",",
"coordinator",
"=",
"coordinator",
")",
"# Iterate over and classify mini-batches",
"for",
"(",
"labels",
",",
"images",
")",
"in",
"image_producer",
".",
"batches",
"(",
"sesh",
")",
":",
"correct",
"+=",
"np",
".",
"sum",
"(",
"sesh",
".",
"run",
"(",
"top_k_op",
",",
"feed_dict",
"=",
"{",
"input_node",
":",
"images",
",",
"label_node",
":",
"labels",
"}",
")",
")",
"count",
"+=",
"len",
"(",
"labels",
")",
"cur_accuracy",
"=",
"float",
"(",
"correct",
")",
"*",
"100",
"/",
"count",
"print",
"(",
"'{:>6}/{:<6} {:>6.2f}%'",
".",
"format",
"(",
"count",
",",
"total",
",",
"cur_accuracy",
")",
")",
"# Stop the worker threads",
"coordinator",
".",
"request_stop",
"(",
")",
"coordinator",
".",
"join",
"(",
"threads",
",",
"stop_grace_period_secs",
"=",
"2",
")",
"print",
"(",
"'Top {} Accuracy: {}'",
".",
"format",
"(",
"top_k",
",",
"float",
"(",
"correct",
")",
"/",
"total",
")",
")"
] | https://github.com/1989Ryan/Semantic_SLAM/blob/0284b3f832ca431c494f9c134fe46c40ec86ee38/Third_Part/PSPNet_Keras_tensorflow/caffe-tensorflow/examples/imagenet/validate.py#L37-L73 |
||
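The loop above accumulates top-k accuracy with TensorFlow's `in_top_k`. A framework-free sketch of the same bookkeeping (the probabilities and labels are made up for illustration):

```python
import numpy as np

def top_k_correct(probs, labels, k=5):
    # For each row, is the true label among the k highest-scoring classes?
    topk = np.argsort(probs, axis=1)[:, -k:]
    return sum(label in row for label, row in zip(labels, topk))

probs = np.array([[0.1, 0.7, 0.2],
                  [0.5, 0.3, 0.2]])
labels = np.array([1, 0])
print(top_k_correct(probs, labels, k=2))  # 2: both true labels land in the top 2
```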
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Variables/PathVariable.py | python | _PathVariableClass.PathIsFile | (self, key, val, env) | Validator to check if Path is a file | Validator to check if Path is a file | [
"Validator",
"to",
"check",
"if",
"Path",
"is",
"a",
"file"
] | def PathIsFile(self, key, val, env):
"""Validator to check if Path is a file"""
if not os.path.isfile(val):
if os.path.isdir(val):
m = 'File path for option %s is a directory: %s'
else:
m = 'File path for option %s does not exist: %s'
raise SCons.Errors.UserError(m % (key, val)) | [
"def",
"PathIsFile",
"(",
"self",
",",
"key",
",",
"val",
",",
"env",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"val",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"val",
")",
":",
"m",
"=",
"'File path for option %s is a directory: %s'",
"else",
":",
"m",
"=",
"'File path for option %s does not exist: %s'",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"m",
"%",
"(",
"key",
",",
"val",
")",
")"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Variables/PathVariable.py#L104-L111 |
||
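Validators like `PathIsFile` above are normally passed to `PathVariable` when declaring build options. A sketch as it would appear inside an `SConstruct`, where SCons injects `Variables`, `PathVariable`, and `Environment` (the option name and default are illustrative):

```python
# SConstruct
vars = Variables()
vars.Add(PathVariable('CONFIG_FILE', 'Path to a config file',
                      'config.ini', PathVariable.PathIsFile))
env = Environment(variables=vars)  # building with CONFIG_FILE=<a directory> now errors
```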
oracle/graaljs | 36a56e8e993d45fc40939a3a4d9c0c24990720f1 | graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | python | Writer.AddToolFile | (self, path) | Adds a tool file to the project.
Args:
path: Relative path from project to tool file. | Adds a tool file to the project. | [
"Adds",
"a",
"tool",
"file",
"to",
"the",
"project",
"."
] | def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(["ToolFile", {"RelativePath": path}]) | [
"def",
"AddToolFile",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"tool_files_section",
".",
"append",
"(",
"[",
"\"ToolFile\"",
",",
"{",
"\"RelativePath\"",
":",
"path",
"}",
"]",
")"
] | https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py#L84-L90 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/history.py | python | History.__init__ | (self, text) | Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not). | Initialize data attributes and bind event methods. | [
"Initialize",
"data",
"attributes",
"and",
"bind",
"event",
"methods",
"."
] | def __init__(self, text):
'''Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not).
'''
self.text = text
self.history = []
self.prefix = None
self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next) | [
"def",
"__init__",
"(",
"self",
",",
"text",
")",
":",
"self",
".",
"text",
"=",
"text",
"self",
".",
"history",
"=",
"[",
"]",
"self",
".",
"prefix",
"=",
"None",
"self",
".",
"pointer",
"=",
"None",
"self",
".",
"cyclic",
"=",
"idleConf",
".",
"GetOption",
"(",
"\"main\"",
",",
"\"History\"",
",",
"\"cyclic\"",
",",
"1",
",",
"\"bool\"",
")",
"text",
".",
"bind",
"(",
"\"<<history-previous>>\"",
",",
"self",
".",
"history_prev",
")",
"text",
".",
"bind",
"(",
"\"<<history-next>>\"",
",",
"self",
".",
"history_next",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/history.py#L14-L29 |
||
PrincetonUniversity/athena-public-version | 9c266692b9423743d8e23509b3ab266a232a92d2 | tst/style/cpplint.py | python | CheckForNonStandardConstructs | (filename, clean_lines, linenum,
nesting_state, error) | r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of "%qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message | r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. | [
"r",
"Logs",
"an",
"error",
"if",
"we",
"see",
"certain",
"non",
"-",
"ANSI",
"constructs",
"ignored",
"by",
"gcc",
"-",
"2",
"."
] | def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of "%qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
r'(?:(?:inline|constexpr)\s+)*%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>')
or constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
variadic_args = [arg for arg in constructor_args if '&&...' in arg]
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args # empty arg list
# 'void' arg specifier
or (len(constructor_args) == 1
and constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 # exactly one arg
and not noarg_constructor)
# all but at most one arg defaulted
or (len(constructor_args) >= 1
and not noarg_constructor
and len(defaulted_args) >= len(constructor_args) - 1)
# variadic arguments with zero or one argument
or (len(constructor_args) <= 2
and len(variadic_args) >= 1))
initializer_list_constructor = bool(
onearg_constructor
and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor
and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit
and onearg_constructor
and not initializer_list_constructor
and not copy_constructor):
if defaulted_args or variadic_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.') | [
"def",
"CheckForNonStandardConstructs",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
":",
"# Remove comments from the line, but leave in strings for now.",
"line",
"=",
"clean_lines",
".",
"lines",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'printf\\s*\\(.*\".*%[-+ ]?\\d*q'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf_format'",
",",
"3",
",",
"'%q in format strings is deprecated. Use %ll instead.'",
")",
"if",
"Search",
"(",
"r'printf\\s*\\(.*\".*%\\d+\\$'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf_format'",
",",
"2",
",",
"'%N$ formats are unconventional. Try rewriting to avoid them.'",
")",
"# Remove escaped backslashes before looking for undefined escapes.",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\\\\\\\'",
",",
"''",
")",
"if",
"Search",
"(",
"r'(\"|\\').*\\\\(%|\\[|\\(|{)'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/printf_format'",
",",
"3",
",",
"'%, [, (, and { are undefined character escapes. Unescape them.'",
")",
"# For the rest, work with both comments and strings removed.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'\\b(const|volatile|void|char|short|int|long'",
"r'|float|double|signed|unsigned'",
"r'|schar|u?int8|u?int16|u?int32|u?int64)'",
"r'\\s+(register|static|extern|typedef)\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/storage_class'",
",",
"5",
",",
"'Storage-class specifier (static, extern, typedef, etc) should be '",
"'at the beginning of the declaration.'",
")",
"if",
"Match",
"(",
"r'\\s*#\\s*endif\\s*[^/\\s]+'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/endif_comment'",
",",
"5",
",",
"'Uncommented text after #endif is non-standard. Use a comment.'",
")",
"if",
"Match",
"(",
"r'\\s*class\\s+(\\w+\\s*::\\s*)+\\w+\\s*;'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/forward_decl'",
",",
"5",
",",
"'Inner-style forward declarations are invalid. Remove this line.'",
")",
"if",
"Search",
"(",
"r'(\\w+|[+-]?\\d+(\\.\\d*)?)\\s*(<|>)\\?=?\\s*(\\w+|[+-]?\\d+)(\\.\\d*)?'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/deprecated'",
",",
"3",
",",
"'>? and <? (max and min) operators are non-standard and deprecated.'",
")",
"if",
"Search",
"(",
"r'^\\s*const\\s*string\\s*&\\s*\\w+\\s*;'",
",",
"line",
")",
":",
"# TODO(unknown): Could it be expanded safely to arbitrary references,",
"# without triggering too many false positives? The first",
"# attempt triggered 5 warnings for mostly benign code in the regtest, hence",
"# the restriction.",
"# Here's the original regexp, for the reference:",
"# type_name = r'\\w+((\\s*::\\s*\\w+)|(\\s*<\\s*\\w+?\\s*>))?'",
"# r'\\s*const\\s*' + type_name + '\\s*&\\s*\\w+\\s*;'",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/member_string_references'",
",",
"2",
",",
"'const string& members are dangerous. It is much better to use '",
"'alternatives, such as pointers or simple constants.'",
")",
"# Everything else in this function operates on class declarations.",
"# Return early if the top of the nesting stack is not a class, or if",
"# the class head is not completed yet.",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"not",
"classinfo",
"or",
"not",
"classinfo",
".",
"seen_open_brace",
":",
"return",
"# The class may have been declared with namespace or classname qualifiers.",
"# The constructor and destructor will not have those qualifiers.",
"base_classname",
"=",
"classinfo",
".",
"name",
".",
"split",
"(",
"'::'",
")",
"[",
"-",
"1",
"]",
"# Look for single-argument constructors that aren't marked explicit.",
"# Technically a valid construct, but against style.",
"explicit_constructor_match",
"=",
"Match",
"(",
"r'\\s+(?:(?:inline|constexpr)\\s+)*(explicit\\s+)?'",
"r'(?:(?:inline|constexpr)\\s+)*%s\\s*'",
"r'\\(((?:[^()]|\\([^()]*\\))*)\\)'",
"%",
"re",
".",
"escape",
"(",
"base_classname",
")",
",",
"line",
")",
"if",
"explicit_constructor_match",
":",
"is_marked_explicit",
"=",
"explicit_constructor_match",
".",
"group",
"(",
"1",
")",
"if",
"not",
"explicit_constructor_match",
".",
"group",
"(",
"2",
")",
":",
"constructor_args",
"=",
"[",
"]",
"else",
":",
"constructor_args",
"=",
"explicit_constructor_match",
".",
"group",
"(",
"2",
")",
".",
"split",
"(",
"','",
")",
"# collapse arguments so that commas in template parameter lists and function",
"# argument parameter lists don't split arguments in two",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"constructor_args",
")",
":",
"constructor_arg",
"=",
"constructor_args",
"[",
"i",
"]",
"while",
"(",
"constructor_arg",
".",
"count",
"(",
"'<'",
")",
">",
"constructor_arg",
".",
"count",
"(",
"'>'",
")",
"or",
"constructor_arg",
".",
"count",
"(",
"'('",
")",
">",
"constructor_arg",
".",
"count",
"(",
"')'",
")",
")",
":",
"constructor_arg",
"+=",
"','",
"+",
"constructor_args",
"[",
"i",
"+",
"1",
"]",
"del",
"constructor_args",
"[",
"i",
"+",
"1",
"]",
"constructor_args",
"[",
"i",
"]",
"=",
"constructor_arg",
"i",
"+=",
"1",
"variadic_args",
"=",
"[",
"arg",
"for",
"arg",
"in",
"constructor_args",
"if",
"'&&...'",
"in",
"arg",
"]",
"defaulted_args",
"=",
"[",
"arg",
"for",
"arg",
"in",
"constructor_args",
"if",
"'='",
"in",
"arg",
"]",
"noarg_constructor",
"=",
"(",
"not",
"constructor_args",
"# empty arg list",
"# 'void' arg specifier",
"or",
"(",
"len",
"(",
"constructor_args",
")",
"==",
"1",
"and",
"constructor_args",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"==",
"'void'",
")",
")",
"onearg_constructor",
"=",
"(",
"(",
"len",
"(",
"constructor_args",
")",
"==",
"1",
"# exactly one arg",
"and",
"not",
"noarg_constructor",
")",
"# all but at most one arg defaulted",
"or",
"(",
"len",
"(",
"constructor_args",
")",
">=",
"1",
"and",
"not",
"noarg_constructor",
"and",
"len",
"(",
"defaulted_args",
")",
">=",
"len",
"(",
"constructor_args",
")",
"-",
"1",
")",
"# variadic arguments with zero or one argument",
"or",
"(",
"len",
"(",
"constructor_args",
")",
"<=",
"2",
"and",
"len",
"(",
"variadic_args",
")",
">=",
"1",
")",
")",
"initializer_list_constructor",
"=",
"bool",
"(",
"onearg_constructor",
"and",
"Search",
"(",
"r'\\bstd\\s*::\\s*initializer_list\\b'",
",",
"constructor_args",
"[",
"0",
"]",
")",
")",
"copy_constructor",
"=",
"bool",
"(",
"onearg_constructor",
"and",
"Match",
"(",
"r'(const\\s+)?%s(\\s*<[^>]*>)?(\\s+const)?\\s*(?:<\\w+>\\s*)?&'",
"%",
"re",
".",
"escape",
"(",
"base_classname",
")",
",",
"constructor_args",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
")",
"if",
"(",
"not",
"is_marked_explicit",
"and",
"onearg_constructor",
"and",
"not",
"initializer_list_constructor",
"and",
"not",
"copy_constructor",
")",
":",
"if",
"defaulted_args",
"or",
"variadic_args",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Constructors callable with one argument '",
"'should be marked explicit.'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Single-parameter constructors should be marked explicit.'",
")",
"elif",
"is_marked_explicit",
"and",
"not",
"onearg_constructor",
":",
"if",
"noarg_constructor",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Zero-parameter constructors should not be marked explicit.'",
")"
] | https://github.com/PrincetonUniversity/athena-public-version/blob/9c266692b9423743d8e23509b3ab266a232a92d2/tst/style/cpplint.py#L3026-L3187 |
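Illustrative sketch for the cpplint record above: a deliberately simplified regex (not cpplint's actual explicit_constructor_match pattern; the class name Foo and the test lines are hypothetical) showing how a one-argument C++ constructor without `explicit` can be flagged.

import re

def flags_missing_explicit(line, classname):
    # Simplified stand-in for the record's explicit-constructor matching;
    # real cpplint also handles inline/constexpr, templates and defaults.
    pattern = r'\s*(explicit\s+)?%s\s*\(([^,()]*)\)' % re.escape(classname)
    m = re.match(pattern, line)
    return bool(m and not m.group(1) and m.group(2).strip() not in ('', 'void'))

print(flags_missing_explicit('  Foo(int x);', 'Foo'))           # True: one arg, no explicit
print(flags_missing_explicit('  explicit Foo(int x);', 'Foo'))  # False
print(flags_missing_explicit('  Foo();', 'Foo'))                # False: zero-parameter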
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | Gauge.SetValue | (*args, **kwargs) | return _controls_.Gauge_SetValue(*args, **kwargs) | SetValue(self, int pos) | SetValue(self, int pos) | [
"SetValue",
"(",
"self",
"int",
"pos",
")"
] | def SetValue(*args, **kwargs):
"""SetValue(self, int pos)"""
return _controls_.Gauge_SetValue(*args, **kwargs) | [
"def",
"SetValue",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"Gauge_SetValue",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L755-L757 |
|
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/internal/decoder.py | python | _ModifiedDecoder | (wire_type, decode_value, modify_value) | return _SimpleDecoder(wire_type, InnerDecode) | Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode. | Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode. | [
"Like",
"SimpleDecoder",
"but",
"additionally",
"invokes",
"modify_value",
"on",
"every",
"value",
"before",
"storing",
"it",
".",
"Usually",
"modify_value",
"is",
"ZigZagDecode",
"."
] | def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode) | [
"def",
"_ModifiedDecoder",
"(",
"wire_type",
",",
"decode_value",
",",
"modify_value",
")",
":",
"# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but",
"# not enough to make a significant difference.",
"def",
"InnerDecode",
"(",
"buffer",
",",
"pos",
")",
":",
"(",
"result",
",",
"new_pos",
")",
"=",
"decode_value",
"(",
"buffer",
",",
"pos",
")",
"return",
"(",
"modify_value",
"(",
"result",
")",
",",
"new_pos",
")",
"return",
"_SimpleDecoder",
"(",
"wire_type",
",",
"InnerDecode",
")"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/internal/decoder.py#L249-L260 |
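A hedged sketch of the composition idea in the record above: a value decoder is wrapped so every decoded value passes through a post-processing step such as ZigZag decoding. The one-byte "varint" reader is a toy stand-in, not protobuf's real varint decoder.

def zigzag_decode(value):
    # Standard ZigZag mapping: 0->0, 1->-1, 2->1, 3->-2, ...
    return (value >> 1) ^ -(value & 1)

def modified(decode_value, modify_value):
    def inner(buf, pos):
        result, new_pos = decode_value(buf, pos)
        return modify_value(result), new_pos
    return inner

read_byte = lambda buf, pos: (buf[pos], pos + 1)  # toy one-byte "varint"
print(modified(read_byte, zigzag_decode)(b'\x03', 0))  # (-2, 1)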
|
Komnomnomnom/swigibpy | cfd307fdbfaffabc69a2dc037538d7e34a8b8daf | swigibpy.py | python | OrderComboLegList.__getslice__ | (self, i, j) | return _swigibpy.OrderComboLegList___getslice__(self, i, j) | __getslice__(OrderComboLegList self, std::vector< shared_ptr< OrderComboLeg > >::difference_type i, std::vector< shared_ptr< OrderComboLeg > >::difference_type j) -> OrderComboLegList | __getslice__(OrderComboLegList self, std::vector< shared_ptr< OrderComboLeg > >::difference_type i, std::vector< shared_ptr< OrderComboLeg > >::difference_type j) -> OrderComboLegList | [
"__getslice__",
"(",
"OrderComboLegList",
"self",
"std",
"::",
"vector<",
"shared_ptr<",
"OrderComboLeg",
">",
">",
"::",
"difference_type",
"i",
"std",
"::",
"vector<",
"shared_ptr<",
"OrderComboLeg",
">",
">",
"::",
"difference_type",
"j",
")",
"-",
">",
"OrderComboLegList"
] | def __getslice__(self, i, j):
"""__getslice__(OrderComboLegList self, std::vector< shared_ptr< OrderComboLeg > >::difference_type i, std::vector< shared_ptr< OrderComboLeg > >::difference_type j) -> OrderComboLegList"""
return _swigibpy.OrderComboLegList___getslice__(self, i, j) | [
"def",
"__getslice__",
"(",
"self",
",",
"i",
",",
"j",
")",
":",
"return",
"_swigibpy",
".",
"OrderComboLegList___getslice__",
"(",
"self",
",",
"i",
",",
"j",
")"
] | https://github.com/Komnomnomnom/swigibpy/blob/cfd307fdbfaffabc69a2dc037538d7e34a8b8daf/swigibpy.py#L495-L497 |
|
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/compatibility/ast_edits.py | python | _PastaEditVisitor._get_applicable_dict | (self, transformer_field, full_name, name) | return transformers | Get all dict entries indexed by name that apply to full_name or name. | Get all dict entries indexed by name that apply to full_name or name. | [
"Get",
"all",
"dict",
"entries",
"indexed",
"by",
"name",
"that",
"apply",
"to",
"full_name",
"or",
"name",
"."
] | def _get_applicable_dict(self, transformer_field, full_name, name):
"""Get all dict entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = function_transformers.get("*", {}).copy()
transformers.update(function_transformers.get(glob_name, {}))
transformers.update(function_transformers.get(full_name, {}))
return transformers | [
"def",
"_get_applicable_dict",
"(",
"self",
",",
"transformer_field",
",",
"full_name",
",",
"name",
")",
":",
"# Transformers are indexed to full name, name, or no name",
"# as a performance optimization.",
"function_transformers",
"=",
"getattr",
"(",
"self",
".",
"_api_change_spec",
",",
"transformer_field",
",",
"{",
"}",
")",
"glob_name",
"=",
"\"*.\"",
"+",
"name",
"if",
"name",
"else",
"None",
"transformers",
"=",
"function_transformers",
".",
"get",
"(",
"\"*\"",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"transformers",
".",
"update",
"(",
"function_transformers",
".",
"get",
"(",
"glob_name",
",",
"{",
"}",
")",
")",
"transformers",
".",
"update",
"(",
"function_transformers",
".",
"get",
"(",
"full_name",
",",
"{",
"}",
")",
")",
"return",
"transformers"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/tools/compatibility/ast_edits.py#L314-L325 |
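A small demonstration of the lookup precedence described in the record above: entries keyed by the full name override "*.name" entries, which override the catch-all "*". The transformer values here are hypothetical placeholders.

function_transformers = {
    "*": {"arg": "generic"},
    "*.foo": {"arg": "by-name", "extra": 1},
    "tf.mod.foo": {"arg": "by-full-name"},
}
merged = function_transformers.get("*", {}).copy()
merged.update(function_transformers.get("*.foo", {}))
merged.update(function_transformers.get("tf.mod.foo", {}))
print(merged)  # {'arg': 'by-full-name', 'extra': 1}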
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_misc.py | python | ArtProvider.GetBitmap | (*args, **kwargs) | return _misc_.ArtProvider_GetBitmap(*args, **kwargs) | GetBitmap(String id, String client=ART_OTHER, Size size=DefaultSize) -> Bitmap
Query the providers for bitmap with given ID and return it. Return
wx.NullBitmap if no provider provides it. | GetBitmap(String id, String client=ART_OTHER, Size size=DefaultSize) -> Bitmap | [
"GetBitmap",
"(",
"String",
"id",
"String",
"client",
"=",
"ART_OTHER",
"Size",
"size",
"=",
"DefaultSize",
")",
"-",
">",
"Bitmap"
] | def GetBitmap(*args, **kwargs):
"""
GetBitmap(String id, String client=ART_OTHER, Size size=DefaultSize) -> Bitmap
Query the providers for bitmap with given ID and return it. Return
wx.NullBitmap if no provider provides it.
"""
return _misc_.ArtProvider_GetBitmap(*args, **kwargs) | [
"def",
"GetBitmap",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"ArtProvider_GetBitmap",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L2819-L2826 |
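A hedged usage sketch for the wx.ArtProvider API documented above; it assumes wxPython is installed and that an application object exists before stock art is requested. wx.ART_FILE_OPEN is one of the standard stock-art IDs.

import wx

app = wx.App(False)
bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, (16, 16))
print(bmp.IsOk())  # True if some provider supplied the bitmap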
|
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/reduction_workflow/instruments/sans/hfir_command_interface.py | python | SaveIqAscii | (reducer=None, process='') | Old command for backward compatibility | Old command for backward compatibility | [
"Old",
"command",
"for",
"backward",
"compatibility"
] | def SaveIqAscii(reducer=None, process=''):
""" Old command for backward compatibility """
msg = "SaveIqAscii is not longer used:\n "
msg += "Please use 'SaveIq' instead\n "
Logger("CommandInterface").warning(msg)
ReductionSingleton().reduction_properties["ProcessInfo"] = str(process) | [
"def",
"SaveIqAscii",
"(",
"reducer",
"=",
"None",
",",
"process",
"=",
"''",
")",
":",
"msg",
"=",
"\"SaveIqAscii is not longer used:\\n \"",
"msg",
"+=",
"\"Please use 'SaveIq' instead\\n \"",
"Logger",
"(",
"\"CommandInterface\"",
")",
".",
"warning",
"(",
"msg",
")",
"ReductionSingleton",
"(",
")",
".",
"reduction_properties",
"[",
"\"ProcessInfo\"",
"]",
"=",
"str",
"(",
"process",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/reduction_workflow/instruments/sans/hfir_command_interface.py#L490-L495 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/__init__.py | python | PackageFinder._find_packages_iter | (cls, where, exclude, include) | All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter. | All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter. | [
"All",
"the",
"packages",
"found",
"in",
"where",
"that",
"pass",
"the",
"include",
"filter",
"but",
"not",
"the",
"exclude",
"filter",
"."
] | def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir) | [
"def",
"_find_packages_iter",
"(",
"cls",
",",
"where",
",",
"exclude",
",",
"include",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"where",
",",
"followlinks",
"=",
"True",
")",
":",
"# Copy dirs to iterate over it, then empty dirs.",
"all_dirs",
"=",
"dirs",
"[",
":",
"]",
"dirs",
"[",
":",
"]",
"=",
"[",
"]",
"for",
"dir",
"in",
"all_dirs",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dir",
")",
"rel_path",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"full_path",
",",
"where",
")",
"package",
"=",
"rel_path",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'.'",
")",
"# Skip directory trees that are not valid packages",
"if",
"(",
"'.'",
"in",
"dir",
"or",
"not",
"cls",
".",
"_looks_like_package",
"(",
"full_path",
")",
")",
":",
"continue",
"# Should this package be included?",
"if",
"include",
"(",
"package",
")",
"and",
"not",
"exclude",
"(",
"package",
")",
":",
"yield",
"package",
"# Keep searching subdirectories, as there may be more packages",
"# down there, even if the parent was excluded.",
"dirs",
".",
"append",
"(",
"dir",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/__init__.py#L75-L100 |
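A hedged usage sketch of the public wrapper built on the generator in the record above: setuptools.find_packages applies the same include/exclude filtering while walking a source tree. The 'src' layout and package names are hypothetical.

from setuptools import find_packages

packages = find_packages(where="src", include=["mypkg*"], exclude=["*.tests"])
print(packages)  # e.g. ['mypkg', 'mypkg.utils'] for a matching src/ tree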
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/datetime.py | python | time.replace | (self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True, *, fold=None) | return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold) | Return a new time with new values for the specified fields. | Return a new time with new values for the specified fields. | [
"Return",
"a",
"new",
"time",
"with",
"new",
"values",
"for",
"the",
"specified",
"fields",
"."
] | def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True, *, fold=None):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
if fold is None:
fold = self._fold
return type(self)(hour, minute, second, microsecond, tzinfo, fold=fold) | [
"def",
"replace",
"(",
"self",
",",
"hour",
"=",
"None",
",",
"minute",
"=",
"None",
",",
"second",
"=",
"None",
",",
"microsecond",
"=",
"None",
",",
"tzinfo",
"=",
"True",
",",
"*",
",",
"fold",
"=",
"None",
")",
":",
"if",
"hour",
"is",
"None",
":",
"hour",
"=",
"self",
".",
"hour",
"if",
"minute",
"is",
"None",
":",
"minute",
"=",
"self",
".",
"minute",
"if",
"second",
"is",
"None",
":",
"second",
"=",
"self",
".",
"second",
"if",
"microsecond",
"is",
"None",
":",
"microsecond",
"=",
"self",
".",
"microsecond",
"if",
"tzinfo",
"is",
"True",
":",
"tzinfo",
"=",
"self",
".",
"tzinfo",
"if",
"fold",
"is",
"None",
":",
"fold",
"=",
"self",
".",
"_fold",
"return",
"type",
"(",
"self",
")",
"(",
"hour",
",",
"minute",
",",
"second",
",",
"microsecond",
",",
"tzinfo",
",",
"fold",
"=",
"fold",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/datetime.py#L1452-L1467 |
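A short usage example for datetime.time.replace as documented in the record above: any field left unspecified keeps its current value, and fold is keyword-only.

from datetime import time

t = time(10, 30, 15)
print(t.replace(minute=45))         # 10:45:15
print(t.replace(second=0, fold=1))  # 10:30:00 (fold does not print)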
|
Slicer/SlicerGitSVNArchive | 65e92bb16c2b32ea47a1a66bee71f238891ee1ca | Modules/Scripted/SegmentEditor/SegmentEditor.py | python | SegmentEditorTest.test_SegmentEditor1 | (self) | Add test here later. | Add test here later. | [
"Add",
"test",
"here",
"later",
"."
] | def test_SegmentEditor1(self):
"""Add test here later.
"""
self.delayDisplay("Starting the test")
self.delayDisplay('Test passed!') | [
"def",
"test_SegmentEditor1",
"(",
"self",
")",
":",
"self",
".",
"delayDisplay",
"(",
"\"Starting the test\"",
")",
"self",
".",
"delayDisplay",
"(",
"'Test passed!'",
")"
] | https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Modules/Scripted/SegmentEditor/SegmentEditor.py#L183-L187 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py3/numpy/lib/utils.py | python | _lookfor_generate_cache | (module, import_modules, regenerate) | return cache | Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated. | Generate docstring cache for given module. | [
"Generate",
"docstring",
"cache",
"for",
"given",
"module",
"."
] | def _lookfor_generate_cache(module, import_modules, regenerate):
"""
Generate docstring cache for given module.
Parameters
----------
module : str, None, module
Module for which to generate docstring cache
import_modules : bool
Whether to import sub-modules in packages.
regenerate : bool
Re-generate the docstring cache
Returns
-------
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
"""
# Local import to speed up numpy's import time.
import inspect
from io import StringIO
if module is None:
module = "numpy"
if isinstance(module, str):
try:
__import__(module)
except ImportError:
return {}
module = sys.modules[module]
elif isinstance(module, list) or isinstance(module, tuple):
cache = {}
for mod in module:
cache.update(_lookfor_generate_cache(mod, import_modules,
regenerate))
return cache
if id(module) in _lookfor_caches and not regenerate:
return _lookfor_caches[id(module)]
# walk items and collect docstrings
cache = {}
_lookfor_caches[id(module)] = cache
seen = {}
index = 0
stack = [(module.__name__, module)]
while stack:
name, item = stack.pop(0)
if id(item) in seen:
continue
seen[id(item)] = True
index += 1
kind = "object"
if inspect.ismodule(item):
kind = "module"
try:
_all = item.__all__
except AttributeError:
_all = None
# import sub-packages
if import_modules and hasattr(item, '__path__'):
for pth in item.__path__:
for mod_path in os.listdir(pth):
this_py = os.path.join(pth, mod_path)
init_py = os.path.join(pth, mod_path, '__init__.py')
if (os.path.isfile(this_py) and
mod_path.endswith('.py')):
to_import = mod_path[:-3]
elif os.path.isfile(init_py):
to_import = mod_path
else:
continue
if to_import == '__init__':
continue
try:
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
__import__("%s.%s" % (name, to_import))
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
# Catch SystemExit, too
except BaseException:
continue
for n, v in _getmembers(item):
try:
item_name = getattr(v, '__name__', "%s.%s" % (name, n))
mod_name = getattr(v, '__module__', None)
except NameError:
# ref. SWIG's global cvars
# NameError: Unknown C global variable
item_name = "%s.%s" % (name, n)
mod_name = None
if '.' not in item_name and mod_name:
item_name = "%s.%s" % (mod_name, item_name)
if not item_name.startswith(name + '.'):
# don't crawl "foreign" objects
if isinstance(v, ufunc):
# ... unless they are ufuncs
pass
else:
continue
elif not (inspect.ismodule(v) or _all is None or n in _all):
continue
stack.append(("%s.%s" % (name, n), v))
elif inspect.isclass(item):
kind = "class"
for n, v in _getmembers(item):
stack.append(("%s.%s" % (name, n), v))
elif hasattr(item, "__call__"):
kind = "func"
try:
doc = inspect.getdoc(item)
except NameError:
# ref SWIG's NameError: Unknown C global variable
doc = None
if doc is not None:
cache[name] = (doc, kind, index)
return cache | [
"def",
"_lookfor_generate_cache",
"(",
"module",
",",
"import_modules",
",",
"regenerate",
")",
":",
"# Local import to speed up numpy's import time.",
"import",
"inspect",
"from",
"io",
"import",
"StringIO",
"if",
"module",
"is",
"None",
":",
"module",
"=",
"\"numpy\"",
"if",
"isinstance",
"(",
"module",
",",
"str",
")",
":",
"try",
":",
"__import__",
"(",
"module",
")",
"except",
"ImportError",
":",
"return",
"{",
"}",
"module",
"=",
"sys",
".",
"modules",
"[",
"module",
"]",
"elif",
"isinstance",
"(",
"module",
",",
"list",
")",
"or",
"isinstance",
"(",
"module",
",",
"tuple",
")",
":",
"cache",
"=",
"{",
"}",
"for",
"mod",
"in",
"module",
":",
"cache",
".",
"update",
"(",
"_lookfor_generate_cache",
"(",
"mod",
",",
"import_modules",
",",
"regenerate",
")",
")",
"return",
"cache",
"if",
"id",
"(",
"module",
")",
"in",
"_lookfor_caches",
"and",
"not",
"regenerate",
":",
"return",
"_lookfor_caches",
"[",
"id",
"(",
"module",
")",
"]",
"# walk items and collect docstrings",
"cache",
"=",
"{",
"}",
"_lookfor_caches",
"[",
"id",
"(",
"module",
")",
"]",
"=",
"cache",
"seen",
"=",
"{",
"}",
"index",
"=",
"0",
"stack",
"=",
"[",
"(",
"module",
".",
"__name__",
",",
"module",
")",
"]",
"while",
"stack",
":",
"name",
",",
"item",
"=",
"stack",
".",
"pop",
"(",
"0",
")",
"if",
"id",
"(",
"item",
")",
"in",
"seen",
":",
"continue",
"seen",
"[",
"id",
"(",
"item",
")",
"]",
"=",
"True",
"index",
"+=",
"1",
"kind",
"=",
"\"object\"",
"if",
"inspect",
".",
"ismodule",
"(",
"item",
")",
":",
"kind",
"=",
"\"module\"",
"try",
":",
"_all",
"=",
"item",
".",
"__all__",
"except",
"AttributeError",
":",
"_all",
"=",
"None",
"# import sub-packages",
"if",
"import_modules",
"and",
"hasattr",
"(",
"item",
",",
"'__path__'",
")",
":",
"for",
"pth",
"in",
"item",
".",
"__path__",
":",
"for",
"mod_path",
"in",
"os",
".",
"listdir",
"(",
"pth",
")",
":",
"this_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pth",
",",
"mod_path",
")",
"init_py",
"=",
"os",
".",
"path",
".",
"join",
"(",
"pth",
",",
"mod_path",
",",
"'__init__.py'",
")",
"if",
"(",
"os",
".",
"path",
".",
"isfile",
"(",
"this_py",
")",
"and",
"mod_path",
".",
"endswith",
"(",
"'.py'",
")",
")",
":",
"to_import",
"=",
"mod_path",
"[",
":",
"-",
"3",
"]",
"elif",
"os",
".",
"path",
".",
"isfile",
"(",
"init_py",
")",
":",
"to_import",
"=",
"mod_path",
"else",
":",
"continue",
"if",
"to_import",
"==",
"'__init__'",
":",
"continue",
"try",
":",
"old_stdout",
"=",
"sys",
".",
"stdout",
"old_stderr",
"=",
"sys",
".",
"stderr",
"try",
":",
"sys",
".",
"stdout",
"=",
"StringIO",
"(",
")",
"sys",
".",
"stderr",
"=",
"StringIO",
"(",
")",
"__import__",
"(",
"\"%s.%s\"",
"%",
"(",
"name",
",",
"to_import",
")",
")",
"finally",
":",
"sys",
".",
"stdout",
"=",
"old_stdout",
"sys",
".",
"stderr",
"=",
"old_stderr",
"# Catch SystemExit, too",
"except",
"BaseException",
":",
"continue",
"for",
"n",
",",
"v",
"in",
"_getmembers",
"(",
"item",
")",
":",
"try",
":",
"item_name",
"=",
"getattr",
"(",
"v",
",",
"'__name__'",
",",
"\"%s.%s\"",
"%",
"(",
"name",
",",
"n",
")",
")",
"mod_name",
"=",
"getattr",
"(",
"v",
",",
"'__module__'",
",",
"None",
")",
"except",
"NameError",
":",
"# ref. SWIG's global cvars",
"# NameError: Unknown C global variable",
"item_name",
"=",
"\"%s.%s\"",
"%",
"(",
"name",
",",
"n",
")",
"mod_name",
"=",
"None",
"if",
"'.'",
"not",
"in",
"item_name",
"and",
"mod_name",
":",
"item_name",
"=",
"\"%s.%s\"",
"%",
"(",
"mod_name",
",",
"item_name",
")",
"if",
"not",
"item_name",
".",
"startswith",
"(",
"name",
"+",
"'.'",
")",
":",
"# don't crawl \"foreign\" objects",
"if",
"isinstance",
"(",
"v",
",",
"ufunc",
")",
":",
"# ... unless they are ufuncs",
"pass",
"else",
":",
"continue",
"elif",
"not",
"(",
"inspect",
".",
"ismodule",
"(",
"v",
")",
"or",
"_all",
"is",
"None",
"or",
"n",
"in",
"_all",
")",
":",
"continue",
"stack",
".",
"append",
"(",
"(",
"\"%s.%s\"",
"%",
"(",
"name",
",",
"n",
")",
",",
"v",
")",
")",
"elif",
"inspect",
".",
"isclass",
"(",
"item",
")",
":",
"kind",
"=",
"\"class\"",
"for",
"n",
",",
"v",
"in",
"_getmembers",
"(",
"item",
")",
":",
"stack",
".",
"append",
"(",
"(",
"\"%s.%s\"",
"%",
"(",
"name",
",",
"n",
")",
",",
"v",
")",
")",
"elif",
"hasattr",
"(",
"item",
",",
"\"__call__\"",
")",
":",
"kind",
"=",
"\"func\"",
"try",
":",
"doc",
"=",
"inspect",
".",
"getdoc",
"(",
"item",
")",
"except",
"NameError",
":",
"# ref SWIG's NameError: Unknown C global variable",
"doc",
"=",
"None",
"if",
"doc",
"is",
"not",
"None",
":",
"cache",
"[",
"name",
"]",
"=",
"(",
"doc",
",",
"kind",
",",
"index",
")",
"return",
"cache"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/lib/utils.py#L814-L947 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/core/fromnumeric.py | python | trace | (a, offset=0, axis1=0, axis2=1, dtype=None, out=None) | Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3) | Return the sum along diagonals of the array. | [
"Return",
"the",
"sum",
"along",
"diagonals",
"of",
"the",
"array",
"."
] | def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out) | [
"def",
"trace",
"(",
"a",
",",
"offset",
"=",
"0",
",",
"axis1",
"=",
"0",
",",
"axis2",
"=",
"1",
",",
"dtype",
"=",
"None",
",",
"out",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"a",
",",
"np",
".",
"matrix",
")",
":",
"# Get trace of matrix via an array to preserve backward compatibility.",
"return",
"asarray",
"(",
"a",
")",
".",
"trace",
"(",
"offset",
"=",
"offset",
",",
"axis1",
"=",
"axis1",
",",
"axis2",
"=",
"axis2",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
")",
"else",
":",
"return",
"asanyarray",
"(",
"a",
")",
".",
"trace",
"(",
"offset",
"=",
"offset",
",",
"axis1",
"=",
"axis1",
",",
"axis2",
"=",
"axis2",
",",
"dtype",
"=",
"dtype",
",",
"out",
"=",
"out",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/core/fromnumeric.py#L1626-L1686 |
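A small supplementary example for the trace record above, exercising the offset parameter on a 3x3 array; the expected values are computed by hand from the documented semantics.

import numpy as np

a = np.arange(9).reshape(3, 3)
print(np.trace(a))            # 0 + 4 + 8 = 12
print(np.trace(a, offset=1))  # 1 + 5 = 6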
|
lballabio/quantlib-old | 136336947ed4fea9ecc1da6edad188700e821739 | gensrc/gensrc/configuration/configuration.py | python | Configuration.prefix | (self) | return self.prefix_ | Return text to be prefixed to addin function names. | Return text to be prefixed to addin function names. | [
"Return",
"text",
"to",
"be",
"prefixed",
"to",
"addin",
"function",
"names",
"."
] | def prefix(self):
"""Return text to be prefixed to addin function names."""
return self.prefix_ | [
"def",
"prefix",
"(",
"self",
")",
":",
"return",
"self",
".",
"prefix_"
] | https://github.com/lballabio/quantlib-old/blob/136336947ed4fea9ecc1da6edad188700e821739/gensrc/gensrc/configuration/configuration.py#L55-L57 |
|
pgRouting/osm2pgrouting | 8491929fc4037d308f271e84d59bb96da3c28aa2 | tools/cpplint.py | python | CleansedLines.NumLines | (self) | return self.num_lines | Returns the number of lines represented. | Returns the number of lines represented. | [
"Returns",
"the",
"number",
"of",
"lines",
"represented",
"."
] | def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines | [
"def",
"NumLines",
"(",
"self",
")",
":",
"return",
"self",
".",
"num_lines"
] | https://github.com/pgRouting/osm2pgrouting/blob/8491929fc4037d308f271e84d59bb96da3c28aa2/tools/cpplint.py#L1311-L1313 |
|
generalized-intelligence/GAAS | 29ab17d3e8a4ba18edef3a57c36d8db6329fac73 | deprecated/algorithms/sfm/OpenSfM/opensfm/types.py | python | FisheyeCamera.pixel_bearing | (self, pixel) | return np.array([x / l, y / l, 1.0 / l]) | Unit vector pointing to the pixel viewing direction. | Unit vector pointing to the pixel viewing direction. | [
"Unit",
"vector",
"pointing",
"to",
"the",
"pixel",
"viewing",
"direction",
"."
] | def pixel_bearing(self, pixel):
"""Unit vector pointing to the pixel viewing direction."""
point = np.asarray(pixel).reshape((1, 1, 2))
distortion = np.array([self.k1, self.k2, 0., 0.])
x, y = cv2.fisheye.undistortPoints(point, self.get_K(), distortion).flat
l = np.sqrt(x * x + y * y + 1.0)
return np.array([x / l, y / l, 1.0 / l]) | [
"def",
"pixel_bearing",
"(",
"self",
",",
"pixel",
")",
":",
"point",
"=",
"np",
".",
"asarray",
"(",
"pixel",
")",
".",
"reshape",
"(",
"(",
"1",
",",
"1",
",",
"2",
")",
")",
"distortion",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"k1",
",",
"self",
".",
"k2",
",",
"0.",
",",
"0.",
"]",
")",
"x",
",",
"y",
"=",
"cv2",
".",
"fisheye",
".",
"undistortPoints",
"(",
"point",
",",
"self",
".",
"get_K",
"(",
")",
",",
"distortion",
")",
".",
"flat",
"l",
"=",
"np",
".",
"sqrt",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
"+",
"1.0",
")",
"return",
"np",
".",
"array",
"(",
"[",
"x",
"/",
"l",
",",
"y",
"/",
"l",
",",
"1.0",
"/",
"l",
"]",
")"
] | https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/deprecated/algorithms/sfm/OpenSfM/opensfm/types.py#L479-L485 |
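A hedged sketch of the normalization step in the pixel_bearing record above: once undistortion yields normalized coordinates (x, y), the unit bearing is (x, y, 1) scaled by 1/sqrt(x*x + y*y + 1). cv2 is not needed for this part; the input values are arbitrary.

import numpy as np

def bearing_from_normalized(x, y):
    l = np.sqrt(x * x + y * y + 1.0)
    return np.array([x / l, y / l, 1.0 / l])

b = bearing_from_normalized(0.3, -0.4)
print(b, np.linalg.norm(b))  # unit-length vector, norm == 1.0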
|
microsoft/onnxruntime | f92e47e95b13a240e37caf7b36577983544f98fc | onnxruntime/python/onnxruntime_inference_collection.py | python | OrtValue.ortvalue_from_shape_and_type | (shape=None, element_type=None, device_type='cpu', device_id=0) | return OrtValue(C.OrtValue.ortvalue_from_shape_and_type(shape, element_type,
C.OrtDevice(get_ort_device_type(device_type), C.OrtDevice.default_memory(), device_id))) | Factory method to construct an OrtValue (which holds a Tensor) from given shape and element_type
:param shape: List of integers indicating the shape of the OrtValue
:param element_type: The data type of the elements in the OrtValue (numpy type)
:param device_type: e.g. cpu, cuda, cpu by default
:param device_id: device id, e.g. 0 | Factory method to construct an OrtValue (which holds a Tensor) from given shape and element_type | [
"Factory",
"method",
"to",
"construct",
"an",
"OrtValue",
"(",
"which",
"holds",
"a",
"Tensor",
")",
"from",
"given",
"shape",
"and",
"element_type"
] | def ortvalue_from_shape_and_type(shape=None, element_type=None, device_type='cpu', device_id=0):
'''
Factory method to construct an OrtValue (which holds a Tensor) from given shape and element_type
:param shape: List of integers indicating the shape of the OrtValue
:param element_type: The data type of the elements in the OrtValue (numpy type)
:param device_type: e.g. cpu, cuda, cpu by default
:param device_id: device id, e.g. 0
'''
if shape is None or element_type is None:
raise ValueError("`element_type` and `shape` are to be provided if pre-allocated memory is provided")
return OrtValue(C.OrtValue.ortvalue_from_shape_and_type(shape, element_type,
C.OrtDevice(get_ort_device_type(device_type), C.OrtDevice.default_memory(), device_id))) | [
"def",
"ortvalue_from_shape_and_type",
"(",
"shape",
"=",
"None",
",",
"element_type",
"=",
"None",
",",
"device_type",
"=",
"'cpu'",
",",
"device_id",
"=",
"0",
")",
":",
"if",
"shape",
"is",
"None",
"or",
"element_type",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"`element_type` and `shape` are to be provided if pre-allocated memory is provided\"",
")",
"return",
"OrtValue",
"(",
"C",
".",
"OrtValue",
".",
"ortvalue_from_shape_and_type",
"(",
"shape",
",",
"element_type",
",",
"C",
".",
"OrtDevice",
"(",
"get_ort_device_type",
"(",
"device_type",
")",
",",
"C",
".",
"OrtDevice",
".",
"default_memory",
"(",
")",
",",
"device_id",
")",
")",
")"
] | https://github.com/microsoft/onnxruntime/blob/f92e47e95b13a240e37caf7b36577983544f98fc/onnxruntime/python/onnxruntime_inference_collection.py#L554-L567 |
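A hedged usage sketch for the factory documented in the record above; it assumes an onnxruntime build with the OrtValue binding. The shape and dtype are arbitrary.

import numpy as np
import onnxruntime

ort_value = onnxruntime.OrtValue.ortvalue_from_shape_and_type(
    [2, 3], np.float32, device_type="cpu", device_id=0)
print(ort_value.shape())  # [2, 3]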
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_misc.py | python | Joystick.GetPOVCTSPosition | (*args, **kwargs) | return _misc_.Joystick_GetPOVCTSPosition(*args, **kwargs) | GetPOVCTSPosition(self) -> int | GetPOVCTSPosition(self) -> int | [
"GetPOVCTSPosition",
"(",
"self",
")",
"-",
">",
"int"
] | def GetPOVCTSPosition(*args, **kwargs):
"""GetPOVCTSPosition(self) -> int"""
return _misc_.Joystick_GetPOVCTSPosition(*args, **kwargs) | [
"def",
"GetPOVCTSPosition",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"Joystick_GetPOVCTSPosition",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L2142-L2144 |
|
Yelp/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | moe/views/pretty_view.py | python | PrettyView.form_response | (self, response_dict) | return self.response_schema.serialize(response_dict) | Return the serialized response object from a dict.
:param response_dict: a dict that can be serialized by self.response_schema
:type response_dict: dict
:returns: a serialized self.response_schema object | Return the serialized response object from a dict. | [
"Return",
"the",
"serialized",
"response",
"object",
"from",
"a",
"dict",
"."
] | def form_response(self, response_dict):
"""Return the serialized response object from a dict.
:param response_dict: a dict that can be serialized by self.response_schema
:type response_dict: dict
:returns: a serialized self.response_schema object
"""
self._create_moe_log_line(
type='response',
content=response_dict,
)
return self.response_schema.serialize(response_dict) | [
"def",
"form_response",
"(",
"self",
",",
"response_dict",
")",
":",
"self",
".",
"_create_moe_log_line",
"(",
"type",
"=",
"'response'",
",",
"content",
"=",
"response_dict",
",",
")",
"return",
"self",
".",
"response_schema",
".",
"serialize",
"(",
"response_dict",
")"
] | https://github.com/Yelp/MOE/blob/5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c/moe/views/pretty_view.py#L74-L85 |
|
bulletphysics/bullet3 | f0f2a952e146f016096db6f85cf0c44ed75b0b9a | examples/pybullet/gym/pybullet_envs/agents/tools/mock_algorithm.py | python | MockAlgorithm.__init__ | (self, envs) | Produce random actions and empty summaries.
Args:
envs: List of in-graph environments. | Produce random actions and empty summaries. | [
"Produce",
"random",
"actions",
"and",
"empty",
"summaries",
"."
] | def __init__(self, envs):
"""Produce random actions and empty summaries.
Args:
envs: List of in-graph environments.
"""
self._envs = envs | [
"def",
"__init__",
"(",
"self",
",",
"envs",
")",
":",
"self",
".",
"_envs",
"=",
"envs"
] | https://github.com/bulletphysics/bullet3/blob/f0f2a952e146f016096db6f85cf0c44ed75b0b9a/examples/pybullet/gym/pybullet_envs/agents/tools/mock_algorithm.py#L28-L34 |
||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/python/framework/function.py | python | _DefinedFunction._create_definition_if_needed | (self) | Creates the function definition if it's not created yet. | Creates the function definition if it's not created yet. | [
"Creates",
"the",
"function",
"definition",
"if",
"it",
"s",
"not",
"created",
"yet",
"."
] | def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
if self._definition is not None:
return
# Create the func_def object.
temp_graph = _FuncGraph()
with temp_graph.as_default():
# List of placeholders for the function_def.
inputs = []
for (argname, argtype) in self._args:
argholder = array_ops.placeholder(argtype, name=argname)
inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=temp_graph.getvar):
outputs = self._func(*inputs)
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any([_ is None for _ in outputs]):
raise ValueError("Function can not return None.")
# Ensures each output is a Tensor.
outputs = [ops.convert_to_tensor(_) for _ in outputs]
self._extra_inputs = temp_graph.extra_inputs
inputs.extend(temp_graph.extra_args)
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Build the FunctionDef
self._definition = _graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
inputs,
outputs,
out_names=self._out_names)
# Extra kwargs are treated as attrs on the function def.
sig_pre_func_name = self._func_name or _get_func_name(self._func)
kwargs_attr = _parse_kwargs_as_attrs(sig_pre_func_name,
**self._extra_kwargs)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([_get_func_name(self._func), self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__ | [
"def",
"_create_definition_if_needed",
"(",
"self",
")",
":",
"if",
"self",
".",
"_definition",
"is",
"not",
"None",
":",
"return",
"# Create the func_def object.",
"temp_graph",
"=",
"_FuncGraph",
"(",
")",
"with",
"temp_graph",
".",
"as_default",
"(",
")",
":",
"# List of placeholders for the function_def.",
"inputs",
"=",
"[",
"]",
"for",
"(",
"argname",
",",
"argtype",
")",
"in",
"self",
".",
"_args",
":",
"argholder",
"=",
"array_ops",
".",
"placeholder",
"(",
"argtype",
",",
"name",
"=",
"argname",
")",
"inputs",
".",
"append",
"(",
"argholder",
")",
"# Call func and gather the output tensors.",
"with",
"vs",
".",
"variable_scope",
"(",
"\"\"",
",",
"custom_getter",
"=",
"temp_graph",
".",
"getvar",
")",
":",
"outputs",
"=",
"self",
".",
"_func",
"(",
"*",
"inputs",
")",
"# If func only returned one value, make it a tuple.",
"if",
"not",
"isinstance",
"(",
"outputs",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"outputs",
"=",
"(",
"outputs",
",",
")",
"if",
"any",
"(",
"[",
"_",
"is",
"None",
"for",
"_",
"in",
"outputs",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Function can not return None.\"",
")",
"# Ensures each output is a Tensor.",
"outputs",
"=",
"[",
"ops",
".",
"convert_to_tensor",
"(",
"_",
")",
"for",
"_",
"in",
"outputs",
"]",
"self",
".",
"_extra_inputs",
"=",
"temp_graph",
".",
"extra_inputs",
"inputs",
".",
"extend",
"(",
"temp_graph",
".",
"extra_args",
")",
"# pylint: disable=protected-access",
"self",
".",
"_sub_functions",
"=",
"temp_graph",
".",
"_functions",
"# pylint: enable=protected-access",
"# Build the FunctionDef",
"self",
".",
"_definition",
"=",
"_graph_to_function_def",
"(",
"temp_graph",
",",
"temp_graph",
".",
"get_operations",
"(",
")",
",",
"inputs",
",",
"outputs",
",",
"out_names",
"=",
"self",
".",
"_out_names",
")",
"# Extra kwargs are treated as attrs on the function def.",
"sig_pre_func_name",
"=",
"self",
".",
"_func_name",
"or",
"_get_func_name",
"(",
"self",
".",
"_func",
")",
"kwargs_attr",
"=",
"_parse_kwargs_as_attrs",
"(",
"sig_pre_func_name",
",",
"*",
"*",
"self",
".",
"_extra_kwargs",
")",
"for",
"k",
"in",
"kwargs_attr",
":",
"self",
".",
"_definition",
".",
"attr",
"[",
"k",
"]",
".",
"CopyFrom",
"(",
"kwargs_attr",
"[",
"k",
"]",
")",
"# Hash the definition and its dependencies.",
"self",
".",
"_hash_str",
"=",
"self",
".",
"_create_hash_str",
"(",
"self",
".",
"_definition",
".",
"signature",
".",
"input_arg",
",",
"self",
".",
"_definition",
".",
"signature",
".",
"output_arg",
",",
"self",
".",
"_definition",
".",
"node_def",
")",
"# Finally, we decide the function name to use. If not specified,",
"# make up something which is almost certainly unique (but deterministic).",
"if",
"not",
"self",
".",
"_func_name",
":",
"self",
".",
"_func_name",
"=",
"\"_\"",
".",
"join",
"(",
"[",
"_get_func_name",
"(",
"self",
".",
"_func",
")",
",",
"self",
".",
"_hash_str",
"]",
")",
"self",
".",
"_definition",
".",
"signature",
".",
"name",
"=",
"self",
".",
"_func_name",
"if",
"self",
".",
"_func",
".",
"__doc__",
":",
"self",
".",
"_definition",
".",
"signature",
".",
"description",
"=",
"self",
".",
"_func",
".",
"__doc__"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/framework/function.py#L343-L399 |
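A hedged sketch of the naming scheme described near the end of the record above: when no function name is supplied, the Python name is joined with a deterministic hash of the serialized definition. The hashing details here are illustrative, not TensorFlow's actual _create_hash_str.

import hashlib

def deterministic_name(py_name, serialized_definition):
    # Same definition in, same name out, so caches stay stable across runs.
    hash_str = hashlib.sha1(serialized_definition).hexdigest()[:12]
    return "_".join([py_name, hash_str])

print(deterministic_name("my_func", b"signature-and-node-defs"))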
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py | python | DataFrame.diff | (self, periods=1, axis=0) | return self._constructor(new_data) | First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Returns
-------
DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN | First discrete difference of element. | [
"First",
"discrete",
"difference",
"of",
"element",
"."
] | def diff(self, periods=1, axis=0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Returns
-------
DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Notes
-----
For boolean dtypes, this uses :meth:`operator.xor` rather than
:meth:`operator.sub`.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data) | [
"def",
"diff",
"(",
"self",
",",
"periods",
"=",
"1",
",",
"axis",
"=",
"0",
")",
"->",
"\"DataFrame\"",
":",
"bm_axis",
"=",
"self",
".",
"_get_block_manager_axis",
"(",
"axis",
")",
"new_data",
"=",
"self",
".",
"_data",
".",
"diff",
"(",
"n",
"=",
"periods",
",",
"axis",
"=",
"bm_axis",
")",
"return",
"self",
".",
"_constructor",
"(",
"new_data",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/frame.py#L6512-L6604 |
|
RGF-team/rgf | 272afb85b4c91571f576e5fc83ecfacce3672eb4 | python-package/rgf/utils.py | python | RGFClassifierMixin.predict_proba | (self, X) | return y | Predict class probabilities for X.
The predicted class probabilities of an input sample are computed.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes].
The class probabilities of the input samples.
The order of the classes corresponds to that in the attribute classes_. | Predict class probabilities for X. | [
"Predict",
"class",
"probabilities",
"for",
"X",
"."
] | def predict_proba(self, X):
"""
Predict class probabilities for X.
The predicted class probabilities of an input sample are computed.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes].
The class probabilities of the input samples.
The order of the classes corresponds to that in the attribute classes_.
"""
if not hasattr(self, '_fitted') or not self._fitted:
raise NotFittedError(NOT_FITTED_ERROR_DESC)
X = check_array(X, accept_sparse=True)
self._check_n_features(X.shape[1])
if self._n_classes == 2:
y = self._estimators[0].predict(X)
y = sigmoid(y)
y = np.c_[y, 1 - y]
else:
y = np.zeros((X.shape[0], self._n_classes))
for i, clf in enumerate(self._estimators):
class_proba = clf.predict(X)
y[:, i] = class_proba
if self.calc_prob == "sigmoid":
y = sigmoid(y)
normalizer = np.sum(y, axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
y /= normalizer
else:
y = softmax(y)
return y | [
"def",
"predict_proba",
"(",
"self",
",",
"X",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_fitted'",
")",
"or",
"not",
"self",
".",
"_fitted",
":",
"raise",
"NotFittedError",
"(",
"NOT_FITTED_ERROR_DESC",
")",
"X",
"=",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"True",
")",
"self",
".",
"_check_n_features",
"(",
"X",
".",
"shape",
"[",
"1",
"]",
")",
"if",
"self",
".",
"_n_classes",
"==",
"2",
":",
"y",
"=",
"self",
".",
"_estimators",
"[",
"0",
"]",
".",
"predict",
"(",
"X",
")",
"y",
"=",
"sigmoid",
"(",
"y",
")",
"y",
"=",
"np",
".",
"c_",
"[",
"y",
",",
"1",
"-",
"y",
"]",
"else",
":",
"y",
"=",
"np",
".",
"zeros",
"(",
"(",
"X",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"_n_classes",
")",
")",
"for",
"i",
",",
"clf",
"in",
"enumerate",
"(",
"self",
".",
"_estimators",
")",
":",
"class_proba",
"=",
"clf",
".",
"predict",
"(",
"X",
")",
"y",
"[",
":",
",",
"i",
"]",
"=",
"class_proba",
"if",
"self",
".",
"calc_prob",
"==",
"\"sigmoid\"",
":",
"y",
"=",
"sigmoid",
"(",
"y",
")",
"normalizer",
"=",
"np",
".",
"sum",
"(",
"y",
",",
"axis",
"=",
"1",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"normalizer",
"[",
"normalizer",
"==",
"0.0",
"]",
"=",
"1.0",
"y",
"/=",
"normalizer",
"else",
":",
"y",
"=",
"softmax",
"(",
"y",
")",
"return",
"y"
] | https://github.com/RGF-team/rgf/blob/272afb85b4c91571f576e5fc83ecfacce3672eb4/python-package/rgf/utils.py#L589-L628 |
|
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/VBox/Devices/EFI/Firmware/AppPkg/Applications/Python/PyMod-2.7.2/Lib/pydoc.py | python | HTMLDoc.docother | (self, object, name=None, mod=None, *ignored) | return lhs + self.repr(object) | Produce HTML documentation for a data object. | Produce HTML documentation for a data object. | [
"Produce",
"HTML",
"documentation",
"for",
"a",
"data",
"object",
"."
] | def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object) | [
"def",
"docother",
"(",
"self",
",",
"object",
",",
"name",
"=",
"None",
",",
"mod",
"=",
"None",
",",
"*",
"ignored",
")",
":",
"lhs",
"=",
"name",
"and",
"'<strong>%s</strong> = '",
"%",
"name",
"or",
"''",
"return",
"lhs",
"+",
"self",
".",
"repr",
"(",
"object",
")"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/VBox/Devices/EFI/Firmware/AppPkg/Applications/Python/PyMod-2.7.2/Lib/pydoc.py#L922-L925 |
|
linyouhappy/kongkongxiyou | 7a69b2913eb29f4be77f9a62fb90cdd72c4160f1 | cocosjs/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py | python | Type.element_type | (self) | return result | Retrieve the Type of elements within this Type.
If accessed on a type that is not an array, complex, or vector type, an
exception will be raised. | Retrieve the Type of elements within this Type. | [
"Retrieve",
"the",
"Type",
"of",
"elements",
"within",
"this",
"Type",
"."
] | def element_type(self):
"""Retrieve the Type of elements within this Type.
If accessed on a type that is not an array, complex, or vector type, an
exception will be raised.
"""
result = conf.lib.clang_getElementType(self)
if result.kind == TypeKind.INVALID:
raise Exception('Element type not available on this type.')
return result | [
"def",
"element_type",
"(",
"self",
")",
":",
"result",
"=",
"conf",
".",
"lib",
".",
"clang_getElementType",
"(",
"self",
")",
"if",
"result",
".",
"kind",
"==",
"TypeKind",
".",
"INVALID",
":",
"raise",
"Exception",
"(",
"'Element type not available on this type.'",
")",
"return",
"result"
] | https://github.com/linyouhappy/kongkongxiyou/blob/7a69b2913eb29f4be77f9a62fb90cdd72c4160f1/cocosjs/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L1680-L1690 |
|
AngoraFuzzer/Angora | 80e81c8590077bc0ac069dbd367da8ce405ff618 | llvm_mode/dfsan_rt/sanitizer_common/scripts/cpplint.py | python | FindNextMultiLineCommentStart | (lines, lineix) | return len(lines) | Find the beginning marker for a multiline comment. | Find the beginning marker for a multiline comment. | [
"Find",
"the",
"beginning",
"marker",
"for",
"a",
"multiline",
"comment",
"."
] | def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines) | [
"def",
"FindNextMultiLineCommentStart",
"(",
"lines",
",",
"lineix",
")",
":",
"while",
"lineix",
"<",
"len",
"(",
"lines",
")",
":",
"if",
"lines",
"[",
"lineix",
"]",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'/*'",
")",
":",
"# Only return this marker if the comment goes beyond this line",
"if",
"lines",
"[",
"lineix",
"]",
".",
"strip",
"(",
")",
".",
"find",
"(",
"'*/'",
",",
"2",
")",
"<",
"0",
":",
"return",
"lineix",
"lineix",
"+=",
"1",
"return",
"len",
"(",
"lines",
")"
] | https://github.com/AngoraFuzzer/Angora/blob/80e81c8590077bc0ac069dbd367da8ce405ff618/llvm_mode/dfsan_rt/sanitizer_common/scripts/cpplint.py#L926-L934 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py | python | Misc.winfo_viewable | (self) | return self.tk.getint(
self.tk.call('winfo', 'viewable', self._w)) | Return true if the widget and all its higher ancestors are mapped. | Return true if the widget and all its higher ancestors are mapped. | [
"Return",
"true",
"if",
"the",
"widget",
"and",
"all",
"its",
"higher",
"ancestors",
"are",
"mapped",
"."
] | def winfo_viewable(self):
"""Return true if the widget and all its higher ancestors are mapped."""
return self.tk.getint(
self.tk.call('winfo', 'viewable', self._w)) | [
"def",
"winfo_viewable",
"(",
"self",
")",
":",
"return",
"self",
".",
"tk",
".",
"getint",
"(",
"self",
".",
"tk",
".",
"call",
"(",
"'winfo'",
",",
"'viewable'",
",",
"self",
".",
"_w",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py#L1111-L1114 |
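A small sketch of the mapped-ancestors rule this method checks; exact timing can vary by window manager, so treat the printed values as indicative:

    import tkinter as tk

    root = tk.Tk()
    frame = tk.Frame(root)
    label = tk.Label(frame, text='hi')
    label.pack()                   # label is mapped into frame...
    root.update()
    print(label.winfo_viewable())  # ...but 0: 'frame' itself was never packed
    frame.pack()
    root.update()
    print(label.winfo_viewable())  # 1 once every ancestor is mapped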
|
Harick1/caffe-yolo | eea92bf3ddfe4d0ff6b0b3ba9b15c029a83ed9a3 | python/caffe/io.py | python | Transformer.set_mean | (self, in_, mean) | Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable) | Set the mean to subtract for centering the data. | [
"Set",
"the",
"mean",
"to",
"subtract",
"for",
"centering",
"the",
"data",
"."
] | def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean | [
"def",
"set_mean",
"(",
"self",
",",
"in_",
",",
"mean",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"ms",
"=",
"mean",
".",
"shape",
"if",
"mean",
".",
"ndim",
"==",
"1",
":",
"# broadcast channels",
"if",
"ms",
"[",
"0",
"]",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Mean channels incompatible with input.'",
")",
"mean",
"=",
"mean",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"# elementwise mean",
"if",
"len",
"(",
"ms",
")",
"==",
"2",
":",
"ms",
"=",
"(",
"1",
",",
")",
"+",
"ms",
"if",
"len",
"(",
"ms",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Mean shape invalid'",
")",
"if",
"ms",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
":",
"]",
":",
"raise",
"ValueError",
"(",
"'Mean shape incompatible with input shape.'",
")",
"self",
".",
"mean",
"[",
"in_",
"]",
"=",
"mean"
] | https://github.com/Harick1/caffe-yolo/blob/eea92bf3ddfe4d0ff6b0b3ba9b15c029a83ed9a3/python/caffe/io.py#L236-L260 |
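A hypothetical sketch of the per-channel branch above; the input name 'data' and the 4-D shape are assumptions about the surrounding net:

    import numpy as np
    import caffe

    t = caffe.io.Transformer({'data': (1, 3, 227, 227)})
    t.set_mean('data', np.array([104.0, 117.0, 123.0]))  # 1-D mean: broadcast per channel
    # An elementwise mean must instead match the full (3, 227, 227) input shape.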
||
raymondlu/super-animation-samples | 04234269112ff0dc32447f27a761dbbb00b8ba17 | samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py | python | CursorKind.is_invalid | (self) | return conf.lib.clang_isInvalid(self) | Test if this is an invalid kind. | Test if this is an invalid kind. | [
"Test",
"if",
"this",
"is",
"an",
"invalid",
"kind",
"."
] | def is_invalid(self):
"""Test if this is an invalid kind."""
return conf.lib.clang_isInvalid(self) | [
"def",
"is_invalid",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_isInvalid",
"(",
"self",
")"
] | https://github.com/raymondlu/super-animation-samples/blob/04234269112ff0dc32447f27a761dbbb00b8ba17/samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L652-L654 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Hash/SHA224.py | python | SHA224Hash.new | (self, data=None) | return SHA224Hash(data) | Create a fresh SHA-224 hash object. | Create a fresh SHA-224 hash object. | [
"Create",
"a",
"fresh",
"SHA",
"-",
"224",
"hash",
"object",
"."
] | def new(self, data=None):
"""Create a fresh SHA-224 hash object."""
return SHA224Hash(data) | [
"def",
"new",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"return",
"SHA224Hash",
"(",
"data",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/ResourceManager/lib/Crypto/Hash/SHA224.py#L143-L146 |
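A short sketch of how this method is reached through the module-level API; the digest shown is the standard SHA-224 test vector for b'abc':

    from Crypto.Hash import SHA224

    h = SHA224.new(b'abc')   # module-level new() builds the hash object
    print(h.hexdigest())     # 23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7
    fresh = h.new()          # the method above: a fresh, independent SHA-224 object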
|
google/usd_from_gltf | 6d288cce8b68744494a226574ae1d7ba6a9c46eb | tools/ufginstall/ufginstall.py | python | JpgDep.install | (self) | Installs libjpeg-turbo dependency. | Installs libjpeg-turbo dependency. | [
"Installs",
"libjpeg",
"-",
"turbo",
"dependency",
"."
] | def install(self):
"""Installs libjpeg-turbo dependency."""
url = 'https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.2.zip'
extra_args = ['-DCMAKE_POSITION_INDEPENDENT_CODE=1']
path = os.path.join(cfg.src_dir, 'jpg.zip')
force = self.forced()
dl_dir = download_archive(url, path, force)
with cwd(dl_dir):
run_cmake(force, extra_args) | [
"def",
"install",
"(",
"self",
")",
":",
"url",
"=",
"'https://github.com/libjpeg-turbo/libjpeg-turbo/archive/2.0.2.zip'",
"extra_args",
"=",
"[",
"'-DCMAKE_POSITION_INDEPENDENT_CODE=1'",
"]",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"cfg",
".",
"src_dir",
",",
"'jpg.zip'",
")",
"force",
"=",
"self",
".",
"forced",
"(",
")",
"dl_dir",
"=",
"download_archive",
"(",
"url",
",",
"path",
",",
"force",
")",
"with",
"cwd",
"(",
"dl_dir",
")",
":",
"run_cmake",
"(",
"force",
",",
"extra_args",
")"
] | https://github.com/google/usd_from_gltf/blob/6d288cce8b68744494a226574ae1d7ba6a9c46eb/tools/ufginstall/ufginstall.py#L182-L190 |
||
microsoft/checkedc-clang | a173fefde5d7877b7750e7ce96dd08cf18baebf2 | clang/bindings/python/clang/cindex.py | python | Cursor.linkage | (self) | return LinkageKind.from_id(self._linkage) | Return the linkage of this cursor. | Return the linkage of this cursor. | [
"Return",
"the",
"linkage",
"of",
"this",
"cursor",
"."
] | def linkage(self):
"""Return the linkage of this cursor."""
if not hasattr(self, '_linkage'):
self._linkage = conf.lib.clang_getCursorLinkage(self)
return LinkageKind.from_id(self._linkage) | [
"def",
"linkage",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_linkage'",
")",
":",
"self",
".",
"_linkage",
"=",
"conf",
".",
"lib",
".",
"clang_getCursorLinkage",
"(",
"self",
")",
"return",
"LinkageKind",
".",
"from_id",
"(",
"self",
".",
"_linkage",
")"
] | https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/clang/bindings/python/clang/cindex.py#L1585-L1590 |
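A usage sketch, assuming a cindex build that exposes Cursor.linkage as in the entry above; the source string is made up:

    import clang.cindex

    index = clang.cindex.Index.create()
    src = 'static int hidden; int visible;'
    tu = index.parse('t.c', unsaved_files=[('t.c', src)])
    for node in tu.cursor.get_children():
        print(node.spelling, node.linkage)  # INTERNAL for 'hidden', EXTERNAL for 'visible'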
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/stc.py | python | StyledTextCtrl.DocLineFromVisible | (*args, **kwargs) | return _stc.StyledTextCtrl_DocLineFromVisible(*args, **kwargs) | DocLineFromVisible(self, int lineDisplay) -> int
Find the document line of a display line taking hidden lines into account. | DocLineFromVisible(self, int lineDisplay) -> int | [
"DocLineFromVisible",
"(",
"self",
"int",
"lineDisplay",
")",
"-",
">",
"int"
] | def DocLineFromVisible(*args, **kwargs):
"""
DocLineFromVisible(self, int lineDisplay) -> int
Find the document line of a display line taking hidden lines into account.
"""
return _stc.StyledTextCtrl_DocLineFromVisible(*args, **kwargs) | [
"def",
"DocLineFromVisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_DocLineFromVisible",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L3880-L3886 |
|
psi4/psi4 | be533f7f426b6ccc263904e55122899b16663395 | psi4/driver/procrouting/response/scf_products.py | python | TDRSCFEngine.reset_for_state_symm | (self, symmetry) | Reset internal quantities so the object is prepared to deal with transition to state with symmetry given | Reset internal quantities so the object is prepared to deal with transition to state with symmetry given | [
"Reset",
"internal",
"quantities",
"so",
"the",
"object",
"is",
"prepared",
"to",
"deal",
"with",
"transition",
"to",
"state",
"with",
"symmetry",
"given"
] | def reset_for_state_symm(self, symmetry):
"""Reset internal quantities so the object is prepared to deal with transition to state with symmetry given
"""
self.G_es = symmetry
self._build_prec()
self.product_cache.reset() | [
"def",
"reset_for_state_symm",
"(",
"self",
",",
"symmetry",
")",
":",
"self",
".",
"G_es",
"=",
"symmetry",
"self",
".",
"_build_prec",
"(",
")",
"self",
".",
"product_cache",
".",
"reset",
"(",
")"
] | https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/procrouting/response/scf_products.py#L232-L237 |
||
ceph/ceph | 959663007321a369c83218414a29bd9dbc8bda3a | qa/tasks/ceph_manager.py | python | OSDThrasher.thrash_pg_upmap_items | (self) | Install or remove random pg_upmap_items entries in OSDMap | Install or remove random pg_upmap_items entries in OSDMap | [
"Install",
"or",
"remove",
"random",
"pg_upmap_items",
"entries",
"in",
"OSDMap"
] | def thrash_pg_upmap_items(self):
"""
Install or remove random pg_upmap_items entries in OSDMap
"""
from random import shuffle
out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty')
j = json.loads(out)
self.log('j is %s' % j)
try:
if random.random() >= .3:
pgs = self.ceph_manager.get_pg_stats()
if not pgs:
return
pg = random.choice(pgs)
pgid = str(pg['pgid'])
poolid = int(pgid.split('.')[0])
sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid]
if len(sizes) == 0:
return
n = sizes[0]
osds = self.in_osds + self.out_osds
shuffle(osds)
osds = osds[0:n*2]
self.log('Setting %s to %s' % (pgid, osds))
cmd = ['osd', 'pg-upmap-items', pgid] + [str(x) for x in osds]
self.log('cmd %s' % cmd)
self.ceph_manager.raw_cluster_cmd(*cmd)
else:
m = j['pg_upmap_items']
if len(m) > 0:
shuffle(m)
pg = m[0]['pgid']
self.log('Clearing pg_upmap on %s' % pg)
self.ceph_manager.raw_cluster_cmd(
'osd',
'rm-pg-upmap-items',
pg)
else:
self.log('No pg_upmap entries; doing nothing')
except CommandFailedError:
self.log('Failed to rm-pg-upmap-items, ignoring') | [
"def",
"thrash_pg_upmap_items",
"(",
"self",
")",
":",
"from",
"random",
"import",
"shuffle",
"out",
"=",
"self",
".",
"ceph_manager",
".",
"raw_cluster_cmd",
"(",
"'osd'",
",",
"'dump'",
",",
"'-f'",
",",
"'json-pretty'",
")",
"j",
"=",
"json",
".",
"loads",
"(",
"out",
")",
"self",
".",
"log",
"(",
"'j is %s'",
"%",
"j",
")",
"try",
":",
"if",
"random",
".",
"random",
"(",
")",
">=",
".3",
":",
"pgs",
"=",
"self",
".",
"ceph_manager",
".",
"get_pg_stats",
"(",
")",
"if",
"not",
"pgs",
":",
"return",
"pg",
"=",
"random",
".",
"choice",
"(",
"pgs",
")",
"pgid",
"=",
"str",
"(",
"pg",
"[",
"'pgid'",
"]",
")",
"poolid",
"=",
"int",
"(",
"pgid",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"sizes",
"=",
"[",
"x",
"[",
"'size'",
"]",
"for",
"x",
"in",
"j",
"[",
"'pools'",
"]",
"if",
"x",
"[",
"'pool'",
"]",
"==",
"poolid",
"]",
"if",
"len",
"(",
"sizes",
")",
"==",
"0",
":",
"return",
"n",
"=",
"sizes",
"[",
"0",
"]",
"osds",
"=",
"self",
".",
"in_osds",
"+",
"self",
".",
"out_osds",
"shuffle",
"(",
"osds",
")",
"osds",
"=",
"osds",
"[",
"0",
":",
"n",
"*",
"2",
"]",
"self",
".",
"log",
"(",
"'Setting %s to %s'",
"%",
"(",
"pgid",
",",
"osds",
")",
")",
"cmd",
"=",
"[",
"'osd'",
",",
"'pg-upmap-items'",
",",
"pgid",
"]",
"+",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"osds",
"]",
"self",
".",
"log",
"(",
"'cmd %s'",
"%",
"cmd",
")",
"self",
".",
"ceph_manager",
".",
"raw_cluster_cmd",
"(",
"*",
"cmd",
")",
"else",
":",
"m",
"=",
"j",
"[",
"'pg_upmap_items'",
"]",
"if",
"len",
"(",
"m",
")",
">",
"0",
":",
"shuffle",
"(",
"m",
")",
"pg",
"=",
"m",
"[",
"0",
"]",
"[",
"'pgid'",
"]",
"self",
".",
"log",
"(",
"'Clearing pg_upmap on %s'",
"%",
"pg",
")",
"self",
".",
"ceph_manager",
".",
"raw_cluster_cmd",
"(",
"'osd'",
",",
"'rm-pg-upmap-items'",
",",
"pg",
")",
"else",
":",
"self",
".",
"log",
"(",
"'No pg_upmap entries; doing nothing'",
")",
"except",
"CommandFailedError",
":",
"self",
".",
"log",
"(",
"'Failed to rm-pg-upmap-items, ignoring'",
")"
] | https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/ceph_manager.py#L713-L753 |
||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/locators.py | python | Locator.locate | (self, requirement, prereleases=False) | return result | Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located. | Find the most recent distribution which matches the given
requirement. | [
"Find",
"the",
"most",
"recent",
"distribution",
"which",
"matches",
"the",
"given",
"requirement",
"."
] | def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
scheme = get_scheme(self.scheme)
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
if r.extras:
# lose the extras part of the requirement
requirement = r.requirement
matcher = scheme.matcher(requirement)
vcls = matcher.version_class
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(matcher.name)
if versions:
# sometimes, versions are invalid
slist = []
for k in versions:
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release version %s', k)
except Exception:
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
result = versions[slist[-1]]
if result and r.extras:
result.extras = r.extras
return result | [
"def",
"locate",
"(",
"self",
",",
"requirement",
",",
"prereleases",
"=",
"False",
")",
":",
"result",
"=",
"None",
"scheme",
"=",
"get_scheme",
"(",
"self",
".",
"scheme",
")",
"r",
"=",
"parse_requirement",
"(",
"requirement",
")",
"if",
"r",
"is",
"None",
":",
"raise",
"DistlibException",
"(",
"'Not a valid requirement: %r'",
"%",
"requirement",
")",
"if",
"r",
".",
"extras",
":",
"# lose the extras part of the requirement",
"requirement",
"=",
"r",
".",
"requirement",
"matcher",
"=",
"scheme",
".",
"matcher",
"(",
"requirement",
")",
"vcls",
"=",
"matcher",
".",
"version_class",
"logger",
".",
"debug",
"(",
"'matcher: %s (%s)'",
",",
"matcher",
",",
"type",
"(",
"matcher",
")",
".",
"__name__",
")",
"versions",
"=",
"self",
".",
"get_project",
"(",
"matcher",
".",
"name",
")",
"if",
"versions",
":",
"# sometimes, versions are invalid",
"slist",
"=",
"[",
"]",
"for",
"k",
"in",
"versions",
":",
"try",
":",
"if",
"not",
"matcher",
".",
"match",
"(",
"k",
")",
":",
"logger",
".",
"debug",
"(",
"'%s did not match %r'",
",",
"matcher",
",",
"k",
")",
"else",
":",
"if",
"prereleases",
"or",
"not",
"vcls",
"(",
"k",
")",
".",
"is_prerelease",
":",
"slist",
".",
"append",
"(",
"k",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'skipping pre-release version %s'",
",",
"k",
")",
"except",
"Exception",
":",
"logger",
".",
"warning",
"(",
"'error matching %s with %r'",
",",
"matcher",
",",
"k",
")",
"pass",
"# slist.append(k)",
"if",
"len",
"(",
"slist",
")",
">",
"1",
":",
"slist",
"=",
"sorted",
"(",
"slist",
",",
"key",
"=",
"scheme",
".",
"key",
")",
"if",
"slist",
":",
"logger",
".",
"debug",
"(",
"'sorted list: %s'",
",",
"slist",
")",
"result",
"=",
"versions",
"[",
"slist",
"[",
"-",
"1",
"]",
"]",
"if",
"result",
"and",
"r",
".",
"extras",
":",
"result",
".",
"extras",
"=",
"r",
".",
"extras",
"return",
"result"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/locators.py#L289-L336 |
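A hypothetical sketch using a concrete Locator subclass; the index URL and requirement string are illustrative, and the call needs network access:

    from distlib.locators import SimpleScrapingLocator

    locator = SimpleScrapingLocator('https://pypi.org/simple/')
    dist = locator.locate('requests (>= 2.0, < 3.0)')
    if dist is not None:
        print(dist.name, dist.version)  # newest non-prerelease match, per the docstring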
|
NERSC/timemory | 431912b360ff50d1a160d7826e2eea04fbd1037f | timemory/analyze/analyze.py | python | dump_flamegraph | (data, metric, file=None, echo_dart=False) | Dumps a flamegraph file | Dumps a flamegraph file | [
"Dumps",
"a",
"flamegraph",
"file"
] | def dump_flamegraph(data, metric, file=None, echo_dart=False):
"""Dumps a flamegraph file"""
from timemory.common import (
popen,
get_bin_script,
dart_measurement_file,
)
_files = dump_entity(
data,
lambda x: x.to_flamegraph(_get_metric(x, metric)),
file,
".flamegraph.txt",
)
for itr in _files:
flamegrapher = get_bin_script("flamegraph.pl")
if itr is not None:
if flamegrapher is None:
if echo_dart is True:
# write_ctest_notes(itr)
dart_measurement_file(
os.path.basename(itr), itr, format="string", type="text"
)
else:
(retc, outs, errs) = popen(
[
flamegrapher,
"--hash",
"--inverted",
"--bgcolors",
"'#FFFFFF'",
itr,
],
shell=True,
)
if outs is not None:
lbl = _get_label(itr)
sitr = _get_filename(itr, ".svg")
pitr = _get_filename(itr, ".png")
# write the SVG file
print(f"[{lbl}]|0> Outputting '{sitr}'...")
_create_directory(sitr)
with open(sitr, "w") as fout:
fout.write(f"{outs}\n")
# generate png
pfile = _svg_to_png(pitr, svg_code=outs)
# echo svg and png
if echo_dart:
# write_ctest_notes(sitr)
dart_measurement_file(
os.path.basename(itr),
itr,
format="string",
type="text",
)
dart_measurement_file(
os.path.basename(sitr), sitr, "svg"
)
if pfile is not None:
dart_measurement_file(
os.path.basename(pitr), pitr, "png"
)
else:
pass | [
"def",
"dump_flamegraph",
"(",
"data",
",",
"metric",
",",
"file",
"=",
"None",
",",
"echo_dart",
"=",
"False",
")",
":",
"from",
"timemory",
".",
"common",
"import",
"(",
"popen",
",",
"get_bin_script",
",",
"dart_measurement_file",
",",
")",
"_files",
"=",
"dump_entity",
"(",
"data",
",",
"lambda",
"x",
":",
"x",
".",
"to_flamegraph",
"(",
"_get_metric",
"(",
"x",
",",
"metric",
")",
")",
",",
"file",
",",
"\".flamegraph.txt\"",
",",
")",
"for",
"itr",
"in",
"_files",
":",
"flamegrapher",
"=",
"get_bin_script",
"(",
"\"flamegraph.pl\"",
")",
"if",
"itr",
"is",
"not",
"None",
":",
"if",
"flamegrapher",
"is",
"None",
":",
"if",
"echo_dart",
"is",
"True",
":",
"# write_ctest_notes(itr)",
"dart_measurement_file",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"itr",
")",
",",
"itr",
",",
"format",
"=",
"\"string\"",
",",
"type",
"=",
"\"text\"",
")",
"else",
":",
"(",
"retc",
",",
"outs",
",",
"errs",
")",
"=",
"popen",
"(",
"[",
"flamegrapher",
",",
"\"--hash\"",
",",
"\"--inverted\"",
",",
"\"--bgcolors\"",
",",
"\"'#FFFFFF'\"",
",",
"itr",
",",
"]",
",",
"shell",
"=",
"True",
",",
")",
"if",
"outs",
"is",
"not",
"None",
":",
"lbl",
"=",
"_get_label",
"(",
"itr",
")",
"sitr",
"=",
"_get_filename",
"(",
"itr",
",",
"\".svg\"",
")",
"pitr",
"=",
"_get_filename",
"(",
"itr",
",",
"\".png\"",
")",
"# write the SVG file",
"print",
"(",
"f\"[{lbl}]|0> Outputting '{sitr}'...\"",
")",
"_create_directory",
"(",
"sitr",
")",
"with",
"open",
"(",
"sitr",
",",
"\"w\"",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"f\"{outs}\\n\"",
")",
"# generate png",
"pfile",
"=",
"_svg_to_png",
"(",
"pitr",
",",
"svg_code",
"=",
"outs",
")",
"# echo svg and png",
"if",
"echo_dart",
":",
"# write_ctest_notes(sitr)",
"dart_measurement_file",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"itr",
")",
",",
"itr",
",",
"format",
"=",
"\"string\"",
",",
"type",
"=",
"\"text\"",
",",
")",
"dart_measurement_file",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"sitr",
")",
",",
"sitr",
",",
"\"svg\"",
")",
"if",
"pfile",
"is",
"not",
"None",
":",
"dart_measurement_file",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"pitr",
")",
",",
"pitr",
",",
"\"png\"",
")",
"else",
":",
"pass"
] | https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/timemory/analyze/analyze.py#L503-L570 |
||
microsoft/CNTK | e9396480025b9ca457d26b6f33dd07c474c6aa04 | bindings/python/cntk/ops/functions.py | python | Function.__str__ | (self) | return f_name + op_name + '(' + ", ".join([format_arg_spec(param) for param in args]) + ') -> ' + output_signature | Describes the Function and its signature as a string.
Example:
>>> f = C.log(C.input(1), name='f') # Function constructed as a graph
>>> print(f)
f: Log(Tensor[1]) -> Tensor[1]
>>> d = C.layers.Dense(10) # Function constructed as a layer
>>> print(d)
Dense(x: Sequence[tensor]) -> Sequence[tensor]
>>> @C.Function # construct a primitive Function through @Function
... def g(x,y):
... return x+y
>>> print(g)
Plus(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor]
>>> @C.Function # construct a composite through @Function
... def h(x,y):
... return C.exp(x+y)
>>> print(h)
Composite(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor] | Describes the Function and its signature as a string. | [
"Describes",
"the",
"Function",
"and",
"its",
"signature",
"as",
"a",
"string",
"."
] | def __str__(self):
'''
Describes the Function and its signature as a string.
Example:
>>> f = C.log(C.input(1), name='f') # Function constructed as a graph
>>> print(f)
f: Log(Tensor[1]) -> Tensor[1]
>>> d = C.layers.Dense(10) # Function constructed as a layer
>>> print(d)
Dense(x: Sequence[tensor]) -> Sequence[tensor]
>>> @C.Function # construct a primitive Function through @Function
... def g(x,y):
... return x+y
>>> print(g)
Plus(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor]
>>> @C.Function # construct a composite through @Function
... def h(x,y):
... return C.exp(x+y)
>>> print(h)
Composite(x: Sequence[tensor], y: Sequence[tensor]) -> Sequence[tensor]
'''
f_name = self.name
op_name = self.op_name
if self.is_composite:
if self.root_function and all(i.uid == ri.uid for i, ri in zip(self.inputs, self.root_function.inputs)):
op_name = self.root_function.op_name
else:
op_name = 'Composite' # (real op_name is CompositeFunctionOpName)
else:
op_name = self.op_name
args = self.signature
def format_arg_spec(v, is_output=False):
s = v.name + ': ' if not is_output and v.name else '' # (suppress output names, since they duplicate the function name)
return s + str(v._type)
outputs = self.outputs
if len(outputs) > 1:
output_signature = 'Tuple[' + ', '.join(format_arg_spec(output, True) for output in outputs) + ']'
else:
output_signature = format_arg_spec(outputs[0], True)
if self.name:
f_name += ": "
return f_name + op_name + '(' + ", ".join([format_arg_spec(param) for param in args]) + ') -> ' + output_signature | [
"def",
"__str__",
"(",
"self",
")",
":",
"f_name",
"=",
"self",
".",
"name",
"op_name",
"=",
"self",
".",
"op_name",
"if",
"self",
".",
"is_composite",
":",
"if",
"self",
".",
"root_function",
"and",
"all",
"(",
"i",
".",
"uid",
"==",
"ri",
".",
"uid",
"for",
"i",
",",
"ri",
"in",
"zip",
"(",
"self",
".",
"inputs",
",",
"self",
".",
"root_function",
".",
"inputs",
")",
")",
":",
"op_name",
"=",
"self",
".",
"root_function",
".",
"op_name",
"else",
":",
"op_name",
"=",
"'Composite'",
"# (real op_name is CompositeFunctionOpName)",
"else",
":",
"op_name",
"=",
"self",
".",
"op_name",
"args",
"=",
"self",
".",
"signature",
"def",
"format_arg_spec",
"(",
"v",
",",
"is_output",
"=",
"False",
")",
":",
"s",
"=",
"v",
".",
"name",
"+",
"': '",
"if",
"not",
"is_output",
"and",
"v",
".",
"name",
"else",
"''",
"# (suppress output names, since they duplicate the function name)",
"return",
"s",
"+",
"str",
"(",
"v",
".",
"_type",
")",
"outputs",
"=",
"self",
".",
"outputs",
"if",
"len",
"(",
"outputs",
")",
">",
"1",
":",
"output_signature",
"=",
"'Tuple['",
"+",
"', '",
".",
"join",
"(",
"format_arg_spec",
"(",
"output",
",",
"True",
")",
"for",
"output",
"in",
"outputs",
")",
"+",
"']'",
"else",
":",
"output_signature",
"=",
"format_arg_spec",
"(",
"outputs",
"[",
"0",
"]",
",",
"True",
")",
"if",
"self",
".",
"name",
":",
"f_name",
"+=",
"\": \"",
"return",
"f_name",
"+",
"op_name",
"+",
"'('",
"+",
"\", \"",
".",
"join",
"(",
"[",
"format_arg_spec",
"(",
"param",
")",
"for",
"param",
"in",
"args",
"]",
")",
"+",
"') -> '",
"+",
"output_signature"
] | https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/ops/functions.py#L1148-L1191 |
|
arangodb/arangodb | 0d658689c7d1b721b314fa3ca27d38303e1570c8 | 3rdParty/V8/gyp/xcode_emulation.py | python | XcodeSettings._GetTargetPostbuilds | (self, configname, output, output_binary,
quiet=False) | return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet)) | Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds. | Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds. | [
"Returns",
"a",
"list",
"of",
"shell",
"commands",
"that",
"contain",
"the",
"shell",
"commands",
"to",
"run",
"as",
"postbuilds",
"for",
"this",
"target",
"before",
"the",
"actual",
"postbuilds",
"."
] | def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet)) | [
"def",
"_GetTargetPostbuilds",
"(",
"self",
",",
"configname",
",",
"output",
",",
"output_binary",
",",
"quiet",
"=",
"False",
")",
":",
"# dSYMs need to build before stripping happens.",
"return",
"(",
"self",
".",
"_GetDebugInfoPostbuilds",
"(",
"configname",
",",
"output",
",",
"output_binary",
",",
"quiet",
")",
"+",
"self",
".",
"_GetStripPostbuilds",
"(",
"configname",
",",
"output_binary",
",",
"quiet",
")",
")"
] | https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/gyp/xcode_emulation.py#L980-L987 |
|
hszhao/PSPNet | cf7e5a99ba37e46118026e96be5821a9bc63bde0 | python/caffe/pycaffe.py | python | _Net_forward_backward_all | (self, blobs=None, diffs=None, **kwargs) | return all_outs, all_diffs | Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict. | Run net forward + backward in batches. | [
"Run",
"net",
"forward",
"+",
"backward",
"in",
"batches",
"."
] | def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in batch_blobs.iteritems():
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in batch_diffs.iteritems():
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(all_outs.itervalues().next()) - len(kwargs.itervalues().next())
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs | [
"def",
"_Net_forward_backward_all",
"(",
"self",
",",
"blobs",
"=",
"None",
",",
"diffs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Batch blobs and diffs.",
"all_outs",
"=",
"{",
"out",
":",
"[",
"]",
"for",
"out",
"in",
"set",
"(",
"self",
".",
"outputs",
"+",
"(",
"blobs",
"or",
"[",
"]",
")",
")",
"}",
"all_diffs",
"=",
"{",
"diff",
":",
"[",
"]",
"for",
"diff",
"in",
"set",
"(",
"self",
".",
"inputs",
"+",
"(",
"diffs",
"or",
"[",
"]",
")",
")",
"}",
"forward_batches",
"=",
"self",
".",
"_batch",
"(",
"{",
"in_",
":",
"kwargs",
"[",
"in_",
"]",
"for",
"in_",
"in",
"self",
".",
"inputs",
"if",
"in_",
"in",
"kwargs",
"}",
")",
"backward_batches",
"=",
"self",
".",
"_batch",
"(",
"{",
"out",
":",
"kwargs",
"[",
"out",
"]",
"for",
"out",
"in",
"self",
".",
"outputs",
"if",
"out",
"in",
"kwargs",
"}",
")",
"# Collect outputs from batches (and heed lack of forward/backward batches).",
"for",
"fb",
",",
"bb",
"in",
"izip_longest",
"(",
"forward_batches",
",",
"backward_batches",
",",
"fillvalue",
"=",
"{",
"}",
")",
":",
"batch_blobs",
"=",
"self",
".",
"forward",
"(",
"blobs",
"=",
"blobs",
",",
"*",
"*",
"fb",
")",
"batch_diffs",
"=",
"self",
".",
"backward",
"(",
"diffs",
"=",
"diffs",
",",
"*",
"*",
"bb",
")",
"for",
"out",
",",
"out_blobs",
"in",
"batch_blobs",
".",
"iteritems",
"(",
")",
":",
"all_outs",
"[",
"out",
"]",
".",
"extend",
"(",
"out_blobs",
".",
"copy",
"(",
")",
")",
"for",
"diff",
",",
"out_diffs",
"in",
"batch_diffs",
".",
"iteritems",
"(",
")",
":",
"all_diffs",
"[",
"diff",
"]",
".",
"extend",
"(",
"out_diffs",
".",
"copy",
"(",
")",
")",
"# Package in ndarray.",
"for",
"out",
",",
"diff",
"in",
"zip",
"(",
"all_outs",
",",
"all_diffs",
")",
":",
"all_outs",
"[",
"out",
"]",
"=",
"np",
".",
"asarray",
"(",
"all_outs",
"[",
"out",
"]",
")",
"all_diffs",
"[",
"diff",
"]",
"=",
"np",
".",
"asarray",
"(",
"all_diffs",
"[",
"diff",
"]",
")",
"# Discard padding at the end and package in ndarray.",
"pad",
"=",
"len",
"(",
"all_outs",
".",
"itervalues",
"(",
")",
".",
"next",
"(",
")",
")",
"-",
"len",
"(",
"kwargs",
".",
"itervalues",
"(",
")",
".",
"next",
"(",
")",
")",
"if",
"pad",
":",
"for",
"out",
",",
"diff",
"in",
"zip",
"(",
"all_outs",
",",
"all_diffs",
")",
":",
"all_outs",
"[",
"out",
"]",
"=",
"all_outs",
"[",
"out",
"]",
"[",
":",
"-",
"pad",
"]",
"all_diffs",
"[",
"diff",
"]",
"=",
"all_diffs",
"[",
"diff",
"]",
"[",
":",
"-",
"pad",
"]",
"return",
"all_outs",
",",
"all_diffs"
] | https://github.com/hszhao/PSPNet/blob/cf7e5a99ba37e46118026e96be5821a9bc63bde0/python/caffe/pycaffe.py#L190-L232 |
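A heavily hedged sketch of the batching behavior above; 'net', 'images', 'labels', and the blob names are all assumptions about a concrete model:

    blobs, diffs = net.forward_backward_all(data=images, label=labels)
    # blobs keys are output blob names; diffs keys are input blob names,
    # each stacked across the internally generated batches.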
|
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/linter/git_base.py | python | Repository.configure | (self, parameter, value) | return self._callgito("config", ["--local", parameter, value]) | Set a local configuration parameter. | Set a local configuration parameter. | [
"Set",
"a",
"local",
"configuration",
"parameter",
"."
] | def configure(self, parameter, value):
"""Set a local configuration parameter."""
return self._callgito("config", ["--local", parameter, value]) | [
"def",
"configure",
"(",
"self",
",",
"parameter",
",",
"value",
")",
":",
"return",
"self",
".",
"_callgito",
"(",
"\"config\"",
",",
"[",
"\"--local\"",
",",
"parameter",
",",
"value",
"]",
")"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/linter/git_base.py#L100-L102 |
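A short sketch, assuming a Repository constructed on an existing checkout as elsewhere in this module:

    repo = Repository('.')
    repo.configure('user.name', 'Build Bot')
    # equivalent shell: git config --local user.name 'Build Bot'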
|
kamyu104/LeetCode-Solutions | 77605708a927ea3b85aee5a479db733938c7c211 | Python/split-a-string-in-balanced-strings.py | python | Solution.balancedStringSplit | (self, s) | return result | :type s: str
:rtype: int | :type s: str
:rtype: int | [
":",
"type",
"s",
":",
"str",
":",
"rtype",
":",
"int"
] | def balancedStringSplit(self, s):
"""
:type s: str
:rtype: int
"""
result, count = 0, 0
for c in s:
count += 1 if c == 'L' else -1
if count == 0:
result += 1
return result | [
"def",
"balancedStringSplit",
"(",
"self",
",",
"s",
")",
":",
"result",
",",
"count",
"=",
"0",
",",
"0",
"for",
"c",
"in",
"s",
":",
"count",
"+=",
"1",
"if",
"c",
"==",
"'L'",
"else",
"-",
"1",
"if",
"count",
"==",
"0",
":",
"result",
"+=",
"1",
"return",
"result"
] | https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/split-a-string-in-balanced-strings.py#L5-L15 |
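A quick sanity check of the method above on the standard LeetCode examples:

    s = Solution()
    assert s.balancedStringSplit('RLRRLLRLRL') == 4
    assert s.balancedStringSplit('RLLLLRRRLR') == 3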
|
alibaba/graph-learn | 54cafee9db3054dc310a28b856be7f97c7d5aee9 | graphlearn/python/data/values.py | python | Layers.set_layer_nodes | (self, layer_id, nodes) | Set `Nodes` of the given `Layer`. | Set `Nodes` of the given `Layer`. | [
"Set",
"Nodes",
"of",
"the",
"given",
"Layer",
"."
] | def set_layer_nodes(self, layer_id, nodes):
""" Set `Nodes` of the given `Layer`.
"""
layer_id -= 1
if isinstance(self.layers, list) and layer_id < len(self.layers):
if isinstance(self.layers[layer_id], Layer):
self.layers[layer_id].set_nodes(nodes)
else:
raise ValueError("layer {} is not a SingleLayer".format(layer_id))
else:
raise ValueError("layer id beyond the layers length.") | [
"def",
"set_layer_nodes",
"(",
"self",
",",
"layer_id",
",",
"nodes",
")",
":",
"layer_id",
"-=",
"1",
"if",
"isinstance",
"(",
"self",
".",
"layers",
",",
"list",
")",
"and",
"layer_id",
"<",
"len",
"(",
"self",
".",
"layers",
")",
":",
"if",
"isinstance",
"(",
"self",
".",
"layers",
"[",
"layer_id",
"]",
",",
"Layer",
")",
":",
"self",
".",
"layers",
"[",
"layer_id",
"]",
".",
"set_nodes",
"(",
"nodes",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"layer {} is not a SingleLayer\"",
".",
"format",
"(",
"layer_id",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"layer id beyond the layers length.\"",
")"
] | https://github.com/alibaba/graph-learn/blob/54cafee9db3054dc310a28b856be7f97c7d5aee9/graphlearn/python/data/values.py#L726-L736 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/docutils/io.py | python | StringInput.read | (self) | return self.decode(self.source) | Decode and return the source string. | Decode and return the source string. | [
"Decode",
"and",
"return",
"the",
"source",
"string",
"."
] | def read(self):
"""Decode and return the source string."""
return self.decode(self.source) | [
"def",
"read",
"(",
"self",
")",
":",
"return",
"self",
".",
"decode",
"(",
"self",
".",
"source",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/io.py#L433-L435 |
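A minimal sketch of the in-memory source this class wraps:

    from docutils.io import StringInput

    src = StringInput(source='title\n=====\n')
    print(src.read())  # the decoded source string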
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/numpy/py3/numpy/polynomial/polynomial.py | python | polygrid2d | (x, y, c) | return pu._gridnd(polyval, c, x, y) | Evaluate a 2-D polynomial on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0 | Evaluate a 2-D polynomial on the Cartesian product of x and y. | [
"Evaluate",
"a",
"2",
"-",
"D",
"polynomial",
"on",
"the",
"Cartesian",
"product",
"of",
"x",
"and",
"y",
"."
] | def polygrid2d(x, y, c):
"""
Evaluate a 2-D polynomial on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
polyval, polyval2d, polyval3d, polygrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._gridnd(polyval, c, x, y) | [
"def",
"polygrid2d",
"(",
"x",
",",
"y",
",",
"c",
")",
":",
"return",
"pu",
".",
"_gridnd",
"(",
"polyval",
",",
"c",
",",
"x",
",",
"y",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/polynomial/polynomial.py#L898-L948 |
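A worked example for the entry above: p(x, y) = 1 + 2*y + 3*x*y evaluated on a 2x2 grid:

    import numpy as np
    from numpy.polynomial import polynomial as P

    c = np.array([[1.0, 2.0],   # c[i, j] multiplies x**i * y**j
                  [0.0, 3.0]])
    vals = P.polygrid2d([0, 1], [0, 1], c)
    print(vals)  # [[1. 3.], [1. 6.]] -- vals[i, j] = p(x[i], y[j])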
|
NASA-SW-VnV/ikos | 71325dfb94737332542caa708d7537752021522d | analyzer/python/ikos/scan.py | python | run | (cmd) | return rc | Run the given command and return the exit code | Run the given command and return the exit code | [
"Run",
"the",
"given",
"command",
"and",
"return",
"the",
"exit",
"code"
] | def run(cmd):
''' Run the given command and return the exit code '''
log.debug('Running %s' % command_string(cmd))
try:
proc = subprocess.Popen(cmd)
rc = proc.wait()
except OSError as e:
printf('error: %s: %s\n', cmd[0], e.strerror, file=sys.stderr)
sys.exit(e.errno)
if rc != 0:
sys.exit(rc)
return rc | [
"def",
"run",
"(",
"cmd",
")",
":",
"log",
".",
"debug",
"(",
"'Running %s'",
"%",
"command_string",
"(",
"cmd",
")",
")",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
")",
"rc",
"=",
"proc",
".",
"wait",
"(",
")",
"except",
"OSError",
"as",
"e",
":",
"printf",
"(",
"'error: %s: %s\\n'",
",",
"cmd",
"[",
"0",
"]",
",",
"e",
".",
"strerror",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"e",
".",
"errno",
")",
"if",
"rc",
"!=",
"0",
":",
"sys",
".",
"exit",
"(",
"rc",
")",
"return",
"rc"
] | https://github.com/NASA-SW-VnV/ikos/blob/71325dfb94737332542caa708d7537752021522d/analyzer/python/ikos/scan.py#L419-L433 |
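A usage note: because the helper above exits the process on any failure, callers may treat its return as success. The command list here is illustrative:

    rc = run(['clang', '-c', 'demo.c'])
    # reaching this line implies rc == 0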
|
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | python/configobj/configobj.py | python | ConfigObj.reload | (self) | Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file. | Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file. | [
"Reload",
"a",
"ConfigObj",
"from",
"file",
".",
"This",
"method",
"raises",
"a",
"ReloadError",
"if",
"the",
"ConfigObj",
"doesn",
"t",
"have",
"a",
"filename",
"attribute",
"pointing",
"to",
"a",
"file",
"."
] | def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, basestring):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec) | [
"def",
"reload",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"filename",
",",
"basestring",
")",
":",
"raise",
"ReloadError",
"(",
")",
"filename",
"=",
"self",
".",
"filename",
"current_options",
"=",
"{",
"}",
"for",
"entry",
"in",
"OPTION_DEFAULTS",
":",
"if",
"entry",
"==",
"'configspec'",
":",
"continue",
"current_options",
"[",
"entry",
"]",
"=",
"getattr",
"(",
"self",
",",
"entry",
")",
"configspec",
"=",
"self",
".",
"_original_configspec",
"current_options",
"[",
"'configspec'",
"]",
"=",
"configspec",
"self",
".",
"clear",
"(",
")",
"self",
".",
"_initialise",
"(",
"current_options",
")",
"self",
".",
"_load",
"(",
"filename",
",",
"configspec",
")"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/configobj/configobj.py#L2334-L2356 |
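A minimal sketch, assuming a ConfigObj created from a real file so the filename attribute is set ('app.ini' is a placeholder path):

    from configobj import ConfigObj

    cfg = ConfigObj('app.ini')
    # ... the file is edited on disk by another process ...
    cfg.reload()   # re-reads 'app.ini'; raises ReloadError if no filename is set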
||
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/decimal.py | python | Decimal.fma | (self, other, third, context=None) | return product.__add__(third, context) | Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed. | Fused multiply-add. | [
"Fused",
"multiply",
"-",
"add",
"."
] | def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
third = _convert_other(third, raiseit=True)
return product.__add__(third, context) | [
"def",
"fma",
"(",
"self",
",",
"other",
",",
"third",
",",
"context",
"=",
"None",
")",
":",
"other",
"=",
"_convert_other",
"(",
"other",
",",
"raiseit",
"=",
"True",
")",
"# compute product; raise InvalidOperation if either operand is",
"# a signaling NaN or if the product is zero times infinity.",
"if",
"self",
".",
"_is_special",
"or",
"other",
".",
"_is_special",
":",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"getcontext",
"(",
")",
"if",
"self",
".",
"_exp",
"==",
"'N'",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'sNaN'",
",",
"self",
")",
"if",
"other",
".",
"_exp",
"==",
"'N'",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'sNaN'",
",",
"other",
")",
"if",
"self",
".",
"_exp",
"==",
"'n'",
":",
"product",
"=",
"self",
"elif",
"other",
".",
"_exp",
"==",
"'n'",
":",
"product",
"=",
"other",
"elif",
"self",
".",
"_exp",
"==",
"'F'",
":",
"if",
"not",
"other",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'INF * 0 in fma'",
")",
"product",
"=",
"_SignedInfinity",
"[",
"self",
".",
"_sign",
"^",
"other",
".",
"_sign",
"]",
"elif",
"other",
".",
"_exp",
"==",
"'F'",
":",
"if",
"not",
"self",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'0 * INF in fma'",
")",
"product",
"=",
"_SignedInfinity",
"[",
"self",
".",
"_sign",
"^",
"other",
".",
"_sign",
"]",
"else",
":",
"product",
"=",
"_dec_from_triple",
"(",
"self",
".",
"_sign",
"^",
"other",
".",
"_sign",
",",
"str",
"(",
"int",
"(",
"self",
".",
"_int",
")",
"*",
"int",
"(",
"other",
".",
"_int",
")",
")",
",",
"self",
".",
"_exp",
"+",
"other",
".",
"_exp",
")",
"third",
"=",
"_convert_other",
"(",
"third",
",",
"raiseit",
"=",
"True",
")",
"return",
"product",
".",
"__add__",
"(",
"third",
",",
"context",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/decimal.py#L1809-L1851 |
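A worked example of the single final rounding described above; at precision 2 the fused form differs from the naive one:

    from decimal import Decimal, getcontext

    getcontext().prec = 2
    a, b, c = Decimal('1.5'), Decimal('1.5'), Decimal('0.01')
    print(a * b + c)    # 2.2 -- the product 2.25 is rounded before the add
    print(a.fma(b, c))  # 2.3 -- the exact 2.26 is rounded only once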
|
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/distribute/distribute_coordinator.py | python | _WorkerContext._is_chief | (self) | return False | Return whether the task is the chief worker. | Return whether the task is the chief worker. | [
"Return",
"whether",
"the",
"task",
"is",
"the",
"chief",
"worker",
"."
] | def _is_chief(self):
"""Return whether the task is the chief worker."""
if (not self._cluster_spec or
self._task_type in [_TaskType.CHIEF, _TaskType.EVALUATOR, None]):
return True
# If not local and chief not in the cluster_spec, use the first worker as
# chief.
if (_TaskType.CHIEF not in self._cluster_spec.jobs and
self._task_type == _TaskType.WORKER and self._task_id == 0):
return True
return False | [
"def",
"_is_chief",
"(",
"self",
")",
":",
"if",
"(",
"not",
"self",
".",
"_cluster_spec",
"or",
"self",
".",
"_task_type",
"in",
"[",
"_TaskType",
".",
"CHIEF",
",",
"_TaskType",
".",
"EVALUATOR",
",",
"None",
"]",
")",
":",
"return",
"True",
"# If not local and chief not in the cluster_spec, use the first worker as",
"# chief.",
"if",
"(",
"_TaskType",
".",
"CHIEF",
"not",
"in",
"self",
".",
"_cluster_spec",
".",
"jobs",
"and",
"self",
".",
"_task_type",
"==",
"_TaskType",
".",
"WORKER",
"and",
"self",
".",
"_task_id",
"==",
"0",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/distribute/distribute_coordinator.py#L194-L205 |
|
facebook/fboss | 60063db1df37c2ec0e7dcd0955c54885ea9bf7f0 | fboss/py/fboss/cli/cli.py | python | RouteCli._ip | (cli_opts, ip, vrf) | Show the route to a specific IP | Show the route to a specific IP | [
"Show",
"the",
"route",
"to",
"a",
"specific",
"IP"
] | def _ip(cli_opts, ip, vrf):
"""Show the route to a specific IP"""
route.RouteIpCmd(cli_opts).run(ip, vrf) | [
"def",
"_ip",
"(",
"cli_opts",
",",
"ip",
",",
"vrf",
")",
":",
"route",
".",
"RouteIpCmd",
"(",
"cli_opts",
")",
".",
"run",
"(",
"ip",
",",
"vrf",
")"
] | https://github.com/facebook/fboss/blob/60063db1df37c2ec0e7dcd0955c54885ea9bf7f0/fboss/py/fboss/cli/cli.py#L561-L563 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/misc_util.py | python | generate_config_py | (target) | return target | Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py)) | Generate config.py file containing system_info information
used during building the package. | [
"Generate",
"config",
".",
"py",
"file",
"containing",
"system_info",
"information",
"used",
"during",
"building",
"the",
"package",
"."
] | def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
with open(target, 'w') as f:
f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write(textwrap.dedent("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
if sys.version_info >= (3, 8):
os.add_dll_directory(extra_dll_dir)
else:
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + extra_dll_dir
"""))
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(textwrap.dedent(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
'''))
return target | [
"def",
"generate_config_py",
"(",
"target",
")",
":",
"from",
"numpy",
".",
"distutils",
".",
"system_info",
"import",
"system_info",
"from",
"distutils",
".",
"dir_util",
"import",
"mkpath",
"mkpath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"target",
")",
")",
"with",
"open",
"(",
"target",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'# This file is generated by numpy\\'s %s\\n'",
"%",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
")",
"f",
".",
"write",
"(",
"'# It contains system_info results at the time of building this package.\\n'",
")",
"f",
".",
"write",
"(",
"'__all__ = [\"get_info\",\"show\"]\\n\\n'",
")",
"# For gfortran+msvc combination, extra shared libraries may exist",
"f",
".",
"write",
"(",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\n import os\n import sys\n\n extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')\n\n if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):\n if sys.version_info >= (3, 8):\n os.add_dll_directory(extra_dll_dir)\n else:\n os.environ.setdefault('PATH', '')\n os.environ['PATH'] += os.pathsep + extra_dll_dir\n\n \"\"\"",
")",
")",
"for",
"k",
",",
"i",
"in",
"system_info",
".",
"saved_results",
".",
"items",
"(",
")",
":",
"f",
".",
"write",
"(",
"'%s=%r\\n'",
"%",
"(",
"k",
",",
"i",
")",
")",
"f",
".",
"write",
"(",
"textwrap",
".",
"dedent",
"(",
"r'''\n def get_info(name):\n g = globals()\n return g.get(name, g.get(name + \"_info\", {}))\n\n def show():\n for name,info_dict in globals().items():\n if name[0] == \"_\" or type(info_dict) is not type({}): continue\n print(name + \":\")\n if not info_dict:\n print(\" NOT AVAILABLE\")\n for k,v in info_dict.items():\n v = str(v)\n if k == \"sources\" and len(v) > 200:\n v = v[:60] + \" ...\\n... \" + v[-60:]\n print(\" %s = %s\" % (k,v))\n '''",
")",
")",
"return",
"target"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/misc_util.py#L2308-L2359 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/WebOb/webob/dec.py | python | wsgify.__call__ | (self, req, *args, **kw) | Call this as a WSGI application or with a request | Call this as a WSGI application or with a request | [
"Call",
"this",
"as",
"a",
"WSGI",
"application",
"or",
"with",
"a",
"request"
] | def __call__(self, req, *args, **kw):
"""Call this as a WSGI application or with a request"""
func = self.func
if func is None:
if args or kw:
raise TypeError(
"Unbound %s can only be called with the function it "
"will wrap" % self.__class__.__name__)
func = req
return self.clone(func)
if isinstance(req, dict):
if len(args) != 1 or kw:
raise TypeError(
"Calling %r as a WSGI app with the wrong signature")
environ = req
start_response = args[0]
req = self.RequestClass(environ)
req.response = req.ResponseClass()
try:
args = self.args
if self.middleware_wraps:
args = (self.middleware_wraps,) + args
resp = self.call_func(req, *args, **self.kwargs)
except HTTPException as exc:
resp = exc
if resp is None:
## FIXME: I'm not sure what this should be?
resp = req.response
if isinstance(resp, text_type):
resp = bytes_(resp, req.charset)
if isinstance(resp, bytes):
body = resp
resp = req.response
resp.write(body)
if resp is not req.response:
resp = req.response.merge_cookies(resp)
return resp(environ, start_response)
else:
if self.middleware_wraps:
args = (self.middleware_wraps,) + args
return self.func(req, *args, **kw) | [
"def",
"__call__",
"(",
"self",
",",
"req",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"func",
"=",
"self",
".",
"func",
"if",
"func",
"is",
"None",
":",
"if",
"args",
"or",
"kw",
":",
"raise",
"TypeError",
"(",
"\"Unbound %s can only be called with the function it \"",
"\"will wrap\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")",
"func",
"=",
"req",
"return",
"self",
".",
"clone",
"(",
"func",
")",
"if",
"isinstance",
"(",
"req",
",",
"dict",
")",
":",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
"or",
"kw",
":",
"raise",
"TypeError",
"(",
"\"Calling %r as a WSGI app with the wrong signature\"",
")",
"environ",
"=",
"req",
"start_response",
"=",
"args",
"[",
"0",
"]",
"req",
"=",
"self",
".",
"RequestClass",
"(",
"environ",
")",
"req",
".",
"response",
"=",
"req",
".",
"ResponseClass",
"(",
")",
"try",
":",
"args",
"=",
"self",
".",
"args",
"if",
"self",
".",
"middleware_wraps",
":",
"args",
"=",
"(",
"self",
".",
"middleware_wraps",
",",
")",
"+",
"args",
"resp",
"=",
"self",
".",
"call_func",
"(",
"req",
",",
"*",
"args",
",",
"*",
"*",
"self",
".",
"kwargs",
")",
"except",
"HTTPException",
"as",
"exc",
":",
"resp",
"=",
"exc",
"if",
"resp",
"is",
"None",
":",
"## FIXME: I'm not sure what this should be?",
"resp",
"=",
"req",
".",
"response",
"if",
"isinstance",
"(",
"resp",
",",
"text_type",
")",
":",
"resp",
"=",
"bytes_",
"(",
"resp",
",",
"req",
".",
"charset",
")",
"if",
"isinstance",
"(",
"resp",
",",
"bytes",
")",
":",
"body",
"=",
"resp",
"resp",
"=",
"req",
".",
"response",
"resp",
".",
"write",
"(",
"body",
")",
"if",
"resp",
"is",
"not",
"req",
".",
"response",
":",
"resp",
"=",
"req",
".",
"response",
".",
"merge_cookies",
"(",
"resp",
")",
"return",
"resp",
"(",
"environ",
",",
"start_response",
")",
"else",
":",
"if",
"self",
".",
"middleware_wraps",
":",
"args",
"=",
"(",
"self",
".",
"middleware_wraps",
",",
")",
"+",
"args",
"return",
"self",
".",
"func",
"(",
"req",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/WebOb/webob/dec.py#L108-L148 |
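A hypothetical sketch of the decorator path through this __call__; the handler name and body are illustrative:

    from webob import Response
    from webob.dec import wsgify

    @wsgify
    def app(req):
        return Response('hello %s' % req.path_info)

    # 'app' is now a WSGI application: any server can call app(environ, start_response).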
||
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | pygimli/physics/traveltime/plotting.py | python | drawFirstPicks | (ax, data, tt=None, plotva=False, **kwargs) | Plot first arrivals as lines. | Plot first arrivals as lines. | [
"Plot",
"first",
"arrivals",
"as",
"lines",
"."
] | def drawFirstPicks(ax, data, tt=None, plotva=False, **kwargs):
"""Plot first arrivals as lines."""
px = pg.x(data)
gx = np.array([px[int(g)] for g in data("g")])
sx = np.array([px[int(s)] for s in data("s")])
if tt is None:
tt = np.array(data("t"))
if plotva:
tt = np.absolute(gx - sx) / tt
uns = np.unique(sx)
cols = plt.cm.tab10(np.arange(10))
kwargs.setdefault('marker', 'x')
kwargs.setdefault('markersize', 8)
kwargs.setdefault('linestyle', '-')
for i, si in enumerate(uns):
ti = tt[sx == si]
gi = gx[sx == si]
ii = gi.argsort()
ax.plot(gi[ii], ti[ii], color=cols[i % 10], **kwargs)
ax.plot(si, 0., 's', color=cols[i % 10])
ax.grid(True)
if plotva:
ax.set_ylabel("Apparent velocity (m/s)")
else:
ax.set_ylabel("Traveltime (s)")
ax.set_xlabel("x (m)")
ax.invert_yaxis() | [
"def",
"drawFirstPicks",
"(",
"ax",
",",
"data",
",",
"tt",
"=",
"None",
",",
"plotva",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"px",
"=",
"pg",
".",
"x",
"(",
"data",
")",
"gx",
"=",
"np",
".",
"array",
"(",
"[",
"px",
"[",
"int",
"(",
"g",
")",
"]",
"for",
"g",
"in",
"data",
"(",
"\"g\"",
")",
"]",
")",
"sx",
"=",
"np",
".",
"array",
"(",
"[",
"px",
"[",
"int",
"(",
"s",
")",
"]",
"for",
"s",
"in",
"data",
"(",
"\"s\"",
")",
"]",
")",
"if",
"tt",
"is",
"None",
":",
"tt",
"=",
"np",
".",
"array",
"(",
"data",
"(",
"\"t\"",
")",
")",
"if",
"plotva",
":",
"tt",
"=",
"np",
".",
"absolute",
"(",
"gx",
"-",
"sx",
")",
"/",
"tt",
"uns",
"=",
"np",
".",
"unique",
"(",
"sx",
")",
"cols",
"=",
"plt",
".",
"cm",
".",
"tab10",
"(",
"np",
".",
"arange",
"(",
"10",
")",
")",
"kwargs",
".",
"setdefault",
"(",
"'marker'",
",",
"'x'",
")",
"kwargs",
".",
"setdefault",
"(",
"'markersize'",
",",
"8",
")",
"kwargs",
".",
"setdefault",
"(",
"'linestyle'",
",",
"'-'",
")",
"for",
"i",
",",
"si",
"in",
"enumerate",
"(",
"uns",
")",
":",
"ti",
"=",
"tt",
"[",
"sx",
"==",
"si",
"]",
"gi",
"=",
"gx",
"[",
"sx",
"==",
"si",
"]",
"ii",
"=",
"gi",
".",
"argsort",
"(",
")",
"ax",
".",
"plot",
"(",
"gi",
"[",
"ii",
"]",
",",
"ti",
"[",
"ii",
"]",
",",
"color",
"=",
"cols",
"[",
"i",
"%",
"10",
"]",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"plot",
"(",
"si",
",",
"0.",
",",
"'s'",
",",
"color",
"=",
"cols",
"[",
"i",
"%",
"10",
"]",
")",
"ax",
".",
"grid",
"(",
"True",
")",
"if",
"plotva",
":",
"ax",
".",
"set_ylabel",
"(",
"\"Apparent velocity (m/s)\"",
")",
"else",
":",
"ax",
".",
"set_ylabel",
"(",
"\"Traveltime (s)\"",
")",
"ax",
".",
"set_xlabel",
"(",
"\"x (m)\"",
")",
"ax",
".",
"invert_yaxis",
"(",
")"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/physics/traveltime/plotting.py#L72-L102 |
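A hedged usage sketch: it assumes a pygimli traveltime DataContainer carrying the "s", "g" and "t" entries the function reads; the data file name is hypothetical:

import matplotlib.pyplot as plt
import pygimli as pg
from pygimli.physics.traveltime.plotting import drawFirstPicks

data = pg.load('picks.sgt')   # hypothetical unified data file with s/g/t
fig, ax = plt.subplots()
drawFirstPicks(ax, data)      # traveltime curves, one color per shot
# drawFirstPicks(ax, data, plotva=True) would plot apparent velocities
plt.show()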
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | tools/DOI/doi.py | python | _http_request | (body, method, url, options, content_type=None) | return result | Issue an HTTP request with the given options.
We are forced to use a command line tool for this rather than use the
in-built Python libraries since httplib, urllib and urllib2 all seem to
have problems using HTTPS through the proxy at RAL. HTTP works fine,
but the DOI API is encrypted so that is not an option.
We prefer cURL to wget since it exists on many Linux machines and even
comes bundled with Git Bash for Windows! Some good info on scripting
with cURL can be found at:
http://curl.haxx.se/docs/httpscripting.html | Issue an HTTP request with the given options. | [
"Issue",
"an",
"HTTP",
"request",
"with",
"the",
"given",
"options",
"."
] | def _http_request(body, method, url, options, content_type=None):
'''Issue an HTTP request with the given options.
We are forced to use a command line tool for this rather than use the
in-built Python libraries since httplib, urllib and urllib2 all seem to
have problems using HTTPS through the proxy at RAL. HTTP works fine,
but the DOI API is encrypted so that is not an option.
We prefer cURL to wget since it exists on many Linux machines and even
comes bundled with Git Bash for Windows! Some good info on scripting
with cURL can be found at:
http://curl.haxx.se/docs/httpscripting.html'''
args = [
'curl',
'--user', options.username + ':' + options.password,
# The bodies of HTTP messages must be encoded:
'--data', body,
'--request', method,
]
if content_type is not None:
args.extend(['--header', content_type, ])
if 'http_proxy' in os.environ:
args.extend(['--proxy', os.environ['http_proxy']])
# Set how loud cURL should be while running.
if options.debug:
args.append('--verbose')
else:
args.append('--silent')
args.append(url)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
result = proc.stdout.readlines()
result = [x.decode() for x in result]
print("Server Response: " + str(result))
return result | [
"def",
"_http_request",
"(",
"body",
",",
"method",
",",
"url",
",",
"options",
",",
"content_type",
"=",
"None",
")",
":",
"args",
"=",
"[",
"'curl'",
",",
"'--user'",
",",
"options",
".",
"username",
"+",
"':'",
"+",
"options",
".",
"password",
",",
"# The bodies of HTTP messages must be encoded:",
"'--data'",
",",
"body",
",",
"'--request'",
",",
"method",
",",
"]",
"if",
"content_type",
"is",
"not",
"None",
":",
"args",
".",
"extend",
"(",
"[",
"'--header'",
",",
"content_type",
",",
"]",
")",
"if",
"'http_proxy'",
"in",
"os",
".",
"environ",
":",
"args",
".",
"extend",
"(",
"[",
"'--proxy'",
",",
"os",
".",
"environ",
"[",
"'http_proxy'",
"]",
"]",
")",
"# Set how loud cURL should be while running.",
"if",
"options",
".",
"debug",
":",
"args",
".",
"append",
"(",
"'--verbose'",
")",
"else",
":",
"args",
".",
"append",
"(",
"'--silent'",
")",
"args",
".",
"append",
"(",
"url",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"result",
"=",
"proc",
".",
"stdout",
".",
"readlines",
"(",
")",
"result",
"=",
"[",
"x",
".",
"decode",
"(",
")",
"for",
"x",
"in",
"result",
"]",
"print",
"(",
"\"Server Response: \"",
"+",
"str",
"(",
"result",
")",
")",
"return",
"result"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/tools/DOI/doi.py#L201-L241 |
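_http_request is module-private; a hedged sketch of a call as if made from inside the module. Every credential, DOI and endpoint below is a hypothetical stand-in, and the function only reads username, password and debug from the options object:

from argparse import Namespace

options = Namespace(username='user', password='secret', debug=False)
reply = _http_request(
    body='doi=10.0000/example\nurl=https://example.org/record',
    method='PUT',
    url='https://mds.datacite.org/doi/10.0000/example',
    options=options,
    content_type='Content-Type:text/plain;charset=UTF-8')
print(reply)   # list of decoded lines from curl's stdout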
|
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | scripts/Python/static-binding/lldb.py | python | SBData.GetDouble | (self, error, offset) | return _lldb.SBData_GetDouble(self, error, offset) | GetDouble(SBData self, SBError error, lldb::offset_t offset) -> double | GetDouble(SBData self, SBError error, lldb::offset_t offset) -> double | [
"GetDouble",
"(",
"SBData",
"self",
"SBError",
"error",
"lldb",
"::",
"offset_t",
"offset",
")",
"-",
">",
"double"
] | def GetDouble(self, error, offset):
"""GetDouble(SBData self, SBError error, lldb::offset_t offset) -> double"""
return _lldb.SBData_GetDouble(self, error, offset) | [
"def",
"GetDouble",
"(",
"self",
",",
"error",
",",
"offset",
")",
":",
"return",
"_lldb",
".",
"SBData_GetDouble",
"(",
"self",
",",
"error",
",",
"offset",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L3347-L3349 |
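A hedged round-trip sketch, assuming the SBData factory helpers present in recent lldb Python bindings:

import lldb

# Build an SBData holding one double, then read it back at byte offset 0.
data = lldb.SBData.CreateDataFromDoubleArray(
    lldb.eByteOrderLittle, 8, [3.14])
error = lldb.SBError()
value = data.GetDouble(error, 0)
if error.Success():
    print(value)   # 3.14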
|
fatih/subvim | 241b6d170597857105da219c9b7d36059e9f11fb | vim/base/YouCompleteMe/third_party/jedi/jedi/evaluate.py | python | find_assignments | (lhs, results, seek_name) | Check if `seek_name` is in the left hand side `lhs` of assignment.
`lhs` can simply be a variable (`pr.Call`) or a tuple/list (`pr.Array`)
representing the following cases::
a = 1 # lhs is pr.Call
(a, b) = 2 # lhs is pr.Array
:type lhs: pr.Call
:type results: list
:type seek_name: str | Check if `seek_name` is in the left hand side `lhs` of assignment. | [
"Check",
"if",
"seek_name",
"is",
"in",
"the",
"left",
"hand",
"side",
"lhs",
"of",
"assignment",
"."
] | def find_assignments(lhs, results, seek_name):
"""
Check if `seek_name` is in the left hand side `lhs` of assignment.
`lhs` can simply be a variable (`pr.Call`) or a tuple/list (`pr.Array`)
representing the following cases::
a = 1 # lhs is pr.Call
(a, b) = 2 # lhs is pr.Array
:type lhs: pr.Call
:type results: list
:type seek_name: str
"""
if isinstance(lhs, pr.Array):
return assign_tuples(lhs, results, seek_name)
elif lhs.name.names[-1] == seek_name:
return results
else:
return [] | [
"def",
"find_assignments",
"(",
"lhs",
",",
"results",
",",
"seek_name",
")",
":",
"if",
"isinstance",
"(",
"lhs",
",",
"pr",
".",
"Array",
")",
":",
"return",
"assign_tuples",
"(",
"lhs",
",",
"results",
",",
"seek_name",
")",
"elif",
"lhs",
".",
"name",
".",
"names",
"[",
"-",
"1",
"]",
"==",
"seek_name",
":",
"return",
"results",
"else",
":",
"return",
"[",
"]"
] | https://github.com/fatih/subvim/blob/241b6d170597857105da219c9b7d36059e9f11fb/vim/base/YouCompleteMe/third_party/jedi/jedi/evaluate.py#L573-L592 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/shape_base.py | python | kron | (a, b) | return result | Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,...,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, ..., 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, ..., 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True | Kronecker product of two arrays. | [
"Kronecker",
"product",
"of",
"two",
"arrays",
"."
] | def kron(a, b):
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : array_like
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,...,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, ..., 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, ..., 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
>>> c = np.kron(a,b)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1,3,0,2)
>>> J = (0,2,1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
>>> c[K] == a[I]*b[J]
True
"""
b = asanyarray(b)
a = array(a, copy=False, subok=True, ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
if (nda == 0 or ndb == 0):
return _nx.multiply(a, b)
as_ = a.shape
bs = b.shape
if not a.flags.contiguous:
a = reshape(a, as_)
if not b.flags.contiguous:
b = reshape(b, bs)
nd = ndb
if (ndb != nda):
if (ndb > nda):
as_ = (1,)*(ndb-nda) + as_
else:
bs = (1,)*(nda-ndb) + bs
nd = nda
result = outer(a, b).reshape(as_+bs)
axis = nd-1
for _ in range(nd):
result = concatenate(result, axis=axis)
wrapper = get_array_prepare(a, b)
if wrapper is not None:
result = wrapper(result)
wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result | [
"def",
"kron",
"(",
"a",
",",
"b",
")",
":",
"b",
"=",
"asanyarray",
"(",
"b",
")",
"a",
"=",
"array",
"(",
"a",
",",
"copy",
"=",
"False",
",",
"subok",
"=",
"True",
",",
"ndmin",
"=",
"b",
".",
"ndim",
")",
"ndb",
",",
"nda",
"=",
"b",
".",
"ndim",
",",
"a",
".",
"ndim",
"if",
"(",
"nda",
"==",
"0",
"or",
"ndb",
"==",
"0",
")",
":",
"return",
"_nx",
".",
"multiply",
"(",
"a",
",",
"b",
")",
"as_",
"=",
"a",
".",
"shape",
"bs",
"=",
"b",
".",
"shape",
"if",
"not",
"a",
".",
"flags",
".",
"contiguous",
":",
"a",
"=",
"reshape",
"(",
"a",
",",
"as_",
")",
"if",
"not",
"b",
".",
"flags",
".",
"contiguous",
":",
"b",
"=",
"reshape",
"(",
"b",
",",
"bs",
")",
"nd",
"=",
"ndb",
"if",
"(",
"ndb",
"!=",
"nda",
")",
":",
"if",
"(",
"ndb",
">",
"nda",
")",
":",
"as_",
"=",
"(",
"1",
",",
")",
"*",
"(",
"ndb",
"-",
"nda",
")",
"+",
"as_",
"else",
":",
"bs",
"=",
"(",
"1",
",",
")",
"*",
"(",
"nda",
"-",
"ndb",
")",
"+",
"bs",
"nd",
"=",
"nda",
"result",
"=",
"outer",
"(",
"a",
",",
"b",
")",
".",
"reshape",
"(",
"as_",
"+",
"bs",
")",
"axis",
"=",
"nd",
"-",
"1",
"for",
"_",
"in",
"range",
"(",
"nd",
")",
":",
"result",
"=",
"concatenate",
"(",
"result",
",",
"axis",
"=",
"axis",
")",
"wrapper",
"=",
"get_array_prepare",
"(",
"a",
",",
"b",
")",
"if",
"wrapper",
"is",
"not",
"None",
":",
"result",
"=",
"wrapper",
"(",
"result",
")",
"wrapper",
"=",
"get_array_wrap",
"(",
"a",
",",
"b",
")",
"if",
"wrapper",
"is",
"not",
"None",
":",
"result",
"=",
"wrapper",
"(",
"result",
")",
"return",
"result"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/shape_base.py#L1066-L1162 |
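The docstring examples cover values and shapes; what they do not show is the get_array_prepare/get_array_wrap step at the end, which keeps ndarray subclasses intact. A small sketch:

import numpy as np

a = np.matrix([[1, 2], [3, 4]])
b = np.ones((2, 2))
c = np.kron(a, b)
print(type(c), c.shape)   # <class 'numpy.matrix'> (4, 4) -- the subclass survives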
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/tools/api/generator/create_python_api.py | python | get_module | (dir_path, relative_to_dir) | return dir_path.replace('/', '.').strip('.') | Get module that corresponds to path relative to relative_to_dir.
Args:
dir_path: Path to directory.
relative_to_dir: Get module relative to this directory.
Returns:
Name of module that corresponds to the given directory. | Get module that corresponds to path relative to relative_to_dir. | [
"Get",
"module",
"that",
"corresponds",
"to",
"path",
"relative",
"to",
"relative_to_dir",
"."
] | def get_module(dir_path, relative_to_dir):
"""Get module that corresponds to path relative to relative_to_dir.
Args:
dir_path: Path to directory.
relative_to_dir: Get module relative to this directory.
Returns:
Name of module that corresponds to the given directory.
"""
dir_path = dir_path[len(relative_to_dir):]
# Convert path separators to '/' for easier parsing below.
dir_path = dir_path.replace(os.sep, '/')
return dir_path.replace('/', '.').strip('.') | [
"def",
"get_module",
"(",
"dir_path",
",",
"relative_to_dir",
")",
":",
"dir_path",
"=",
"dir_path",
"[",
"len",
"(",
"relative_to_dir",
")",
":",
"]",
"# Convert path separators to '/' for easier parsing below.",
"dir_path",
"=",
"dir_path",
".",
"replace",
"(",
"os",
".",
"sep",
",",
"'/'",
")",
"return",
"dir_path",
".",
"replace",
"(",
"'/'",
",",
"'.'",
")",
".",
"strip",
"(",
"'.'",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/tools/api/generator/create_python_api.py#L523-L536 |
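Since the function is pure string manipulation, two illustrative calls pin down the contract (paths hypothetical; dir_path must start with relative_to_dir, and os.sep is '/' on POSIX):

print(get_module('/src/tf/python/ops', '/src/tf'))  # 'python.ops'
print(get_module('/src/tf', '/src/tf'))             # '' (the root itself)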
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/estimator/estimator.py | python | Estimator.__init__ | (self, model_fn, model_dir=None, config=None, params=None) | Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same (for multi-head models). If
mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If
the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation, or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
to configure Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`, or `model_dir`.
* Returns:
`EstimatorSpec`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same. If
both are `None`, a temporary directory will be used.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`. | Constructs an `Estimator` instance. | [
"Constructs",
"an",
"Estimator",
"instance",
"."
] | def __init__(self, model_fn, model_dir=None, config=None, params=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same (for multi-head models). If
mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If
the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation, or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in `params` parameter. This allows
to configure Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`, or `model_dir`.
* Returns:
`EstimatorSpec`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same. If
both are `None`, a temporary directory will be used.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
Estimator._assert_members_are_not_overridden(self)
if config is None:
self._config = run_config.RunConfig()
logging.info('Using default config.')
else:
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of RunConfig, but provided %s.' %
config)
self._config = config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(alanyee): remove this suppression after it is no longer needed
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
self._device_fn = _get_replica_device_setter(self._config)
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
_verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = copy.deepcopy(params or {}) | [
"def",
"__init__",
"(",
"self",
",",
"model_fn",
",",
"model_dir",
"=",
"None",
",",
"config",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"Estimator",
".",
"_assert_members_are_not_overridden",
"(",
"self",
")",
"if",
"config",
"is",
"None",
":",
"self",
".",
"_config",
"=",
"run_config",
".",
"RunConfig",
"(",
")",
"logging",
".",
"info",
"(",
"'Using default config.'",
")",
"else",
":",
"if",
"not",
"isinstance",
"(",
"config",
",",
"run_config",
".",
"RunConfig",
")",
":",
"raise",
"ValueError",
"(",
"'config must be an instance of RunConfig, but provided %s.'",
"%",
"config",
")",
"self",
".",
"_config",
"=",
"config",
"# Model directory.",
"if",
"(",
"model_dir",
"is",
"not",
"None",
")",
"and",
"(",
"self",
".",
"_config",
".",
"model_dir",
"is",
"not",
"None",
")",
":",
"if",
"model_dir",
"!=",
"self",
".",
"_config",
".",
"model_dir",
":",
"# TODO(alanyee): remove this suppression after it is no longer needed",
"# pylint: disable=g-doc-exception",
"raise",
"ValueError",
"(",
"\"model_dir are set both in constructor and RunConfig, but with \"",
"\"different values. In constructor: '{}', in RunConfig: \"",
"\"'{}' \"",
".",
"format",
"(",
"model_dir",
",",
"self",
".",
"_config",
".",
"model_dir",
")",
")",
"# pylint: enable=g-doc-exception",
"self",
".",
"_model_dir",
"=",
"model_dir",
"or",
"self",
".",
"_config",
".",
"model_dir",
"if",
"self",
".",
"_model_dir",
"is",
"None",
":",
"self",
".",
"_model_dir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"logging",
".",
"warning",
"(",
"'Using temporary folder as model directory: %s'",
",",
"self",
".",
"_model_dir",
")",
"if",
"self",
".",
"_config",
".",
"model_dir",
"is",
"None",
":",
"self",
".",
"_config",
"=",
"self",
".",
"_config",
".",
"replace",
"(",
"model_dir",
"=",
"self",
".",
"_model_dir",
")",
"logging",
".",
"info",
"(",
"'Using config: %s'",
",",
"str",
"(",
"vars",
"(",
"self",
".",
"_config",
")",
")",
")",
"if",
"self",
".",
"_config",
".",
"session_config",
"is",
"None",
":",
"self",
".",
"_session_config",
"=",
"config_pb2",
".",
"ConfigProto",
"(",
"allow_soft_placement",
"=",
"True",
")",
"else",
":",
"self",
".",
"_session_config",
"=",
"self",
".",
"_config",
".",
"session_config",
"self",
".",
"_device_fn",
"=",
"_get_replica_device_setter",
"(",
"self",
".",
"_config",
")",
"if",
"model_fn",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'model_fn must be provided to Estimator.'",
")",
"_verify_model_fn_args",
"(",
"model_fn",
",",
"params",
")",
"self",
".",
"_model_fn",
"=",
"model_fn",
"self",
".",
"_params",
"=",
"copy",
".",
"deepcopy",
"(",
"params",
"or",
"{",
"}",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/estimator/estimator.py#L93-L180 |
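A hedged sketch of the custom-estimator pattern this constructor validates, using TF1-era APIs contemporary with this file; the model, path and params below are stand-ins:

import tensorflow as tf

def model_fn(features, labels, mode, params):
    # Trivial linear classifier; params comes from the constructor call below
    logits = tf.layers.dense(features['x'], params['n_classes'])
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=logits)
    loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir='/tmp/demo_model',   # hypothetical path
    params={'n_classes': 3})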
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/stc.py | python | StyledTextCtrl.SetMultiPaste | (*args, **kwargs) | return _stc.StyledTextCtrl_SetMultiPaste(*args, **kwargs) | SetMultiPaste(self, int multiPaste) | SetMultiPaste(self, int multiPaste) | [
"SetMultiPaste",
"(",
"self",
"int",
"multiPaste",
")"
] | def SetMultiPaste(*args, **kwargs):
"""SetMultiPaste(self, int multiPaste)"""
return _stc.StyledTextCtrl_SetMultiPaste(*args, **kwargs) | [
"def",
"SetMultiPaste",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetMultiPaste",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L4277-L4279 |
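A hedged sketch; the STC_MULTIPASTE_* constants mirror Scintilla's SC_MULTIPASTE_ONCE/EACH and are assumed to be exposed under wx.stc:

import wx
import wx.stc as stc

app = wx.App(False)
frame = wx.Frame(None, title='stc demo')
editor = stc.StyledTextCtrl(frame)
editor.SetMultipleSelection(True)
editor.SetMultiPaste(stc.STC_MULTIPASTE_EACH)  # paste into every caret/selection
frame.Show()
app.MainLoop()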
|
wujian16/Cornell-MOE | df299d1be882d2af9796d7a68b3f9505cac7a53e | moe/optimal_learning/python/base_prior.py | python | NormalPrior.sample_from_prior | (self, n_samples) | return p0[:, np.newaxis] | Returns N samples from the prior.
Parameters
----------
n_samples : int
The number of samples that will be drawn.
Returns
-------
(N, D) np.array
The samples from the prior. | Returns N samples from the prior. | [
"Returns",
"N",
"samples",
"from",
"the",
"prior",
"."
] | def sample_from_prior(self, n_samples):
"""
Returns N samples from the prior.
Parameters
----------
n_samples : int
The number of samples that will be drawn.
Returns
-------
(N, D) np.array
The samples from the prior.
"""
p0 = np.random.normal(loc=self.mean,
scale=self.sigma,
size=n_samples)
return p0[:, np.newaxis] | [
"def",
"sample_from_prior",
"(",
"self",
",",
"n_samples",
")",
":",
"p0",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"loc",
"=",
"self",
".",
"mean",
",",
"scale",
"=",
"self",
".",
"sigma",
",",
"size",
"=",
"n_samples",
")",
"return",
"p0",
"[",
":",
",",
"np",
".",
"newaxis",
"]"
] | https://github.com/wujian16/Cornell-MOE/blob/df299d1be882d2af9796d7a68b3f9505cac7a53e/moe/optimal_learning/python/base_prior.py#L356-L374 |
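A hedged sketch; the NormalPrior(mean, sigma) constructor signature is an assumption here (the method itself only reads self.mean and self.sigma):

import numpy as np
from moe.optimal_learning.python.base_prior import NormalPrior

np.random.seed(0)
prior = NormalPrior(0.0, 1.0)         # assumed (mean, sigma) order
samples = prior.sample_from_prior(5)
print(samples.shape)                  # (5, 1): N draws, one column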
|
tzutalin/dlib-android | 989627cb7fe81cd1d41d73434b0e91ce1dd2683f | tools/lint/cpplint.py | python | FindStartOfExpressionInLine | (line, endpos, stack) | return (-1, stack) | Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line) | Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line) | [
"Find",
"position",
"at",
"the",
"matching",
"start",
"of",
"current",
"expression",
".",
"This",
"is",
"almost",
"the",
"reverse",
"of",
"FindEndOfExpressionInLine",
"but",
"note",
"that",
"the",
"input",
"position",
"and",
"returned",
"position",
"differs",
"by",
"1",
".",
"Args",
":",
"line",
":",
"a",
"CleansedLines",
"line",
".",
"endpos",
":",
"start",
"searching",
"at",
"this",
"position",
".",
"stack",
":",
"nesting",
"stack",
"at",
"endpos",
".",
"Returns",
":",
"On",
"finding",
"matching",
"start",
":",
"(",
"index",
"at",
"matching",
"start",
"None",
")",
"On",
"finding",
"an",
"unclosed",
"expression",
":",
"(",
"-",
"1",
"None",
")",
"Otherwise",
":",
"(",
"-",
"1",
"new",
"stack",
"at",
"beginning",
"of",
"this",
"line",
")"
] | def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are currently
# expecting a '<', the matching '>' must have been an operator, since
# template argument list should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack) | [
"def",
"FindStartOfExpressionInLine",
"(",
"line",
",",
"endpos",
",",
"stack",
")",
":",
"i",
"=",
"endpos",
"while",
"i",
">=",
"0",
":",
"char",
"=",
"line",
"[",
"i",
"]",
"if",
"char",
"in",
"')]}'",
":",
"# Found end of expression, push to expression stack",
"stack",
".",
"append",
"(",
"char",
")",
"elif",
"char",
"==",
"'>'",
":",
"# Found potential end of template argument list.",
"#",
"# Ignore it if it's a \"->\" or \">=\" or \"operator>\"",
"if",
"(",
"i",
">",
"0",
"and",
"(",
"line",
"[",
"i",
"-",
"1",
"]",
"==",
"'-'",
"or",
"Match",
"(",
"r'\\s>=\\s'",
",",
"line",
"[",
"i",
"-",
"1",
":",
"]",
")",
"or",
"Search",
"(",
"r'\\boperator\\s*$'",
",",
"line",
"[",
"0",
":",
"i",
"]",
")",
")",
")",
":",
"i",
"-=",
"1",
"else",
":",
"stack",
".",
"append",
"(",
"'>'",
")",
"elif",
"char",
"==",
"'<'",
":",
"# Found potential start of template argument list",
"if",
"i",
">",
"0",
"and",
"line",
"[",
"i",
"-",
"1",
"]",
"==",
"'<'",
":",
"# Left shift operator",
"i",
"-=",
"1",
"else",
":",
"# If there is a matching '>', we can pop the expression stack.",
"# Otherwise, ignore this '<' since it must be an operator.",
"if",
"stack",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'>'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"i",
",",
"None",
")",
"elif",
"char",
"in",
"'([{'",
":",
"# Found start of expression.",
"#",
"# If there are any unmatched '>' on the stack, they must be",
"# operators. Remove those.",
"while",
"stack",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'>'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"-",
"1",
",",
"None",
")",
"if",
"(",
"(",
"char",
"==",
"'('",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"')'",
")",
"or",
"(",
"char",
"==",
"'['",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"']'",
")",
"or",
"(",
"char",
"==",
"'{'",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'}'",
")",
")",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"i",
",",
"None",
")",
"else",
":",
"# Mismatched parentheses",
"return",
"(",
"-",
"1",
",",
"None",
")",
"elif",
"char",
"==",
"';'",
":",
"# Found something that look like end of statements. If we are currently",
"# expecting a '<', the matching '>' must have been an operator, since",
"# template argument list should not contain statements.",
"while",
"stack",
"and",
"stack",
"[",
"-",
"1",
"]",
"==",
"'>'",
":",
"stack",
".",
"pop",
"(",
")",
"if",
"not",
"stack",
":",
"return",
"(",
"-",
"1",
",",
"None",
")",
"i",
"-=",
"1",
"return",
"(",
"-",
"1",
",",
"stack",
")"
] | https://github.com/tzutalin/dlib-android/blob/989627cb7fe81cd1d41d73434b0e91ce1dd2683f/tools/lint/cpplint.py#L1524-L1595 |
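The function is pure, so a single-line worked example shows the contract: endpos points at a closing delimiter, and an empty stack means no nesting carried over from later lines:

line = 'total = Sum(a[i], b[i])'
endpos = line.rfind(')')                  # index 22, the outer ')'
start, rest = FindStartOfExpressionInLine(line, endpos, [])
print(start, line[start])                 # 11 '(' -- the matching open paren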
|
yun-liu/RCF | 91bfb054ad04187dbbe21e539e165ad9bd3ff00b | scripts/cpp_lint.py | python | FileInfo.RepositoryName | (self) | return fullname | FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors. | FullName after removing the local path to the repository. | [
"FullName",
"after",
"removing",
"the",
"local",
"path",
"to",
"the",
"repository",
"."
] | def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname | [
"def",
"RepositoryName",
"(",
"self",
")",
":",
"fullname",
"=",
"self",
".",
"FullName",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"fullname",
")",
":",
"project_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fullname",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"\".svn\"",
")",
")",
":",
"# If there's a .svn file in the current directory, we recursively look",
"# up the directory tree for the top of the SVN checkout",
"root_dir",
"=",
"project_dir",
"one_up_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"root_dir",
")",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"one_up_dir",
",",
"\".svn\"",
")",
")",
":",
"root_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"root_dir",
")",
"one_up_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"one_up_dir",
")",
"prefix",
"=",
"os",
".",
"path",
".",
"commonprefix",
"(",
"[",
"root_dir",
",",
"project_dir",
"]",
")",
"return",
"fullname",
"[",
"len",
"(",
"prefix",
")",
"+",
"1",
":",
"]",
"# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by",
"# searching up from the current path.",
"root_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fullname",
")",
"while",
"(",
"root_dir",
"!=",
"os",
".",
"path",
".",
"dirname",
"(",
"root_dir",
")",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\".git\"",
")",
")",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\".hg\"",
")",
")",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\".svn\"",
")",
")",
")",
":",
"root_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"root_dir",
")",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\".git\"",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\".hg\"",
")",
")",
"or",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\".svn\"",
")",
")",
")",
":",
"prefix",
"=",
"os",
".",
"path",
".",
"commonprefix",
"(",
"[",
"root_dir",
",",
"project_dir",
"]",
")",
"return",
"fullname",
"[",
"len",
"(",
"prefix",
")",
"+",
"1",
":",
"]",
"# Don't know what to do; header guard warnings may be wrong...",
"return",
"fullname"
] | https://github.com/yun-liu/RCF/blob/91bfb054ad04187dbbe21e539e165ad9bd3ff00b/scripts/cpp_lint.py#L885-L928 |
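A hedged sketch via cpplint's FileInfo wrapper; the checkout layout is hypothetical, and the result depends on where a .git/.hg/.svn marker is actually found:

from cpp_lint import FileInfo   # this repo's vendored cpplint module

# Suppose /home/dev/proj is a checkout containing a .git directory:
fi = FileInfo('/home/dev/proj/src/widget.cc')
print(fi.RepositoryName())      # 'src/widget.cc'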
|
lammps/lammps | b75c3065430a75b1b5543a10e10f46d9b4c91913 | tools/amber2lmp/amber2lammps.py | python | Amber.Read_TOP | (self, Basename) | Read the Amber parameter/topology (.top) file | Read the Amber parameter/topology (.top) file | [
"Read",
"the",
"Amber",
"parameter",
"/",
"topology",
"(",
".",
"top",
")",
"file"
] | def Read_TOP(self, Basename):
'Read the Amber parameter/topology (.top) file'
Item_list = self.Read_data(Basename + '.top')
if Item_list == None:
return
elif len(Item_list) < 31:
print '(error: File too short!)'
return
# Parse the data
if self.__dict__.has_key('ITITL'):
if Pop(Item_list,0) != self.ITITL:
print '(warning: ITITL differs!)'
else:
self.ITITL = Pop(Item_list,0)
print self.ITITL # Printing Self Title
if self.__dict__.has_key('NATOM'):
if eval(Pop(Item_list,0)) != self.NATOM:
print '(error: NATOM differs!)'
return
else:
self.NATOM = eval(Pop(Item_list,0))
print self.NATOM # Printing total number of atoms just to make sure that things are going right
self.NTYPES = eval(Pop(Item_list,0))
self.NBONH = eval(Pop(Item_list,0))
self.MBONA = eval(Pop(Item_list,0))
self.NTHETH = eval(Pop(Item_list,0))
self.MTHETA = eval(Pop(Item_list,0))
self.NPHIH = eval(Pop(Item_list,0))
self.MPHIA = eval(Pop(Item_list,0))
self.NHPARM = eval(Pop(Item_list,0))
self.NPARM = eval(Pop(Item_list,0))
self.NEXT = eval(Pop(Item_list,0))
self.NRES = eval(Pop(Item_list,0))
self.NBONA = eval(Pop(Item_list,0))
self.NTHETA = eval(Pop(Item_list,0))
self.NPHIA = eval(Pop(Item_list,0))
self.NUMBND = eval(Pop(Item_list,0))
self.NUMANG = eval(Pop(Item_list,0))
self.NPTRA = eval(Pop(Item_list,0))
self.NATYP = eval(Pop(Item_list,0))
self.NPHB = eval(Pop(Item_list,0))
self.IFPERT = eval(Pop(Item_list,0))
self.NBPER = eval(Pop(Item_list,0))
self.NGPER = eval(Pop(Item_list,0))
self.NDPER = eval(Pop(Item_list,0))
self.MBPER = eval(Pop(Item_list,0))
self.MGPER = eval(Pop(Item_list,0))
self.MDPER = eval(Pop(Item_list,0))
self.IFBOX = eval(Pop(Item_list,0))
self.NMXRS = eval(Pop(Item_list,0))
self.IFCAP = eval(Pop(Item_list,0))
#....................................................
if len(Item_list) < 5 * self.NATOM + self.NTYPES**2 + \
2*(self.NRES + self.NUMBND + self.NUMANG) + \
3*self.NPTRA + self.NATYP:
print '(error: File too short!)'
return -1
self.IGRAPH = []
Pop(Item_list,0)
# A little kludge is needed here, since the IGRAPH strings are
# not separated by spaces if 4 characters in length.
for i in range(self.NATOM):
if len(Item_list[0]) > 4:
Item_list.insert(1, Item_list[0][4:])
Item_list.insert(1, Item_list[0][0:4])
del Item_list[0]
self.IGRAPH.append(Pop(Item_list,0))
# Vikas' Modification : In the following section, I am printing out each quantity which is currently being read from the topology file.
print 'Reading Charges...'
self.CHRG = []
for i in range(self.NATOM):
self.CHRG.append(eval(Pop(Item_list,0)))
print 'Reading Atomic Number...'
self.ANUMBER = []
for i in range(self.NATOM):
self.ANUMBER.append(eval(Pop(Item_list,0)))
print 'Reading Atomic Masses...'
self.AMASS = []
for i in range(self.NATOM):
self.AMASS.append(eval(Pop(Item_list,0)))
print 'Reading Atom Types...'
self.IAC = []
for i in range(self.NATOM):
self.IAC.append(eval(Pop(Item_list,0)))
print 'Reading Excluded Atoms...'
self.NUMEX = []
for i in range(self.NATOM):
self.NUMEX.append(eval(Pop(Item_list,0)))
print 'Reading Non-bonded Parameter Index...'
self.ICO = []
for i in range(self.NTYPES**2):
self.ICO.append(eval(Pop(Item_list,0)))
print 'Reading Residue Labels...'
self.LABRES = []
for i in range(self.NRES):
self.LABRES.append(Pop(Item_list,0))
print 'Reading Residues Starting Pointers...'
self.IPRES = []
for i in range(self.NRES):
self.IPRES.append(eval(Pop(Item_list,0)))
print 'Reading Bond Force Constants...'
self.RK = []
for i in range(self.NUMBND):
self.RK.append(eval(Pop(Item_list,0)))
print 'Reading Equilibrium Bond Values...'
self.REQ = []
for i in range(self.NUMBND):
self.REQ.append(eval(Pop(Item_list,0)))
print 'Reading Angle Force Constants...'
self.TK = []
for i in range(self.NUMANG):
self.TK.append(eval(Pop(Item_list,0)))
print 'Reading Equilibrium Angle Values...'
self.TEQ = []
for i in range(self.NUMANG):
self.TEQ.append(eval(Pop(Item_list,0)))
print 'Reading Dihedral Force Constants...'
self.PK = []
for i in range(self.NPTRA):
self.PK.append(eval(Pop(Item_list,0)))
print 'Reading Dihedral Periodicity...'
self.PN = []
for i in range(self.NPTRA):
self.PN.append(eval(Pop(Item_list,0)))
print 'Reading Dihedral Phase...'
self.PHASE = []
for i in range(self.NPTRA):
self.PHASE.append(eval(Pop(Item_list,0)))
print 'Reading 1-4 Electrostatic Scaling Factor...'
self.SCEEFAC = []
for i in range(self.NPTRA):
self.SCEEFAC.append(eval(Pop(Item_list,0)))
print 'Reading 1-4 Van der Waals Scaling Factor...'
self.SCNBFAC = []
for i in range(self.NPTRA):
self.SCNBFAC.append(eval(Pop(Item_list,0)))
print 'Reading Solty...' #I think this is currently not used in AMBER. Check it out, though
self.SOLTY = []
for i in range(self.NATYP):
self.SOLTY.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list) < 2 * self.NTYPES * (self.NTYPES + 1) / 2:
print '(error: File too short!)'
return -1
print 'Reading LJ A Coefficient...'
self.CN1 = []
for i in range(self.NTYPES * (self.NTYPES + 1) / 2):
self.CN1.append(eval(Pop(Item_list,0)))
print 'Reading LJ B Coefficient...'
self.CN2 = []
for i in range(self.NTYPES * (self.NTYPES + 1) / 2):
self.CN2.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list) < 3 * (self.NBONH + self.NBONA) + \
4 * (self.NTHETH + self.NTHETA) + 5 * (self.NPHIH + self.NPHIA):
print '(error: File too short!)'
return -1
print 'Reading Bonds which include hydrogen...'
self.IBH = []
self.JBH = []
self.ICBH = []
for i in range(self.NBONH):
self.IBH.append(eval(Pop(Item_list,0)))
self.JBH.append(eval(Pop(Item_list,0)))
self.ICBH.append(eval(Pop(Item_list,0)))
print 'Reading Bonds which dont include hydrogen...'
self.IB = []
self.JB = []
self.ICB = []
for i in range(self.NBONA):
self.IB.append(eval(Pop(Item_list,0)))
self.JB.append(eval(Pop(Item_list,0)))
self.ICB.append(eval(Pop(Item_list,0)))
print 'Reading Angles which include hydrogen...'
self.ITH = []
self.JTH = []
self.KTH = []
self.ICTH = []
for i in range(self.NTHETH):
self.ITH.append(eval(Pop(Item_list,0)))
self.JTH.append(eval(Pop(Item_list,0)))
self.KTH.append(eval(Pop(Item_list,0)))
self.ICTH.append(eval(Pop(Item_list,0)))
print 'Reading Angles which dont include hydrogen...'
self.IT = []
self.JT = []
self.KT = []
self.ICT = []
for i in range(self.NTHETA):
self.IT.append(eval(Pop(Item_list,0)))
self.JT.append(eval(Pop(Item_list,0)))
self.KT.append(eval(Pop(Item_list,0)))
self.ICT.append(eval(Pop(Item_list,0)))
print 'Reading Dihedrals which include hydrogen...'
self.IPH = []
self.JPH = []
self.KPH = []
self.LPH = []
self.ICPH = []
for i in range(self.NPHIH):
self.IPH.append(eval(Pop(Item_list,0)))
self.JPH.append(eval(Pop(Item_list,0)))
self.KPH.append(eval(Pop(Item_list,0)))
self.LPH.append(eval(Pop(Item_list,0)))
self.ICPH.append(eval(Pop(Item_list,0)))
print 'Reading Dihedrals which dont include hydrogen...'
self.IP = []
self.JP = []
self.KP = []
self.LP = []
self.ICP = []
for i in range(self.NPHIA):
self.IP.append(eval(Pop(Item_list,0)))
self.JP.append(eval(Pop(Item_list,0)))
self.KP.append(eval(Pop(Item_list,0)))
self.LP.append(eval(Pop(Item_list,0)))
self.ICP.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list) < self.NEXT + 3 * self.NPHB + 4 * self.NATOM:
print '(error: File too short!)'
return -1
print 'Reading Excluded Atom List...'
self.NATEX = []
for i in range(self.NEXT):
self.NATEX.append(eval(Pop(Item_list,0)))
print 'Reading H-Bond A Coefficient, corresponding to r**12 term for all possible types...'
self.ASOL = []
for i in range(self.NPHB):
self.ASOL.append(eval(Pop(Item_list,0)))
print 'Reading H-Bond B Coefficient, corresponding to r**10 term for all possible types...'
self.BSOL = []
for i in range(self.NPHB):
self.BSOL.append(eval(Pop(Item_list,0)))
print 'Reading H-Bond Cut...' # I think it is not being used nowadays
self.HBCUT = []
for i in range(self.NPHB):
self.HBCUT.append(eval(Pop(Item_list,0)))
print 'Reading Amber Atom Types for each atom...'
self.ISYMBL = []
for i in range(self.NATOM):
self.ISYMBL.append(Pop(Item_list,0))
print 'Reading Tree Chain Classification...'
self.ITREE = []
for i in range(self.NATOM):
self.ITREE.append(Pop(Item_list,0))
print 'Reading Join Array: Tree joining information' # Currently unused in Sander, an AMBER module
self.JOIN = []
for i in range(self.NATOM):
self.JOIN.append(eval(Pop(Item_list,0)))
print 'Reading IRotate...' # Currently unused in Sander and Gibbs
self.IROTAT = []
for i in range(self.NATOM):
self.IROTAT.append(eval(Pop(Item_list,0)))
#....................................................
if self.IFBOX > 0:
if len(Item_list) < 3:
print '(error: File too short!)'
return -1
print 'Reading final residue which is part of solute...'
self.IPTRES = eval(Pop(Item_list,0))
print 'Reading total number of molecules...'
self.NSPM = eval(Pop(Item_list,0))
print 'Reading first solvent molecule index...'
self.NSPSOL = eval(Pop(Item_list,0))
if len(Item_list) < self.NSPM + 4:
print '(error: File too short!)'
return -1
print 'Reading atom per molecule...'
self.NSP = []
for i in range(self.NSPM):
self.NSP.append(eval(Pop(Item_list,0)))
self.BETA = eval(Pop(Item_list,0))
print 'Reading Box Dimensions...'
if self.__dict__.has_key('BOX'):
BOX = []
for i in range(3):
BOX.append(eval(Pop(Item_list,0)))
for i in range(3):
if BOX[i] != self.BOX[i]:
print '(warning: BOX differs!)',
break
del BOX
else:
self.BOX = []
for i in range(3):
self.BOX.append(eval(Pop(Item_list,0)))
#....................................................
if self.IFCAP > 0:
if len(Item_list) < 5:
print '(error: File too short!)'
return -1
print 'Reading ICAP variables::: For details, refer to online AMBER format manual'
self.NATCAP = eval(Pop(Item_list,0))
self.CUTCAP = eval(Pop(Item_list,0))
self.XCAP = eval(Pop(Item_list,0))
self.YCAP = eval(Pop(Item_list,0))
self.ZCAP = eval(Pop(Item_list,0))
#....................................................
if self.IFPERT > 0:
if len(Item_list) < 4 * self.NBPER + 5 * self.NGPER + \
6 * self.NDPER + self.NRES + 6 * self.NATOM:
print '(error: File too short!)'
return -1
print 'Reading perturb variables, 1. Bond, 2. Angles, 3. Dihedrals, etc etc.::: For details, refer to online AMBER format manual'
self.IBPER = []
self.JBPER = []
for i in range(self.NBPER):
self.IBPER.append(eval(Pop(Item_list,0)))
self.JBPER.append(eval(Pop(Item_list,0)))
self.ICBPER = []
for i in range(2 * self.NBPER):
self.ICBPER.append(eval(Pop(Item_list,0)))
self.ITPER = []
self.JTPER = []
self.KTPER = []
for i in range(self.NGPER):
self.ITPER.append(eval(Pop(Item_list,0)))
self.JTPER.append(eval(Pop(Item_list,0)))
self.KTPER.append(eval(Pop(Item_list,0)))
self.ICTPER = []
for i in range(2 * self.NGPER):
self.ICTPER.append(eval(Pop(Item_list,0)))
self.IPPER = []
self.JPPER = []
self.KPPER = []
self.LPPER = []
for i in range(self.NDPER):
self.IPPER.append(eval(Pop(Item_list,0)))
self.JPPER.append(eval(Pop(Item_list,0)))
self.KPPER.append(eval(Pop(Item_list,0)))
self.LPPER.append(eval(Pop(Item_list,0)))
self.ICPPER = []
for i in range(2 * self.NDPER):
self.ICPPER.append(eval(Pop(Item_list,0)))
LABRES = []
for i in range(self.NRES):
LABRES.append(Pop(Item_list,0))
for i in range(self.NRES):
if LABRES[i] != self.LABRES[i]:
print '(warning: LABRES differs!)',
break
self.IGRPER = []
for i in range(self.NATOM):
self.IGRPER.append(eval(Pop(Item_list,0)))
self.ISMPER = []
for i in range(self.NATOM):
self.ISMPER.append(eval(Pop(Item_list,0)))
self.ALMPER = []
for i in range(self.NATOM):
self.ALMPER.append(eval(Pop(Item_list,0)))
self.IAPER = []
for i in range(self.NATOM):
self.IAPER.append(eval(Pop(Item_list,0)))
self.IACPER = []
for i in range(self.NATOM):
self.IACPER.append(eval(Pop(Item_list,0)))
self.CGPER = []
for i in range(self.NATOM):
self.CGPER.append(eval(Pop(Item_list,0)))
#....................................................
self.IPOL = 0
if self.IPOL == 1:
if len(Item_list) < self.NATOM:
print '(error: File too short!)'
return -1
print 'Reading Polarizability Data. For details, refer to online AMBER format manual'
self.ATPOL = []
for i in range(self.NATOM):
self.ATPOL.append(eval(Pop(Item_list,0)))
if self.IFPERT == 1:
if len(Item_list) < self.NATOM:
print '(error: File too short!)'
return -1
self.ATPOL1 = []
for i in range(self.NATOM):
self.ATPOL1.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list):
print '(warning: File too large!)',
print 'done.'
self.TOP_is_read = 1 | [
"def",
"Read_TOP",
"(",
"self",
",",
"Basename",
")",
":",
"Item_list",
"=",
"self",
".",
"Read_data",
"(",
"Basename",
"+",
"'.top'",
")",
"if",
"Item_list",
"==",
"None",
":",
"return",
"elif",
"len",
"(",
"Item_list",
")",
"<",
"31",
":",
"print",
"'(error: File too short!)'",
"return",
"# Parse the data",
"if",
"self",
".",
"__dict__",
".",
"has_key",
"(",
"'ITITL'",
")",
":",
"if",
"Pop",
"(",
"Item_list",
",",
"0",
")",
"!=",
"self",
".",
"ITITL",
":",
"print",
"'(warning: ITITL differs!)'",
"else",
":",
"self",
".",
"ITITL",
"=",
"Pop",
"(",
"Item_list",
",",
"0",
")",
"print",
"self",
".",
"ITITL",
"# Printing Self Title",
"if",
"self",
".",
"__dict__",
".",
"has_key",
"(",
"'NATOM'",
")",
":",
"if",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"!=",
"self",
".",
"NATOM",
":",
"print",
"'(error: NATOM differs!)'",
"return",
"else",
":",
"self",
".",
"NATOM",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"self",
".",
"NATOM",
"# Printing total number of atoms just to make sure that thing are going right",
"self",
".",
"NTYPES",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NBONH",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"MBONA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NTHETH",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"MTHETA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NPHIH",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"MPHIA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NHPARM",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NPARM",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NEXT",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NRES",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NBONA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NTHETA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NPHIA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NUMBND",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NUMANG",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NPTRA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NATYP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NPHB",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"IFPERT",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NBPER",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NGPER",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NDPER",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"MBPER",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"MGPER",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"MDPER",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"IFBOX",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"NMXRS",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"IFCAP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"#....................................................",
"if",
"len",
"(",
"Item_list",
")",
"<",
"5",
"*",
"self",
".",
"NATOM",
"+",
"self",
".",
"NTYPES",
"**",
"2",
"+",
"2",
"*",
"(",
"self",
".",
"NRES",
"+",
"self",
".",
"NUMBND",
"+",
"self",
".",
"NUMANG",
")",
"+",
"3",
"*",
"self",
".",
"NPTRA",
"+",
"self",
".",
"NATYP",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"self",
".",
"IGRAPH",
"=",
"[",
"]",
"Pop",
"(",
"Item_list",
",",
"0",
")",
"# A little kludge is needed here, since the IGRAPH strings are",
"# not separated by spaces if 4 characters in length.",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"if",
"len",
"(",
"Item_list",
"[",
"0",
"]",
")",
">",
"4",
":",
"Item_list",
".",
"insert",
"(",
"1",
",",
"Item_list",
"[",
"0",
"]",
"[",
"4",
":",
"]",
")",
"Item_list",
".",
"insert",
"(",
"1",
",",
"Item_list",
"[",
"0",
"]",
"[",
"0",
":",
"4",
"]",
")",
"del",
"Item_list",
"[",
"0",
"]",
"self",
".",
"IGRAPH",
".",
"append",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"# Vikas' Modification : In the following section, I am printing out each quantity which is currently being read from the topology file.",
"print",
"'Reading Charges...'",
"self",
".",
"CHRG",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"CHRG",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Atomic Number...'",
"self",
".",
"ANUMBER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ANUMBER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Atomic Masses...'",
"self",
".",
"AMASS",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"AMASS",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Atom Types...'",
"self",
".",
"IAC",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"IAC",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Excluded Atoms...'",
"self",
".",
"NUMEX",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"NUMEX",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Non-bonded Parameter Index...'",
"self",
".",
"ICO",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NTYPES",
"**",
"2",
")",
":",
"self",
".",
"ICO",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Residue Labels...'",
"self",
".",
"LABRES",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NRES",
")",
":",
"self",
".",
"LABRES",
".",
"append",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"'Reading Residues Starting Pointers...'",
"self",
".",
"IPRES",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NRES",
")",
":",
"self",
".",
"IPRES",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Bond Force Constants...'",
"self",
".",
"RK",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NUMBND",
")",
":",
"self",
".",
"RK",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Equilibrium Bond Values...'",
"self",
".",
"REQ",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NUMBND",
")",
":",
"self",
".",
"REQ",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Angle Force Constants...'",
"self",
".",
"TK",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NUMANG",
")",
":",
"self",
".",
"TK",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Equilibrium Angle Values...'",
"self",
".",
"TEQ",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NUMANG",
")",
":",
"self",
".",
"TEQ",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Dihedral Force Constants...'",
"self",
".",
"PK",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPTRA",
")",
":",
"self",
".",
"PK",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Dihedral Periodicity...'",
"self",
".",
"PN",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPTRA",
")",
":",
"self",
".",
"PN",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Dihedral Phase...'",
"self",
".",
"PHASE",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPTRA",
")",
":",
"self",
".",
"PHASE",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading 1-4 Electrostatic Scaling Factor...'",
"self",
".",
"SCEEFAC",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPTRA",
")",
":",
"self",
".",
"SCEEFAC",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading 1-4 Van der Waals Scaling Factor...'",
"self",
".",
"SCNBFAC",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPTRA",
")",
":",
"self",
".",
"SCNBFAC",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Solty...'",
"#I think this is currently not used in AMBER. Check it out, though",
"self",
".",
"SOLTY",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATYP",
")",
":",
"self",
".",
"SOLTY",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"if",
"len",
"(",
"Item_list",
")",
"<",
"2",
"*",
"self",
".",
"NTYPES",
"*",
"(",
"self",
".",
"NTYPES",
"+",
"1",
")",
"/",
"2",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading LJ A Coefficient...'",
"self",
".",
"CN1",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NTYPES",
"*",
"(",
"self",
".",
"NTYPES",
"+",
"1",
")",
"/",
"2",
")",
":",
"self",
".",
"CN1",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading LJ B Coefficient...'",
"self",
".",
"CN2",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NTYPES",
"*",
"(",
"self",
".",
"NTYPES",
"+",
"1",
")",
"/",
"2",
")",
":",
"self",
".",
"CN2",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"if",
"len",
"(",
"Item_list",
")",
"<",
"3",
"*",
"(",
"self",
".",
"NBONH",
"+",
"self",
".",
"NBONA",
")",
"+",
"4",
"*",
"(",
"self",
".",
"NTHETH",
"+",
"self",
".",
"NTHETA",
")",
"+",
"5",
"*",
"(",
"self",
".",
"NPHIH",
"+",
"self",
".",
"NPHIA",
")",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading Bonds which include hydrogen...'",
"self",
".",
"IBH",
"=",
"[",
"]",
"self",
".",
"JBH",
"=",
"[",
"]",
"self",
".",
"ICBH",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NBONH",
")",
":",
"self",
".",
"IBH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JBH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICBH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Bonds which dont include hydrogen...'",
"self",
".",
"IB",
"=",
"[",
"]",
"self",
".",
"JB",
"=",
"[",
"]",
"self",
".",
"ICB",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NBONA",
")",
":",
"self",
".",
"IB",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JB",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICB",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Angles which include hydrogen...'",
"self",
".",
"ITH",
"=",
"[",
"]",
"self",
".",
"JTH",
"=",
"[",
"]",
"self",
".",
"KTH",
"=",
"[",
"]",
"self",
".",
"ICTH",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NTHETH",
")",
":",
"self",
".",
"ITH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JTH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"KTH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICTH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Angles which dont include hydrogen...'",
"self",
".",
"IT",
"=",
"[",
"]",
"self",
".",
"JT",
"=",
"[",
"]",
"self",
".",
"KT",
"=",
"[",
"]",
"self",
".",
"ICT",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NTHETA",
")",
":",
"self",
".",
"IT",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JT",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"KT",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICT",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Dihedrals which include hydrogen...'",
"self",
".",
"IPH",
"=",
"[",
"]",
"self",
".",
"JPH",
"=",
"[",
"]",
"self",
".",
"KPH",
"=",
"[",
"]",
"self",
".",
"LPH",
"=",
"[",
"]",
"self",
".",
"ICPH",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPHIH",
")",
":",
"self",
".",
"IPH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JPH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"KPH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"LPH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICPH",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Dihedrals which dont include hydrogen...'",
"self",
".",
"IP",
"=",
"[",
"]",
"self",
".",
"JP",
"=",
"[",
"]",
"self",
".",
"KP",
"=",
"[",
"]",
"self",
".",
"LP",
"=",
"[",
"]",
"self",
".",
"ICP",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPHIA",
")",
":",
"self",
".",
"IP",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JP",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"KP",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"LP",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICP",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"if",
"len",
"(",
"Item_list",
")",
"<",
"self",
".",
"NEXT",
"+",
"3",
"*",
"self",
".",
"NPHB",
"+",
"4",
"*",
"self",
".",
"NATOM",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading Excluded Atom List...'",
"self",
".",
"NATEX",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NEXT",
")",
":",
"self",
".",
"NATEX",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading H-Bond A Coefficient, corresponding to r**12 term for all possible types...'",
"self",
".",
"ASOL",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPHB",
")",
":",
"self",
".",
"ASOL",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading H-Bond B Coefficient, corresponding to r**10 term for all possible types...'",
"self",
".",
"BSOL",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPHB",
")",
":",
"self",
".",
"BSOL",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading H-Bond Cut...'",
"# I think it is not being used nowadays",
"self",
".",
"HBCUT",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NPHB",
")",
":",
"self",
".",
"HBCUT",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading Amber Atom Types for each atom...'",
"self",
".",
"ISYMBL",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ISYMBL",
".",
"append",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"'Reading Tree Chain Classification...'",
"self",
".",
"ITREE",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ITREE",
".",
"append",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"'Reading Join Array: Tree joining information'",
"# Currently unused in Sander, an AMBER module",
"self",
".",
"JOIN",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"JOIN",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"print",
"'Reading IRotate...'",
"# Currently unused in Sander and Gibbs",
"self",
".",
"IROTAT",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"IROTAT",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"if",
"self",
".",
"IFBOX",
">",
"0",
":",
"if",
"len",
"(",
"Item_list",
")",
"<",
"3",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading final residue which is part of solute...'",
"self",
".",
"IPTRES",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"'Reading total number of molecules...'",
"self",
".",
"NSPM",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"'Reading first solvent moleule index...'",
"self",
".",
"NSPSOL",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"if",
"len",
"(",
"Item_list",
")",
"<",
"self",
".",
"NSPM",
"+",
"4",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading atom per molecule...'",
"self",
".",
"NSP",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NSPM",
")",
":",
"self",
".",
"NSP",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"BETA",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"print",
"'Reading Box Dimensions...'",
"if",
"self",
".",
"__dict__",
".",
"has_key",
"(",
"'BOX'",
")",
":",
"BOX",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"BOX",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"if",
"BOX",
"[",
"i",
"]",
"!=",
"self",
".",
"BOX",
"[",
"i",
"]",
":",
"print",
"'(warning: BOX differs!)'",
",",
"break",
"del",
"BOX",
"else",
":",
"self",
".",
"BOX",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"self",
".",
"BOX",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"if",
"self",
".",
"IFCAP",
">",
"0",
":",
"if",
"len",
"(",
"Item_list",
")",
"<",
"5",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading ICAP variables::: For details, refer to online AMBER format manual'",
"self",
".",
"NATCAP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"CUTCAP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"XCAP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"YCAP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"self",
".",
"ZCAP",
"=",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"#....................................................",
"if",
"self",
".",
"IFPERT",
">",
"0",
":",
"if",
"len",
"(",
"Item_list",
")",
"<",
"4",
"*",
"self",
".",
"NBPER",
"+",
"5",
"*",
"self",
".",
"NGPER",
"+",
"6",
"*",
"self",
".",
"NDPER",
"+",
"self",
".",
"NRES",
"+",
"6",
"*",
"self",
".",
"NATOM",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading perturb variables, 1. Bond, 2. Angles, 3. Dihedrals, etc etc.::: For details, refer to online AMBER format manual'",
"self",
".",
"IBPER",
"=",
"[",
"]",
"self",
".",
"JBPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NBPER",
")",
":",
"self",
".",
"IBPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JBPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICBPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"2",
"*",
"self",
".",
"NBPER",
")",
":",
"self",
".",
"ICBPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ITPER",
"=",
"[",
"]",
"self",
".",
"JTPER",
"=",
"[",
"]",
"self",
".",
"KTPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NGPER",
")",
":",
"self",
".",
"ITPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JTPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"KTPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICTPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"2",
"*",
"self",
".",
"NGPER",
")",
":",
"self",
".",
"ICTPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"IPPER",
"=",
"[",
"]",
"self",
".",
"JPPER",
"=",
"[",
"]",
"self",
".",
"KPPER",
"=",
"[",
"]",
"self",
".",
"LPPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NDPER",
")",
":",
"self",
".",
"IPPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"JPPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"KPPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"LPPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ICPPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"2",
"*",
"self",
".",
"NDPER",
")",
":",
"self",
".",
"ICPPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"LABRES",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NRES",
")",
":",
"LABRES",
".",
"append",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NRES",
")",
":",
"if",
"LABRES",
"[",
"i",
"]",
"!=",
"self",
".",
"LABRES",
"[",
"i",
"]",
":",
"print",
"'(warning: BOX differs!)'",
",",
"break",
"self",
".",
"IGRPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"IGRPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ISMPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ISMPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"ALMPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ALMPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"IAPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"IAPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"IACPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"IACPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"self",
".",
"CGPER",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"CGPER",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"self",
".",
"IPOL",
"=",
"0",
"if",
"self",
".",
"IPOL",
"==",
"1",
":",
"if",
"len",
"(",
"Item_list",
")",
"<",
"self",
".",
"NATOM",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"print",
"'Reading Polarizability Data. For details, refer to online AMBER format manual'",
"self",
".",
"ATPOL",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ATPOL",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"if",
"self",
".",
"IFPERT",
"==",
"1",
":",
"if",
"len",
"(",
"Item_list",
")",
"<",
"self",
".",
"NATOM",
":",
"print",
"'(error: File too short!)'",
"return",
"-",
"1",
"self",
".",
"ATPOL1",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"NATOM",
")",
":",
"self",
".",
"ATPOL1",
".",
"append",
"(",
"eval",
"(",
"Pop",
"(",
"Item_list",
",",
"0",
")",
")",
")",
"#....................................................",
"if",
"len",
"(",
"Item_list",
")",
":",
"print",
"'(warning: File too large!)'",
",",
"print",
"'done.'",
"self",
".",
"TOP_is_read",
"=",
"1"
] | https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/amber2lmp/amber2lammps.py#L476-L933 |
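The amber2lammps reader above drains one flat Item_list of whitespace-split prmtop fields, converting each popped token into a number. A minimal sketch of that pattern, with the undisplayed Pop helper assumed to behave like list.pop(index) and float() standing in for the original eval() calls (both are assumptions, not the file's literal code):

    # Sketch of the pop-and-convert loop used throughout the parser above.
    # float() replaces the original eval(); Pop is assumed to act like
    # list.pop(index).
    Item_list = "3 1.5 2.5 3.5".split()
    NATOM = int(Item_list.pop(0))                            # 3
    CHRG = [float(Item_list.pop(0)) for _ in range(NATOM)]   # [1.5, 2.5, 3.5]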
||
ouster-lidar/ouster_example | 13ea8e8b8a4951fb630dbc9108666995c8443bf6 | python/src/ouster/sdk/examples/pcap.py | python | main | () | Pcap examples runner. | Pcap examples runner. | [
"Pcap",
"examples",
"runner",
"."
] | def main():
"""Pcap examples runner."""
examples = {
"open3d-one-scan": pcap_3d_one_scan,
"plot-xyz-points": pcap_display_xyz_points,
"pcap-to-csv": pcap_to_csv,
"pcap-to-las": pcap_to_las,
"pcap-to-pcd": pcap_to_pcd,
"query-scan": pcap_query_scan,
"read-packets": pcap_read_packets,
}
description = "Ouster Python SDK Pcap examples. The EXAMPLE must be one of:\n " + str.join(
'\n ', examples.keys())
parser = argparse.ArgumentParser(
description=description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('pcap_path', metavar='PCAP', help='path to pcap file')
parser.add_argument('metadata_path',
metavar='METADATA',
help='path to metadata json')
parser.add_argument('example',
metavar='EXAMPLE',
choices=examples.keys(),
help='name of the example to run')
parser.add_argument('--scan-num',
type=int,
default=1,
help='index of scan to use')
args = parser.parse_args()
try:
example = examples[args.example]
except KeyError:
print(f"No such example: {args.example}")
print(description)
exit(1)
if not args.metadata_path or not os.path.exists(args.metadata_path):
print(f"Metadata file does not exist: {args.metadata_path}")
exit(1)
print(f'example: {args.example}')
with open(args.metadata_path, 'r') as f:
metadata = client.SensorInfo(f.read())
source = pcap.Pcap(args.pcap_path, metadata)
with closing(source):
example(source, metadata, args.scan_num) | [
"def",
"main",
"(",
")",
":",
"examples",
"=",
"{",
"\"open3d-one-scan\"",
":",
"pcap_3d_one_scan",
",",
"\"plot-xyz-points\"",
":",
"pcap_display_xyz_points",
",",
"\"pcap-to-csv\"",
":",
"pcap_to_csv",
",",
"\"pcap-to-las\"",
":",
"pcap_to_las",
",",
"\"pcap-to-pcd\"",
":",
"pcap_to_pcd",
",",
"\"query-scan\"",
":",
"pcap_query_scan",
",",
"\"read-packets\"",
":",
"pcap_read_packets",
",",
"}",
"description",
"=",
"\"Ouster Python SDK Pcap examples. The EXAMPLE must be one of:\\n \"",
"+",
"str",
".",
"join",
"(",
"'\\n '",
",",
"examples",
".",
"keys",
"(",
")",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"description",
",",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
")",
"parser",
".",
"add_argument",
"(",
"'pcap_path'",
",",
"metavar",
"=",
"'PCAP'",
",",
"help",
"=",
"'path to pcap file'",
")",
"parser",
".",
"add_argument",
"(",
"'metadata_path'",
",",
"metavar",
"=",
"'METADATA'",
",",
"help",
"=",
"'path to metadata json'",
")",
"parser",
".",
"add_argument",
"(",
"'example'",
",",
"metavar",
"=",
"'EXAMPLE'",
",",
"choices",
"=",
"examples",
".",
"keys",
"(",
")",
",",
"help",
"=",
"'name of the example to run'",
")",
"parser",
".",
"add_argument",
"(",
"'--scan-num'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"1",
",",
"help",
"=",
"'index of scan to use'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"try",
":",
"example",
"=",
"examples",
"[",
"args",
".",
"example",
"]",
"except",
"KeyError",
":",
"print",
"(",
"f\"No such example: {args.example}\"",
")",
"print",
"(",
"description",
")",
"exit",
"(",
"1",
")",
"if",
"not",
"args",
".",
"metadata_path",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"args",
".",
"metadata_path",
")",
":",
"print",
"(",
"f\"Metadata file does not exist: {args.metadata_path}\"",
")",
"exit",
"(",
"1",
")",
"print",
"(",
"f'example: {args.example}'",
")",
"with",
"open",
"(",
"args",
".",
"metadata_path",
",",
"'r'",
")",
"as",
"f",
":",
"metadata",
"=",
"client",
".",
"SensorInfo",
"(",
"f",
".",
"read",
"(",
")",
")",
"source",
"=",
"pcap",
".",
"Pcap",
"(",
"args",
".",
"pcap_path",
",",
"metadata",
")",
"with",
"closing",
"(",
"source",
")",
":",
"example",
"(",
"source",
",",
"metadata",
",",
"args",
".",
"scan_num",
")"
] | https://github.com/ouster-lidar/ouster_example/blob/13ea8e8b8a4951fb630dbc9108666995c8443bf6/python/src/ouster/sdk/examples/pcap.py#L308-L358 |
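main above is a thin argparse dispatcher over the examples table. A hedged sketch of driving one entry without the CLI, reusing only the calls that appear in the function body; the `from ouster import client, pcap` import layout and both file paths are assumptions:

    # Assumed import layout and placeholder paths; mirrors the tail of main().
    from contextlib import closing
    from ouster import client, pcap
    from ouster.sdk.examples.pcap import pcap_query_scan

    with open("meta.json", "r") as f:                # placeholder metadata path
        metadata = client.SensorInfo(f.read())
    source = pcap.Pcap("capture.pcap", metadata)     # placeholder pcap path
    with closing(source):
        pcap_query_scan(source, metadata, 1)         # scan_num=1, the default

Assuming the module is runnable with -m, the equivalent command line would be something like `python3 -m ouster.sdk.examples.pcap capture.pcap meta.json query-scan --scan-num 1`.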
||
toggl-open-source/toggldesktop | 91865205885531cc8fd9e8d613dad49d625d56e7 | third_party/cpplint/cpplint.py | python | CheckForNonStandardConstructs | (filename, clean_lines, linenum,
nesting_state, error) | r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message | r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. | [
"r",
"Logs",
"an",
"error",
"if",
"we",
"see",
"certain",
"non",
"-",
"ANSI",
"constructs",
"ignored",
"by",
"gcc",
"-",
"2",
"."
] | def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.') | [
"def",
"CheckForNonStandardConstructs",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
":",
"# Remove comments from the line, but leave in strings for now.",
"line",
"=",
"clean_lines",
".",
"lines",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'printf\\s*\\(.*\".*%[-+ ]?\\d*q'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf_format'",
",",
"3",
",",
"'%q in format strings is deprecated. Use %ll instead.'",
")",
"if",
"Search",
"(",
"r'printf\\s*\\(.*\".*%\\d+\\$'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf_format'",
",",
"2",
",",
"'%N$ formats are unconventional. Try rewriting to avoid them.'",
")",
"# Remove escaped backslashes before looking for undefined escapes.",
"line",
"=",
"line",
".",
"replace",
"(",
"'\\\\\\\\'",
",",
"''",
")",
"if",
"Search",
"(",
"r'(\"|\\').*\\\\(%|\\[|\\(|{)'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/printf_format'",
",",
"3",
",",
"'%, [, (, and { are undefined character escapes. Unescape them.'",
")",
"# For the rest, work with both comments and strings removed.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"if",
"Search",
"(",
"r'\\b(const|volatile|void|char|short|int|long'",
"r'|float|double|signed|unsigned'",
"r'|schar|u?int8|u?int16|u?int32|u?int64)'",
"r'\\s+(register|static|extern|typedef)\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/storage_class'",
",",
"5",
",",
"'Storage class (static, extern, typedef, etc) should be first.'",
")",
"if",
"Match",
"(",
"r'\\s*#\\s*endif\\s*[^/\\s]+'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/endif_comment'",
",",
"5",
",",
"'Uncommented text after #endif is non-standard. Use a comment.'",
")",
"if",
"Match",
"(",
"r'\\s*class\\s+(\\w+\\s*::\\s*)+\\w+\\s*;'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/forward_decl'",
",",
"5",
",",
"'Inner-style forward declarations are invalid. Remove this line.'",
")",
"if",
"Search",
"(",
"r'(\\w+|[+-]?\\d+(\\.\\d*)?)\\s*(<|>)\\?=?\\s*(\\w+|[+-]?\\d+)(\\.\\d*)?'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/deprecated'",
",",
"3",
",",
"'>? and <? (max and min) operators are non-standard and deprecated.'",
")",
"if",
"Search",
"(",
"r'^\\s*const\\s*string\\s*&\\s*\\w+\\s*;'",
",",
"line",
")",
":",
"# TODO(unknown): Could it be expanded safely to arbitrary references,",
"# without triggering too many false positives? The first",
"# attempt triggered 5 warnings for mostly benign code in the regtest, hence",
"# the restriction.",
"# Here's the original regexp, for the reference:",
"# type_name = r'\\w+((\\s*::\\s*\\w+)|(\\s*<\\s*\\w+?\\s*>))?'",
"# r'\\s*const\\s*' + type_name + '\\s*&\\s*\\w+\\s*;'",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/member_string_references'",
",",
"2",
",",
"'const string& members are dangerous. It is much better to use '",
"'alternatives, such as pointers or simple constants.'",
")",
"# Everything else in this function operates on class declarations.",
"# Return early if the top of the nesting stack is not a class, or if",
"# the class head is not completed yet.",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"not",
"classinfo",
"or",
"not",
"classinfo",
".",
"seen_open_brace",
":",
"return",
"# The class may have been declared with namespace or classname qualifiers.",
"# The constructor and destructor will not have those qualifiers.",
"base_classname",
"=",
"classinfo",
".",
"name",
".",
"split",
"(",
"'::'",
")",
"[",
"-",
"1",
"]",
"# Look for single-argument constructors that aren't marked explicit.",
"# Technically a valid construct, but against style. Also look for",
"# non-single-argument constructors which are also technically valid, but",
"# strongly suggest something is wrong.",
"explicit_constructor_match",
"=",
"Match",
"(",
"r'\\s+(?:inline\\s+)?(explicit\\s+)?(?:inline\\s+)?%s\\s*'",
"r'\\(((?:[^()]|\\([^()]*\\))*)\\)'",
"%",
"re",
".",
"escape",
"(",
"base_classname",
")",
",",
"line",
")",
"if",
"explicit_constructor_match",
":",
"is_marked_explicit",
"=",
"explicit_constructor_match",
".",
"group",
"(",
"1",
")",
"if",
"not",
"explicit_constructor_match",
".",
"group",
"(",
"2",
")",
":",
"constructor_args",
"=",
"[",
"]",
"else",
":",
"constructor_args",
"=",
"explicit_constructor_match",
".",
"group",
"(",
"2",
")",
".",
"split",
"(",
"','",
")",
"# collapse arguments so that commas in template parameter lists and function",
"# argument parameter lists don't split arguments in two",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"constructor_args",
")",
":",
"constructor_arg",
"=",
"constructor_args",
"[",
"i",
"]",
"while",
"(",
"constructor_arg",
".",
"count",
"(",
"'<'",
")",
">",
"constructor_arg",
".",
"count",
"(",
"'>'",
")",
"or",
"constructor_arg",
".",
"count",
"(",
"'('",
")",
">",
"constructor_arg",
".",
"count",
"(",
"')'",
")",
")",
":",
"constructor_arg",
"+=",
"','",
"+",
"constructor_args",
"[",
"i",
"+",
"1",
"]",
"del",
"constructor_args",
"[",
"i",
"+",
"1",
"]",
"constructor_args",
"[",
"i",
"]",
"=",
"constructor_arg",
"i",
"+=",
"1",
"defaulted_args",
"=",
"[",
"arg",
"for",
"arg",
"in",
"constructor_args",
"if",
"'='",
"in",
"arg",
"]",
"noarg_constructor",
"=",
"(",
"not",
"constructor_args",
"or",
"# empty arg list",
"# 'void' arg specifier",
"(",
"len",
"(",
"constructor_args",
")",
"==",
"1",
"and",
"constructor_args",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"==",
"'void'",
")",
")",
"onearg_constructor",
"=",
"(",
"(",
"len",
"(",
"constructor_args",
")",
"==",
"1",
"and",
"# exactly one arg",
"not",
"noarg_constructor",
")",
"or",
"# all but at most one arg defaulted",
"(",
"len",
"(",
"constructor_args",
")",
">=",
"1",
"and",
"not",
"noarg_constructor",
"and",
"len",
"(",
"defaulted_args",
")",
">=",
"len",
"(",
"constructor_args",
")",
"-",
"1",
")",
")",
"initializer_list_constructor",
"=",
"bool",
"(",
"onearg_constructor",
"and",
"Search",
"(",
"r'\\bstd\\s*::\\s*initializer_list\\b'",
",",
"constructor_args",
"[",
"0",
"]",
")",
")",
"copy_constructor",
"=",
"bool",
"(",
"onearg_constructor",
"and",
"Match",
"(",
"r'(const\\s+)?%s(\\s*<[^>]*>)?(\\s+const)?\\s*(?:<\\w+>\\s*)?&'",
"%",
"re",
".",
"escape",
"(",
"base_classname",
")",
",",
"constructor_args",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
")",
"if",
"(",
"not",
"is_marked_explicit",
"and",
"onearg_constructor",
"and",
"not",
"initializer_list_constructor",
"and",
"not",
"copy_constructor",
")",
":",
"if",
"defaulted_args",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Constructors callable with one argument '",
"'should be marked explicit.'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Single-parameter constructors should be marked explicit.'",
")",
"elif",
"is_marked_explicit",
"and",
"not",
"onearg_constructor",
":",
"if",
"noarg_constructor",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"5",
",",
"'Zero-parameter constructors should not be marked explicit.'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/explicit'",
",",
"0",
",",
"'Constructors that require multiple arguments '",
"'should not be marked explicit.'",
")"
] | https://github.com/toggl-open-source/toggldesktop/blob/91865205885531cc8fd9e8d613dad49d625d56e7/third_party/cpplint/cpplint.py#L2573-L2734 |
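Every check in CheckForNonStandardConstructs reports through the error callable; as the calls in the body show, it receives a filename, line number, category string, confidence level, and message. A small sketch of a collecting sink that satisfies that contract; the sample arguments are made up, and the CleansedLines/NestingState plumbing cpplint normally supplies is omitted:

    # Error sink matching the callback shape used by the checks above.
    errors = []
    def error(filename, linenum, category, confidence, message):
        errors.append((filename, linenum, category, confidence, message))

    error("foo.cc", 12, "runtime/explicit", 5,
          "Single-parameter constructors should be marked explicit.")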
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/signal/ltisys.py | python | ZerosPolesGain.to_zpk | (self) | return copy.deepcopy(self) | Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy) | Return a copy of the current 'ZerosPolesGain' system. | [
"Return",
"a",
"copy",
"of",
"the",
"current",
"ZerosPolesGain",
"system",
"."
] | def to_zpk(self):
"""
Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self) | [
"def",
"to_zpk",
"(",
"self",
")",
":",
"return",
"copy",
".",
"deepcopy",
"(",
"self",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/signal/ltisys.py#L1045-L1055 |
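to_zpk above is just a deep copy, so the returned system shares no state with the original. A short usage sketch; the pole and gain values are arbitrary:

    # ZerosPolesGain takes (zeros, poles, gain); to_zpk() hands back an
    # independent copy, per the deepcopy above.
    from scipy.signal import ZerosPolesGain
    sys1 = ZerosPolesGain([], [-1.0], 2.0)   # 2 / (s + 1)
    sys2 = sys1.to_zpk()
    assert sys2 is not sys1                  # distinct object, same z, p, k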
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers.py | python | _get_elementwise_name_from_keras_layer | (keras_layer) | Get the keras layer name from the activation name. | Get the keras layer name from the activation name. | [
"Get",
"the",
"keras",
"layer",
"name",
"from",
"the",
"activation",
"name",
"."
] | def _get_elementwise_name_from_keras_layer(keras_layer):
"""
Get the keras layer name from the activation name.
"""
mode = keras_layer.mode
if mode == "sum":
return "ADD"
elif mode == "mul":
return "MULTIPLY"
elif mode == "concat":
if len(keras_layer.input_shape[0]) == 3 and (
keras_layer.concat_axis == 1 or keras_layer.concat_axis == -2
):
return "SEQUENCE_CONCAT"
elif len(keras_layer.input_shape[0]) == 4 and (
keras_layer.concat_axis == 3 or keras_layer.concat_axis == -1
):
return "CONCAT"
elif len(keras_layer.input_shape[0]) == 2 and (
keras_layer.concat_axis == 1 or keras_layer.concat_axis == -1
):
return "CONCAT"
else:
option = "input_shape = %s concat_axis = %s" % (
str(keras_layer.input_shape[0]),
str(keras_layer.concat_axis),
)
_utils.raise_error_unsupported_option(option, mode, keras_layer.name)
elif mode == "cos":
if len(keras_layer.input_shape[0]) == 2:
return "COS"
else:
option = "input_shape = %s" % (str(keras_layer.input_shape[0]))
_utils.raise_error_unsupported_option(option, mode, keras_layer.name)
elif mode == "dot":
if len(keras_layer.input_shape[0]) == 2:
return "DOT"
else:
option = "input_shape = %s" % (str(keras_layer.input_shape[0]))
_utils.raise_error_unsupported_option(option, mode, keras_layer.name)
elif mode == "max":
return "MAX"
elif mode == "ave":
return "AVE"
else:
_utils.raise_error_unsupported_categorical_option(
"mode", mode, "Merge", keras_layer.name
) | [
"def",
"_get_elementwise_name_from_keras_layer",
"(",
"keras_layer",
")",
":",
"mode",
"=",
"keras_layer",
".",
"mode",
"if",
"mode",
"==",
"\"sum\"",
":",
"return",
"\"ADD\"",
"elif",
"mode",
"==",
"\"mul\"",
":",
"return",
"\"MULTIPLY\"",
"elif",
"mode",
"==",
"\"concat\"",
":",
"if",
"len",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
"==",
"3",
"and",
"(",
"keras_layer",
".",
"concat_axis",
"==",
"1",
"or",
"keras_layer",
".",
"concat_axis",
"==",
"-",
"2",
")",
":",
"return",
"\"SEQUENCE_CONCAT\"",
"elif",
"len",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
"==",
"4",
"and",
"(",
"keras_layer",
".",
"concat_axis",
"==",
"3",
"or",
"keras_layer",
".",
"concat_axis",
"==",
"-",
"1",
")",
":",
"return",
"\"CONCAT\"",
"elif",
"len",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
"==",
"2",
"and",
"(",
"keras_layer",
".",
"concat_axis",
"==",
"1",
"or",
"keras_layer",
".",
"concat_axis",
"==",
"-",
"1",
")",
":",
"return",
"\"CONCAT\"",
"else",
":",
"option",
"=",
"\"input_shape = %s concat_axis = %s\"",
"%",
"(",
"str",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
",",
"str",
"(",
"keras_layer",
".",
"concat_axis",
")",
",",
")",
"_utils",
".",
"raise_error_unsupported_option",
"(",
"option",
",",
"mode",
",",
"keras_layer",
".",
"name",
")",
"elif",
"mode",
"==",
"\"cos\"",
":",
"if",
"len",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
"==",
"2",
":",
"return",
"\"COS\"",
"else",
":",
"option",
"=",
"\"input_shape = %s\"",
"%",
"(",
"str",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
")",
"_utils",
".",
"raise_error_unsupported_option",
"(",
"option",
",",
"mode",
",",
"keras_layer",
".",
"name",
")",
"elif",
"mode",
"==",
"\"dot\"",
":",
"if",
"len",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
"==",
"2",
":",
"return",
"\"DOT\"",
"else",
":",
"option",
"=",
"\"input_shape = %s\"",
"%",
"(",
"str",
"(",
"keras_layer",
".",
"input_shape",
"[",
"0",
"]",
")",
")",
"_utils",
".",
"raise_error_unsupported_option",
"(",
"option",
",",
"mode",
",",
"keras_layer",
".",
"name",
")",
"elif",
"mode",
"==",
"\"max\"",
":",
"return",
"\"MAX\"",
"elif",
"mode",
"==",
"\"ave\"",
":",
"return",
"\"AVE\"",
"else",
":",
"_utils",
".",
"raise_error_unsupported_categorical_option",
"(",
"\"mode\"",
",",
"mode",
",",
"\"Merge\"",
",",
"keras_layer",
".",
"name",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/converters/keras/_layers.py#L73-L120 |
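For the simple modes ("sum", "mul", "max", "ave") the helper above inspects only keras_layer.mode, so a bare stub is enough to exercise it; _FakeMerge is invented for illustration, and the function itself is module-private:

    # Stub standing in for a Keras Merge layer with mode 'sum'.
    class _FakeMerge(object):
        mode = "sum"

    assert _get_elementwise_name_from_keras_layer(_FakeMerge()) == "ADD"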
||
nyuwireless-unipd/ns3-mmwave | 4ff9e87e8079764e04cbeccd8e85bff15ae16fb3 | bindings/python/rad_util.py | python | is_rotated | (seq1, seq2) | return False | Return true if the first sequence is a rotation of the second sequence.
>>> seq1 = ['A', 'B', 'C', 'D']
>>> seq2 = ['C', 'D', 'A', 'B']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['C', 'D', 'B', 'A']
>>> int(is_rotated(seq1, seq2))
0
>>> seq1 = ['A', 'B', 'C', 'A']
>>> seq2 = ['A', 'A', 'B', 'C']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'B', 'C', 'A']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'A', 'C', 'B']
>>> int(is_rotated(seq1, seq2))
0 | Return true if the first sequence is a rotation of the second sequence. | [
"Return",
"true",
"if",
"the",
"first",
"sequence",
"is",
"a",
"rotation",
"of",
"the",
"second",
"sequence",
"."
] | def is_rotated(seq1, seq2):
"""Return true if the first sequence is a rotation of the second sequence.
>>> seq1 = ['A', 'B', 'C', 'D']
>>> seq2 = ['C', 'D', 'A', 'B']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['C', 'D', 'B', 'A']
>>> int(is_rotated(seq1, seq2))
0
>>> seq1 = ['A', 'B', 'C', 'A']
>>> seq2 = ['A', 'A', 'B', 'C']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'B', 'C', 'A']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'A', 'C', 'B']
>>> int(is_rotated(seq1, seq2))
0
"""
# Do a sanity check.
if len(seq1) != len(seq2):
return False
# Look for occurrences of second sequence head item in first sequence.
start_indexes = []
head_item = seq2[0]
for index1 in range(len(seq1)):
if seq1[index1] == head_item:
start_indexes.append(index1)
# Check that wrapped sequence matches.
double_seq1 = seq1 + seq1
for index1 in start_indexes:
if double_seq1[index1:index1+len(seq1)] == seq2:
return True
return False | [
"def",
"is_rotated",
"(",
"seq1",
",",
"seq2",
")",
":",
"# Do a sanity check.",
"if",
"len",
"(",
"seq1",
")",
"!=",
"len",
"(",
"seq2",
")",
":",
"return",
"False",
"# Look for occurrences of second sequence head item in first sequence.",
"start_indexes",
"=",
"[",
"]",
"head_item",
"=",
"seq2",
"[",
"0",
"]",
"for",
"index1",
"in",
"range",
"(",
"len",
"(",
"seq1",
")",
")",
":",
"if",
"seq1",
"[",
"index1",
"]",
"==",
"head_item",
":",
"start_indexes",
".",
"append",
"(",
"index1",
")",
"# Check that wrapped sequence matches.",
"double_seq1",
"=",
"seq1",
"+",
"seq1",
"for",
"index1",
"in",
"start_indexes",
":",
"if",
"double_seq1",
"[",
"index1",
":",
"index1",
"+",
"len",
"(",
"seq1",
")",
"]",
"==",
"seq2",
":",
"return",
"True",
"return",
"False"
] | https://github.com/nyuwireless-unipd/ns3-mmwave/blob/4ff9e87e8079764e04cbeccd8e85bff15ae16fb3/bindings/python/rad_util.py#L735-L775 |
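The doubled-sequence trick at the end of is_rotated (seq1 + seq1 contains every rotation of seq1 as a slice) collapses to a one-liner for strings, where substring search is built in; this variant is a sketch, not a drop-in replacement for the list version above:

    # String specialization of the same idea: s2 is a rotation of s1 iff it
    # occurs inside s1 doubled (the length check rules out partial matches).
    def is_rotated_str(s1, s2):
        return len(s1) == len(s2) and s2 in s1 + s1

    assert is_rotated_str("ABCD", "CDAB")
    assert not is_rotated_str("ABCD", "CDBA")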
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/setuptools/command/build_py.py | python | build_py._get_platform_patterns | (spec, package, src_dir) | return (
# Each pattern has to be converted to a platform-specific path
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
) | yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir. | yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir. | [
"yield",
"platform",
"-",
"specific",
"path",
"patterns",
"(",
"suitable",
"for",
"glob",
"or",
"fn_match",
")",
"from",
"a",
"glob",
"-",
"based",
"spec",
"(",
"such",
"as",
"self",
".",
"package_data",
"or",
"self",
".",
"exclude_package_data",
")",
"matching",
"package",
"in",
"src_dir",
"."
] | def _get_platform_patterns(spec, package, src_dir):
"""
yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
"""
raw_patterns = itertools.chain(
spec.get('', []),
spec.get(package, []),
)
return (
# Each pattern has to be converted to a platform-specific path
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
) | [
"def",
"_get_platform_patterns",
"(",
"spec",
",",
"package",
",",
"src_dir",
")",
":",
"raw_patterns",
"=",
"itertools",
".",
"chain",
"(",
"spec",
".",
"get",
"(",
"''",
",",
"[",
"]",
")",
",",
"spec",
".",
"get",
"(",
"package",
",",
"[",
"]",
")",
",",
")",
"return",
"(",
"# Each pattern has to be converted to a platform-specific path",
"os",
".",
"path",
".",
"join",
"(",
"src_dir",
",",
"convert_path",
"(",
"pattern",
")",
")",
"for",
"pattern",
"in",
"raw_patterns",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/setuptools/command/build_py.py#L220-L235 |
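_get_platform_patterns merges the catch-all '' key with the per-package key and roots each glob at src_dir. A sketch with a made-up spec, assuming the function is callable as the build_py staticmethod its bare (no self) signature suggests:

    # Patterns under '' apply to every package; 'pkg' entries only to pkg.
    from setuptools.command.build_py import build_py
    spec = {'': ['*.txt'], 'pkg': ['data/*.json']}
    patterns = list(build_py._get_platform_patterns(spec, 'pkg', 'src/pkg'))
    # -> ['src/pkg/*.txt', 'src/pkg/data/*.json'] on a POSIX filesystem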
|
cvxpy/cvxpy | 5165b4fb750dfd237de8659383ef24b4b2e33aaf | cvxpy/atoms/min.py | python | min.is_atom_log_log_convex | (self) | return False | Is the atom log-log convex? | Is the atom log-log convex? | [
"Is",
"the",
"atom",
"log",
"-",
"log",
"convex?"
] | def is_atom_log_log_convex(self) -> bool:
"""Is the atom log-log convex?
"""
return False | [
"def",
"is_atom_log_log_convex",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"False"
] | https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/min.py#L84-L87 |
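The hard-coded False above reflects that min is log-log concave, not convex, under cvxpy's DGP ruleset. A hedged check through the public expression API, assuming a DGP-capable cvxpy version:

    import cvxpy as cp
    x = cp.Variable(3, pos=True)               # positive variables, as DGP requires
    assert not cp.min(x).is_log_log_convex()   # consistent with the atom above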
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | third_party/bintrees/bintrees/rbtree.py | python | RBTree.root | (self) | return self._root | root node of T | root node of T | [
"root",
"node",
"of",
"T"
] | def root(self):
""" root node of T """
return self._root | [
"def",
"root",
"(",
"self",
")",
":",
"return",
"self",
".",
"_root"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/bintrees/bintrees/rbtree.py#L142-L144 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/dataview.py | python | DataViewTreeCtrl.InsertItem | (*args, **kwargs) | return _dataview.DataViewTreeCtrl_InsertItem(*args, **kwargs) | InsertItem(self, DataViewItem parent, DataViewItem previous, String text,
int icon=-1, wxClientData data=None) -> DataViewItem | InsertItem(self, DataViewItem parent, DataViewItem previous, String text,
int icon=-1, wxClientData data=None) -> DataViewItem | [
"InsertItem",
"(",
"self",
"DataViewItem",
"parent",
"DataViewItem",
"previous",
"String",
"text",
"int",
"icon",
"=",
"-",
"1",
"wxClientData",
"data",
"=",
"None",
")",
"-",
">",
"DataViewItem"
] | def InsertItem(*args, **kwargs):
"""
InsertItem(self, DataViewItem parent, DataViewItem previous, String text,
int icon=-1, wxClientData data=None) -> DataViewItem
"""
return _dataview.DataViewTreeCtrl_InsertItem(*args, **kwargs) | [
"def",
"InsertItem",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_dataview",
".",
"DataViewTreeCtrl_InsertItem",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/dataview.py#L2497-L2502 |
|
libLAS/libLAS | e6a1aaed412d638687b8aec44f7b12df7ca2bbbb | python/liblas/point.py | python | Point.set_raw_z | (self, value) | return core.las.LASPoint_SetRawZ(self.handle, value) | Sets the Z coordinate of the LAS point to an integer value
value.
.. note::
The point will be scaled according to the :obj:`liblas.point.Point.header`'s
scale value for the Z dimension when returned as a double :obj:`liblas.point.Point.z`. | Sets the Z coordinate of the LAS point to an integer value
value. | [
"Sets",
"the",
"Z",
"coordinate",
"of",
"the",
"LAS",
"point",
"to",
"an",
"integer",
"value",
"value",
"."
] | def set_raw_z(self, value):
"""Sets the Z coordinate of the LAS point to an integer value
value.
.. note::
The point will be scaled according to the :obj:`liblas.point.Point.header`'s
scale value for the Z dimension when returned as a double :obj:`liblas.point.Point.z`.
"""
return core.las.LASPoint_SetRawZ(self.handle, value) | [
"def",
"set_raw_z",
"(",
"self",
",",
"value",
")",
":",
"return",
"core",
".",
"las",
".",
"LASPoint_SetRawZ",
"(",
"self",
".",
"handle",
",",
"value",
")"
] | https://github.com/libLAS/libLAS/blob/e6a1aaed412d638687b8aec44f7b12df7ca2bbbb/python/liblas/point.py#L200-L208 |
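set_raw_z stores the unscaled integer; the double-valued z is recovered through the header's scale and offset. An illustration only, with pt assumed to be an existing liblas Point whose header uses a Z scale of 0.01 and offset of 0.0 (that header setup is not shown):

    pt.set_raw_z(1234)   # stores the integer 1234 via LASPoint_SetRawZ
    # Reading pt.z would then yield 1234 * 0.01 + 0.0 == 12.34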
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/core/fromnumeric.py | python | nonzero | (a) | return _wrapfunc(a, 'nonzero') | Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) | Return the indices of the elements that are non-zero. | [
"Return",
"the",
"indices",
"of",
"the",
"elements",
"that",
"are",
"non",
"-",
"zero",
"."
] | def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero') | [
"def",
"nonzero",
"(",
"a",
")",
":",
"return",
"_wrapfunc",
"(",
"a",
",",
"'nonzero'",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/core/fromnumeric.py#L1805-L1896 |
|
NVIDIAGameWorks/kaolin | e5148d05e9c1e2ce92a07881ce3593b1c5c3f166 | kaolin/metrics/trianglemesh.py | python | average_edge_length | (vertices, faces) | | return edge_length | r"""Returns the average length of each face in a mesh.
Args:
vertices (torch.Tensor): Batched vertices, of shape
:math:`(\text{batch_size}, \text{num_vertices}, 3)`.
faces (torch.LongTensor): Faces, of shape :math:`(\text{num_faces}, 3)`.
Returns:
(torch.Tensor):
average length of the edges in each face, of shape
:math:`(\text{batch_size}, \text{num_faces})`.
Example:
>>> vertices = torch.tensor([[[1, 0, 0],
... [0, 1, 0],
... [0, 0, 1]]], dtype=torch.float)
>>> faces = torch.tensor([[0, 1, 2]])
>>> average_edge_length(vertices, faces)
tensor([[1.4142]]) | r"""Returns the average edge length of each face in a mesh. | [
"r",
"Returns",
"the",
"average",
"length",
"of",
"each",
"faces",
"in",
"a",
"mesh",
"."
] | def average_edge_length(vertices, faces):
r"""Returns the average length of each faces in a mesh.
Args:
vertices (torch.Tensor): Batched vertices, of shape
:math:`(\text{batch_size}, \text{num_vertices}, 3)`.
faces (torch.LongTensor): Faces, of shape :math:`(\text{num_faces}, 3)`.
Returns:
(torch.Tensor):
average length of the edges in each face, of shape
:math:`(\text{batch_size}, \text{num_faces})`.
Example:
>>> vertices = torch.tensor([[[1, 0, 0],
... [0, 1, 0],
... [0, 0, 1]]], dtype=torch.float)
>>> faces = torch.tensor([[0, 1, 2]])
>>> average_edge_length(vertices, faces)
tensor([[1.4142]])
"""
batch_size = vertices.shape[0]
p1 = torch.index_select(vertices, 1, faces[:, 0])
p2 = torch.index_select(vertices, 1, faces[:, 1])
p3 = torch.index_select(vertices, 1, faces[:, 2])
# get edge length
e1 = p2 - p1
e2 = p3 - p1
e3 = p2 - p3
el1 = torch.sqrt((torch.sum(e1**2, dim=2)))
el2 = torch.sqrt((torch.sum(e2**2, dim=2)))
el3 = torch.sqrt((torch.sum(e3**2, dim=2)))
edge_length = (el1 + el2 + el3) / 3.
return edge_length | [
"def",
"average_edge_length",
"(",
"vertices",
",",
"faces",
")",
":",
"batch_size",
"=",
"vertices",
".",
"shape",
"[",
"0",
"]",
"p1",
"=",
"torch",
".",
"index_select",
"(",
"vertices",
",",
"1",
",",
"faces",
"[",
":",
",",
"0",
"]",
")",
"p2",
"=",
"torch",
".",
"index_select",
"(",
"vertices",
",",
"1",
",",
"faces",
"[",
":",
",",
"1",
"]",
")",
"p3",
"=",
"torch",
".",
"index_select",
"(",
"vertices",
",",
"1",
",",
"faces",
"[",
":",
",",
"2",
"]",
")",
"# get edge lentgh",
"e1",
"=",
"p2",
"-",
"p1",
"e2",
"=",
"p3",
"-",
"p1",
"e3",
"=",
"p2",
"-",
"p3",
"el1",
"=",
"torch",
".",
"sqrt",
"(",
"(",
"torch",
".",
"sum",
"(",
"e1",
"**",
"2",
",",
"dim",
"=",
"2",
")",
")",
")",
"el2",
"=",
"torch",
".",
"sqrt",
"(",
"(",
"torch",
".",
"sum",
"(",
"e2",
"**",
"2",
",",
"dim",
"=",
"2",
")",
")",
")",
"el3",
"=",
"torch",
".",
"sqrt",
"(",
"(",
"torch",
".",
"sum",
"(",
"e3",
"**",
"2",
",",
"dim",
"=",
"2",
")",
")",
")",
"edge_length",
"=",
"(",
"el1",
"+",
"el2",
"+",
"el3",
")",
"/",
"3.",
"return",
"edge_length"
] | https://github.com/NVIDIAGameWorks/kaolin/blob/e5148d05e9c1e2ce92a07881ce3593b1c5c3f166/kaolin/metrics/trianglemesh.py#L265-L302 |
|
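The same per-face mean edge length can be reproduced with plain PyTorch, which makes the docstring's `tensor([[1.4142]])` result easy to verify without installing kaolin (shapes follow the record's docstring):

```python
import torch

# Plain-PyTorch reproduction of the record above.
vertices = torch.tensor([[[1., 0., 0.],
                          [0., 1., 0.],
                          [0., 0., 1.]]])   # (batch_size=1, num_vertices=3, 3)
faces = torch.tensor([[0, 1, 2]])           # (num_faces=1, 3)

p1 = torch.index_select(vertices, 1, faces[:, 0])
p2 = torch.index_select(vertices, 1, faces[:, 1])
p3 = torch.index_select(vertices, 1, faces[:, 2])
edge_length = ((p2 - p1).pow(2).sum(dim=2).sqrt()
               + (p3 - p1).pow(2).sum(dim=2).sqrt()
               + (p2 - p3).pow(2).sum(dim=2).sqrt()) / 3.
print(edge_length)  # tensor([[1.4142]]) -- every edge of this triangle is sqrt(2)
```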
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/aui.py | python | AuiTabContainer.__init__ | (self, *args, **kwargs) | __init__(self) -> AuiTabContainer | __init__(self) -> AuiTabContainer | [
"__init__",
"(",
"self",
")",
"-",
">",
"AuiTabContainer"
] | def __init__(self, *args, **kwargs):
"""__init__(self) -> AuiTabContainer"""
_aui.AuiTabContainer_swiginit(self,_aui.new_AuiTabContainer(*args, **kwargs)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_aui",
".",
"AuiTabContainer_swiginit",
"(",
"self",
",",
"_aui",
".",
"new_AuiTabContainer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L1124-L1126 |
||
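`AuiTabContainer` is a low-level helper that AUI manages internally; application code normally reaches it through `wx.aui.AuiNotebook`. A hedged sketch of that higher-level entry point (classic wxPython API, shown for orientation rather than as canonical usage):

```python
import wx
import wx.aui

app = wx.App(False)
frame = wx.Frame(None, title="AUI tabs")
notebook = wx.aui.AuiNotebook(frame)  # creates and drives AuiTabContainers internally
notebook.AddPage(wx.Panel(notebook), "Page 1")
frame.Show()
app.MainLoop()
```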
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py | python | Environment.can_add | (self, dist) | return py_compat and compatible_platforms(dist.platform, self.platform) | Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned. | Is distribution `dist` acceptable for this environment? | [
"Is",
"distribution",
"dist",
"acceptable",
"for",
"this",
"environment?"
] | def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform) | [
"def",
"can_add",
"(",
"self",
",",
"dist",
")",
":",
"py_compat",
"=",
"(",
"self",
".",
"python",
"is",
"None",
"or",
"dist",
".",
"py_version",
"is",
"None",
"or",
"dist",
".",
"py_version",
"==",
"self",
".",
"python",
")",
"return",
"py_compat",
"and",
"compatible_platforms",
"(",
"dist",
".",
"platform",
",",
"self",
".",
"platform",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L986-L998 |
|
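A small sketch of the acceptance rule: a distribution passes `can_add` when its Python version matches (or either side leaves it unspecified) and its platform is compatible. This uses the public `pkg_resources` API:

```python
import pkg_resources

env = pkg_resources.Environment(search_path=[], python="3.7")

ok = pkg_resources.Distribution(project_name="demo", version="1.0", py_version="3.7")
bad = pkg_resources.Distribution(project_name="demo", version="1.0", py_version="2.7")
anyver = pkg_resources.Distribution(project_name="demo", version="1.0", py_version=None)

print(env.can_add(ok))      # True: Python version matches, platform unrestricted
print(env.can_add(bad))     # False: Python version mismatch
print(env.can_add(anyver))  # True: dist.py_version is None, so any Python is fine
```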
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/ops/_grad_experimental/grad_math_ops.py | python | get_bprop_index_addcmul | (self) | return bprop | Generate bprop for Addcmul | Generate bprop for Addcmul | [
"Generate",
"bprop",
"for",
"Addcmul"
] | def get_bprop_index_addcmul(self):
"""Generate bprop for Addcmul"""
mul_op = P.Mul()
def bprop(input_data, x1, x2, value, out, dout):
dx1 = mul_op(dout, mul_op(value, x2))
dx2 = mul_op(dout, mul_op(value, x1))
dvalue = mul_op(dout, mul_op(x1, x2))
return dout, dx1, dx2, dvalue
return bprop | [
"def",
"get_bprop_index_addcmul",
"(",
"self",
")",
":",
"mul_op",
"=",
"P",
".",
"Mul",
"(",
")",
"def",
"bprop",
"(",
"input_data",
",",
"x1",
",",
"x2",
",",
"value",
",",
"out",
",",
"dout",
")",
":",
"dx1",
"=",
"mul_op",
"(",
"dout",
",",
"mul_op",
"(",
"value",
",",
"x2",
")",
")",
"dx2",
"=",
"mul_op",
"(",
"dout",
",",
"mul_op",
"(",
"value",
",",
"x1",
")",
")",
"dvalue",
"=",
"mul_op",
"(",
"dout",
",",
"mul_op",
"(",
"x1",
",",
"x2",
")",
")",
"return",
"dout",
",",
"dx1",
",",
"dx2",
",",
"dvalue",
"return",
"bprop"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_grad_experimental/grad_math_ops.py#L103-L113 |
|
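The bprop follows directly from `addcmul(input, x1, x2, value) = input + value * x1 * x2`: the partials are `1`, `value*x2`, `value*x1`, and `x1*x2`, each scaled by the upstream gradient `dout`. A framework-free finite-difference check of the `x1` partial (NumPy stand-in, not MindSpore code):

```python
import numpy as np

def addcmul(inp, x1, x2, value):
    # Elementwise definition matching the bprop above.
    return inp + value * x1 * x2

rng = np.random.default_rng(0)
inp, x1, x2, value = rng.normal(size=(4, 3))
dout, eps = 1.0, 1e-6

# Central difference w.r.t. x1 versus the analytic gradient dout * value * x2.
num = (addcmul(inp, x1 + eps, x2, value) - addcmul(inp, x1 - eps, x2, value)) / (2 * eps)
ana = dout * value * x2
assert np.allclose(num, ana, atol=1e-4)
```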
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | FileTypeInfo.SetIcon | (*args, **kwargs) | return _misc_.FileTypeInfo_SetIcon(*args, **kwargs) | SetIcon(self, String iconFile, int iconIndex=0) | SetIcon(self, String iconFile, int iconIndex=0) | [
"SetIcon",
"(",
"self",
"String",
"iconFile",
"int",
"iconIndex",
"=",
"0",
")"
] | def SetIcon(*args, **kwargs):
"""SetIcon(self, String iconFile, int iconIndex=0)"""
return _misc_.FileTypeInfo_SetIcon(*args, **kwargs) | [
"def",
"SetIcon",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"FileTypeInfo_SetIcon",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L2507-L2509 |
|
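A sketch of how this setter is typically used when registering a file type; the `wx.FileTypeInfo` constructor arguments below are illustrative only, since its signature varies across wxPython releases:

```python
import wx

# Illustrative association entry: MIME type, open command, print command,
# description (constructor layout assumed, not verified against this build).
info = wx.FileTypeInfo("text/x-foo", 'myeditor "%s"', "", "Foo document")
info.SetIcon("foo.ico", 0)  # icon file plus zero-based icon index, per the record
```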
mozilla/DeepSpeech | aa1d28530d531d0d92289bf5f11a49fe516fdc86 | bin/import_gram_vaani.py | python | setup_logging | (level) | Setup basic logging
Args:
level (int): minimum log level for emitting messages | Setup basic logging
Args:
level (int): minimum log level for emitting messages | [
"Setup",
"basic",
"logging",
"Args",
":",
"level",
"(",
"int",
")",
":",
"minimum",
"log",
"level",
"for",
"emitting",
"messages"
] | def setup_logging(level):
"""Setup basic logging
Args:
level (int): minimum log level for emitting messages
"""
format = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(
level=level, stream=sys.stdout, format=format, datefmt="%Y-%m-%d %H:%M:%S"
) | [
"def",
"setup_logging",
"(",
"level",
")",
":",
"format",
"=",
"\"[%(asctime)s] %(levelname)s:%(name)s:%(message)s\"",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"level",
",",
"stream",
"=",
"sys",
".",
"stdout",
",",
"format",
"=",
"format",
",",
"datefmt",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
")"
] | https://github.com/mozilla/DeepSpeech/blob/aa1d28530d531d0d92289bf5f11a49fe516fdc86/bin/import_gram_vaani.py#L78-L86 |
||
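Equivalent standalone configuration, showing what the helper sets up and how callers then emit messages (standard-library `logging` only):

```python
import logging
import sys

# Mirrors setup_logging(logging.INFO): timestamped messages to stdout.
logging.basicConfig(
    level=logging.INFO,
    stream=sys.stdout,
    format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("import_gram_vaani").info("logging configured")
```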
openvinotoolkit/openvino | dedcbeafa8b84cccdc55ca64b8da516682b381c7 | tools/pot/openvino/tools/pot/api/samples/face_detection/face_detection_sample.py | python | MTCNNEngine.set_model | (self, model) | Loads NetworkX model into InferenceEngine and stores it in Engine class
:param model: CompressedModel instance | Loads NetworkX model into InferenceEngine and stores it in Engine class
:param model: CompressedModel instance | [
"Loads",
"NetworkX",
"model",
"into",
"InferenceEngine",
"and",
"stores",
"it",
"in",
"Engine",
"class",
":",
"param",
"model",
":",
"CompressedModel",
"instance"
] | def set_model(self, model):
""" Loads NetworkX model into InferenceEngine and stores it in Engine class
:param model: CompressedModel instance
"""
# save graph to IR and use it to initialize IE Network
self._model = self._set_model(model)
self._output_layers = {}
stage_names = ['pnet', 'rnet', 'onet']
for stage, model_dict in enumerate(model.models):
self._output_layers[stage_names[stage]] = {
'probabilities': model_dict['name'] + '_' + self.config['outputs']['probabilities'][stage],
'regions': model_dict['name'] + '_' + self.config['outputs']['regions'][stage],
} | [
"def",
"set_model",
"(",
"self",
",",
"model",
")",
":",
"# save graph to IR and use it to initialize IE Network",
"self",
".",
"_model",
"=",
"self",
".",
"_set_model",
"(",
"model",
")",
"self",
".",
"_output_layers",
"=",
"{",
"}",
"stage_names",
"=",
"[",
"'pnet'",
",",
"'rnet'",
",",
"'onet'",
"]",
"for",
"stage",
",",
"model_dict",
"in",
"enumerate",
"(",
"model",
".",
"models",
")",
":",
"self",
".",
"_output_layers",
"[",
"stage_names",
"[",
"stage",
"]",
"]",
"=",
"{",
"'probabilities'",
":",
"model_dict",
"[",
"'name'",
"]",
"+",
"'_'",
"+",
"self",
".",
"config",
"[",
"'outputs'",
"]",
"[",
"'probabilities'",
"]",
"[",
"stage",
"]",
",",
"'regions'",
":",
"model_dict",
"[",
"'name'",
"]",
"+",
"'_'",
"+",
"self",
".",
"config",
"[",
"'outputs'",
"]",
"[",
"'regions'",
"]",
"[",
"stage",
"]",
",",
"}"
] | https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/pot/openvino/tools/pot/api/samples/face_detection/face_detection_sample.py#L93-L105 |
||
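The naming scheme is the interesting part: each stage's output tensors are looked up as `<model name>_<configured layer name>`. A plain-Python illustration with made-up config values (the real ones live in the sample's JSON config):

```python
# Illustration only: hypothetical names showing how _output_layers is built.
config_outputs = {
    "probabilities": ["prob1", "prob1", "prob1"],
    "regions": ["conv4-2", "conv5-2", "conv6-2"],
}
models = [{"name": "pnet"}, {"name": "rnet"}, {"name": "onet"}]

output_layers = {}
for stage, model_dict in enumerate(models):
    output_layers[["pnet", "rnet", "onet"][stage]] = {
        "probabilities": model_dict["name"] + "_" + config_outputs["probabilities"][stage],
        "regions": model_dict["name"] + "_" + config_outputs["regions"][stage],
    }
print(output_layers["pnet"])  # {'probabilities': 'pnet_prob1', 'regions': 'pnet_conv4-2'}
```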
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/cluster/hierarchical.py | python | FeatureAgglomeration.fit | (self, X, y=None, **params) | return AgglomerativeClustering.fit(self, X.T, **params) | Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self | Fit the hierarchical clustering on the data | [
"Fit",
"the",
"hierarchical",
"clustering",
"on",
"the",
"data"
] | def fit(self, X, y=None, **params):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The data
Returns
-------
self
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
ensure_min_features=2, estimator=self)
return AgglomerativeClustering.fit(self, X.T, **params) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"X",
"=",
"check_array",
"(",
"X",
",",
"accept_sparse",
"=",
"[",
"'csr'",
",",
"'csc'",
",",
"'coo'",
"]",
",",
"ensure_min_features",
"=",
"2",
",",
"estimator",
"=",
"self",
")",
"return",
"AgglomerativeClustering",
".",
"fit",
"(",
"self",
",",
"X",
".",
"T",
",",
"*",
"*",
"params",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/cluster/hierarchical.py#L823-L837 |
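Because `fit` transposes `X` before delegating to `AgglomerativeClustering`, the tree is built over features rather than samples. A minimal usage sketch with the public scikit-learn API:

```python
import numpy as np
from sklearn.cluster import FeatureAgglomeration

X = np.random.RandomState(0).rand(20, 6)   # 20 samples, 6 features
agglo = FeatureAgglomeration(n_clusters=2).fit(X)

print(agglo.labels_)             # one cluster label per *feature* (length 6)
print(agglo.transform(X).shape)  # (20, 2): features pooled within each cluster
```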