Dataset schema (one Python function record per row):

| column | type | observed lengths |
|---|---|---|
| nwo | string | 5–86 |
| sha | string | 40 |
| path | string | 4–189 |
| language | string | 1 distinct value |
| identifier | string | 1–94 |
| parameters | string | 2–4.03k |
| argument_list | string | 1 distinct value |
| return_statement | string | 0–11.5k |
| docstring | string | 1–33.2k |
| docstring_summary | string | 0–5.15k |
| docstring_tokens | sequence | n/a |
| function | string | 34–151k |
| function_tokens | sequence | n/a |
| url | string | 90–278 |

Each record below gives the repo (`nwo`), `path`, `identifier`, and source `url`, followed by the `function` source, which embeds its `docstring`. The `parameters`, `return_statement`, `docstring_summary`, and token-sequence columns repeat text already visible in the function body.

---
repo: ApolloAuto/apollo-platform | path: ros/third_party/lib_aarch64/python2.7/dist-packages/rosdep2/installers.py | identifier: `InstallerContext.get_installer`
url: https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_aarch64/python2.7/dist-packages/rosdep2/installers.py#L147-L153

```python
def get_installer(self, installer_key):
    """
    :returns: :class:`Installer` class associated with *installer_key*.
    :raises: :exc:`KeyError` If not associated installer
    :raises: :exc:`InstallFailed` If installer cannot produce an install command (e.g. if installer is not installed)
    """
    return self.installers[installer_key]
```

---
repo: esphome/esphome | path: esphome/yaml_util.py | identifier: `dump`
url: https://github.com/esphome/esphome/blob/40e06c9819f17409615d4f4eec5cfe4dc9a3776d/esphome/yaml_util.py#L351-L355

```python
def dump(dict_):
    """Dump YAML to a string and remove null."""
    return yaml.dump(
        dict_, default_flow_style=False, allow_unicode=True, Dumper=ESPHomeDumper
    )
```
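A minimal usage sketch, assuming an environment where `esphome.yaml_util` is importable (the config dict is made up for illustration):

```python
from esphome.yaml_util import dump  # assumes an esphome checkout/install

config = {"esphome": {"name": "kitchen"}, "wifi": {"ssid": "iot"}}
print(dump(config))
# esphome:
#   name: kitchen
# wifi:
#   ssid: iot
```

---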
repo: wxWidgets/wxPython-Classic | path: src/osx_carbon/_misc.py | identifier: `TextDataObject.GetText`
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L5201-L5207

```python
def GetText(*args, **kwargs):
    """
    GetText(self) -> String

    Returns the text associated with the data object.
    """
    return _misc_.TextDataObject_GetText(*args, **kwargs)
```

---
repo: benoitsteiner/tensorflow-opencl | path: tensorflow/python/framework/ops.py | identifier: `Operation._add_input`
url: https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/framework/ops.py#L1739-L1768

```python
def _add_input(self, tensor, dtype=None):
  """Add a new input to this operation.

  Args:
    tensor: the Tensor to add as an input.
    dtype: tf.DType: type of the input; defaults to
      the tensor's dtype.

  Raises:
    TypeError: if tensor is not a Tensor,
      or if input tensor type is not convertible to dtype.
    ValueError: if the Tensor is from a different graph.
  """
  assert not self._c_op, (
      "Operation._add_input doesn't work with C API")
  if not isinstance(tensor, Tensor):
    raise TypeError("tensor must be a Tensor: %s" % tensor)
  _assert_same_graph(self, tensor)
  if dtype is None:
    dtype = tensor.dtype
  else:
    dtype = dtypes.as_dtype(dtype)
    if not dtype.is_compatible_with(tensor.dtype):
      raise TypeError(
          "Cannot convert a tensor of type %s to an input of type %s" %
          (tensor.dtype.name, dtype.name))
  self._inputs.append(tensor)
  self._input_types_val.append(dtype)
  tensor._add_consumer(self)  # pylint: disable=protected-access
  self._recompute_node_def()
```

---
repo: lyxok1/Tiny-DSOD | path: scripts/cpp_lint.py | identifier: `FindNextMultiLineCommentStart`
url: https://github.com/lyxok1/Tiny-DSOD/blob/94d15450699bea0dd3720e75e2d273e476174fba/scripts/cpp_lint.py#L1123-L1131

```python
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  while lineix < len(lines):
    if lines[lineix].strip().startswith('/*'):
      # Only return this marker if the comment goes beyond this line
      if lines[lineix].strip().find('*/', 2) < 0:
        return lineix
    lineix += 1
  return len(lines)
```
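The function is pure Python, so its scan logic can be exercised in isolation (assuming the definition above is in scope):

```python
src = ['int x = 0;',
       '/* one-line comment */',  # '*/' found past position 2, so it is skipped
       '/* this comment',         # opens here and continues...
       '   ends here */',
       'int y = 1;']
print(FindNextMultiLineCommentStart(src, 0))  # -> 2
print(FindNextMultiLineCommentStart(src, 3))  # -> 5, i.e. len(lines): no further start
```

---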
repo: deepmind/open_spiel | path: open_spiel/python/algorithms/best_response.py | identifier: `BestResponsePolicy.joint_action_probabilities_counterfactual`
url: https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/algorithms/best_response.py#L135-L155

```python
def joint_action_probabilities_counterfactual(self, state):
  """Get list of action, probability tuples for simultaneous node.

  Counterfactual reach probabilities exclude the best-responder's actions,
  the sum of the probabilities is equal to the number of actions of the
  player _player_id.

  Args:
    state: the current state of the game.

  Returns:
    list of action, probability tuples. An action is a tuple of individual
    actions for each player of the game.
  """
  actions_per_player, probs_per_player = (
      openspiel_policy.joint_action_probabilities_aux(state, self._policy))
  probs_per_player[self._player_id] = [
      1.0 for _ in probs_per_player[self._player_id]
  ]
  return [(list(actions), np.prod(probs)) for actions, probs in zip(
      itertools.product(
          *actions_per_player), itertools.product(*probs_per_player))]
```
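The return expression pairs `itertools.product` over per-player action lists with the same product over per-player probability lists; a standalone illustration with toy numbers (not OpenSpiel data):

```python
import itertools
import numpy as np

actions_per_player = [[0, 1], [0, 1, 2]]          # player 0 (best responder), player 1
probs_per_player = [[1.0, 1.0], [0.5, 0.3, 0.2]]  # responder's probs overwritten with 1.0

joint = [(list(a), np.prod(p)) for a, p in zip(
    itertools.product(*actions_per_player),
    itertools.product(*probs_per_player))]
print(joint)
# [([0, 0], 0.5), ([0, 1], 0.3), ([0, 2], 0.2),
#  ([1, 0], 0.5), ([1, 1], 0.3), ([1, 2], 0.2)]
# The probabilities sum to 2.0: the number of actions of the best responder.
```

---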
repo: macchina-io/macchina.io | path: platform/JS/V8/v8/tools/stats-viewer.py | identifier: `SharedDataAccess.ByteAt`
url: https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/tools/stats-viewer.py#L312-L314

```python
def ByteAt(self, index):
  """Return the (unsigned) byte at the specified byte index."""
  return ord(self.CharAt(index))
```

---
repo: glotzerlab/hoomd-blue | path: hoomd/operations.py | identifier: `Operations.updaters`
url: https://github.com/glotzerlab/hoomd-blue/blob/f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a/hoomd/operations.py#L273-L279

```python
def updaters(self):
    """list[`hoomd.operation.Updater`]: A list of updater operations.

    Holds the list of updaters associated with this collection. The list can
    be modified as a standard Python list.
    """
    return self._updaters
```

---
repo: google/nucleus | path: nucleus/util/variant_utils.py | identifier: `_genotype_order_in_likelihoods`
url: https://github.com/google/nucleus/blob/68d3947fafba1337f294c0668a6e1c7f3f1273e3/nucleus/util/variant_utils.py#L718-L747

```python
def _genotype_order_in_likelihoods(num_alts, ploidy=2):
  """Yields tuples of `ploidy` ints for the given number of alt alleles.

  https://samtools.github.io/hts-specs/VCFv4.1.pdf
  "If A is the allele in REF and B,C,... are the alleles as ordered in ALT,
  the ordering of genotypes for the likelihoods is given by:
  F(j/k) = (k*(k+1)/2)+j. In other words, for biallelic sites the ordering is:
  AA,AB,BB; for triallelic sites the ordering is: AA,AB,BB,AC,BC,CC, etc."

  The biallelic sites in our case are 0/0, 0/1, 1/1.
  The triallelic sites are 0/0, 0/1, 1/1, 0/2, 1/2, 2/2.

  This wiki page has more information that generalizes to different ploidy.
  http://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes

  Args:
    num_alts: int. The number of alternate alleles at the site.
    ploidy: int. The ploidy for which to return genotypes.

  Yields:
    Tuples of `ploidy` ints representing allele indices in the order they appear
    in the corresponding genotype likelihood array.
  """
  if ploidy == 1:
    for i in range(num_alts + 1):
      yield (i,)
  elif ploidy == 2:
    for j in range(num_alts + 1):
      for i in range(j + 1):
        yield (i, j)
  else:
    raise NotImplementedError('Only haploid and diploid supported.')
```
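The generator is dependency-free, so the documented VCF ordering can be checked directly (assuming the definition above is in scope):

```python
print(list(_genotype_order_in_likelihoods(num_alts=2)))
# [(0, 0), (0, 1), (1, 1), (0, 2), (1, 2), (2, 2)]
# i.e. 0/0, 0/1, 1/1, 0/2, 1/2, 2/2 -- the triallelic ordering quoted in the docstring.
print(list(_genotype_order_in_likelihoods(num_alts=1, ploidy=1)))
# [(0,), (1,)]
```

---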
repo: catboost/catboost | path: contrib/python/scipy/py3/scipy/stats/stats.py | identifier: `describe`
url: https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/stats/stats.py#L1188-L1263

```python
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
    """
    Compute several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom (only for variance). Default is 1.
    bias : bool, optional
        If False, then the skewness and kurtosis calculations are corrected for
        statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    nobs : int or ndarray of ints
        Number of observations (length of data along `axis`).
        When 'omit' is chosen as nan_policy, each column is counted separately.
    minmax: tuple of ndarrays or floats
        Minimum and maximum value of data array.
    mean : ndarray or float
        Arithmetic mean of data along axis.
    variance : ndarray or float
        Unbiased variance of the data along axis, denominator is number of
        observations minus one.
    skewness : ndarray or float
        Skewness, based on moment calculations with denominator equal to
        the number of observations, i.e. no degrees of freedom correction.
    kurtosis : ndarray or float
        Kurtosis (Fisher). The kurtosis is normalized so that it is
        zero for the normal distribution. No degrees of freedom are used.

    See Also
    --------
    skew, kurtosis

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10)
    >>> stats.describe(a)
    DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666,
                   skewness=0.0, kurtosis=-1.2242424242424244)
    >>> b = [[1, 2], [3, 4]]
    >>> stats.describe(b)
    DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
                   mean=array([2., 3.]), variance=array([2., 2.]),
                   skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.describe(a, axis, ddof, bias)

    if a.size == 0:
        raise ValueError("The input must not be empty.")

    n = a.shape[axis]
    mm = (np.min(a, axis=axis), np.max(a, axis=axis))
    m = np.mean(a, axis=axis)
    v = np.var(a, axis=axis, ddof=ddof)
    sk = skew(a, axis, bias=bias)
    kurt = kurtosis(a, axis, bias=bias)

    return DescribeResult(n, mm, m, v, sk, kurt)
```

---
repo: catboost/catboost | path: contrib/python/setuptools/py2/setuptools/command/build_py.py | identifier: `build_py.run`
url: https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/command/build_py.py#L43-L61

```python
def run(self):
    """Build modules, packages, and copy data files to build directory"""
    if not self.py_modules and not self.packages:
        return

    if self.py_modules:
        self.build_modules()

    if self.packages:
        self.build_packages()
        self.build_package_data()

    self.run_2to3(self.__updated_files, False)
    self.run_2to3(self.__updated_files, True)
    self.run_2to3(self.__doctests_2to3, True)

    # Only compile actual .py files, using our base class' idea of what our
    # output files are.
    self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
```

---
repo: aws/lumberyard | path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py | identifier: `Decimal._fix_nan`
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py#L1649-L1659

```python
def _fix_nan(self, context):
    """Decapitate the payload of a NaN to fit the context"""
    payload = self._int

    # maximum length of payload is precision if clamp=0,
    # precision-1 if clamp=1.
    max_payload_len = context.prec - context.clamp
    if len(payload) > max_payload_len:
        payload = payload[len(payload)-max_payload_len:].lstrip('0')
        return _dec_from_triple(self._sign, payload, self._exp, True)
    return Decimal(self)
```
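The decapitation is observable through ordinary arithmetic on a quiet NaN; a small sketch against the pure-Python implementation this file provides:

```python
from _pydecimal import Context, Decimal

ctx = Context(prec=4, clamp=0)   # max payload length = prec - clamp = 4
x = Decimal('NaN123456')         # quiet NaN carrying a 6-digit payload
print(ctx.plus(x))               # NaN3456 -- the leading digits are decapitated
```

---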
repo: aws/lumberyard | path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/configdialog.py | identifier: `ConfigDialog.extension_selected`
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/configdialog.py#L353-L366

```python
def extension_selected(self, event):
    "Handle selection of an extension from the list."
    newsel = self.extension_list.curselection()
    if newsel:
        newsel = self.extension_list.get(newsel)
    if newsel is None or newsel != self.current_extension:
        if self.current_extension:
            self.details_frame.config(text='')
            self.config_frame[self.current_extension].grid_forget()
            self.current_extension = None
        if newsel:
            self.details_frame.config(text=newsel)
            self.config_frame[newsel].grid(column=0, row=0, sticky='nsew')
            self.current_extension = newsel
```

---
repo: natanielruiz/android-yolo | path: jni-build/jni/include/tensorflow/python/ops/rnn_cell.py | identifier: `RNNCell.__call__`
url: https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/rnn_cell.py#L111-L128

```python
def __call__(self, inputs, state, scope=None):
  """Run this RNN cell on inputs, starting from the given state.

  Args:
    inputs: `2-D` tensor with shape `[batch_size x input_size]`.
    state: if `self.state_size` is an integer, this should be a `2-D Tensor`
      with shape `[batch_size x self.state_size]`. Otherwise, if
      `self.state_size` is a tuple of integers, this should be a tuple
      with shapes `[batch_size x s] for s in self.state_size`.
    scope: VariableScope for the created subgraph; defaults to class name.

  Returns:
    A pair containing:
    - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
    - New state: Either a single `2-D` tensor, or a tuple of tensors matching
      the arity and shapes of `state`.
  """
  raise NotImplementedError("Abstract method")
```
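The contract is `(inputs, state) -> (output, new_state)`; a minimal NumPy stand-in honoring it (illustration only, not TensorFlow code):

```python
import numpy as np

class ToyRNNCell:
    """Vanilla RNN step: new_state = tanh(x @ W_x + h @ W_h); output is new_state."""

    def __init__(self, input_size, state_size, seed=0):
        rng = np.random.default_rng(seed)
        self.W_x = rng.normal(scale=0.1, size=(input_size, state_size))
        self.W_h = rng.normal(scale=0.1, size=(state_size, state_size))
        self.state_size = self.output_size = state_size

    def __call__(self, inputs, state, scope=None):
        new_state = np.tanh(inputs @ self.W_x + state @ self.W_h)
        return new_state, new_state  # (output, new state)

cell = ToyRNNCell(input_size=3, state_size=4)
x = np.zeros((2, 3))       # [batch_size x input_size]
h = np.zeros((2, 4))       # [batch_size x state_size]
out, h = cell(x, h)
print(out.shape, h.shape)  # (2, 4) (2, 4)
```

---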
repo: aws/lumberyard | path: dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/bdb.py | identifier: `Bdb.user_line`
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/bdb.py#L259-L261

```python
def user_line(self, frame):
    """Called when we stop or break at a line."""
    pass
```
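`user_line` is the hook a debugger subclass overrides; a minimal tracer built on the stdlib `bdb` module:

```python
import bdb

class FirstStopTracer(bdb.Bdb):
    def user_line(self, frame):
        print('stopped at line', frame.f_lineno)
        self.set_continue()  # report the first stop, then run to completion

FirstStopTracer().run('x = 1\ny = x + 1\n')  # prints: stopped at line 1
```

---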
repo: FreeCAD/FreeCAD | path: src/Mod/Draft/draftviewproviders/view_dimension.py | identifier: `ViewProviderLinearDimension.remove_dim_arrows`
url: https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftviewproviders/view_dimension.py#L776-L782

```python
def remove_dim_arrows(self):
    """Remove dimension arrows in the dimension lines.

    Remove the existing nodes.
    """
    self.node.removeChild(self.marks)
    self.node3d.removeChild(self.marks)
```

---
repo: PaddlePaddle/Paddle | path: python/paddle/fluid/layers/rnn.py | identifier: `RNNCell.get_initial_states`
url: https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/layers/rnn.py#L96-L196

```python
def get_initial_states(self,
                       batch_ref,
                       shape=None,
                       dtype='float32',
                       init_value=0,
                       batch_dim_idx=0):
    r"""
    Generate initialized states according to provided shape, data type and
    value.

    Parameters:
        batch_ref: A (possibly nested structure of) tensor variable[s].
            The first dimension of the tensor will be used as batch size to
            initialize states.
        shape: A (possibly nested structure of) shape[s], where a shape is
            represented as a list/tuple of integer). -1(for batch size) will
            beautomatically inserted if shape is not started with it. If None,
            property `state_shape` will be used. The default value is None.
        dtype: A (possibly nested structure of) data type[s]. The structure
            must be same as that of `shape`, except when all tensors' in states
            has the same data type, a single data type can be used. If
            property `cell.state_shape` is not available, float32 will be used
            as the data type. The default value is float32.
        init_value: A float value used to initialize states.
        batch_dim_idx: An integer indicating which dimension of the tensor in
            inputs represents batch size. The default value is 0.

    Returns:
        Variable: tensor variable[s] packed in the same structure provided \
            by shape, representing the initialized states.
    """
    if sys.version_info < (3, ):
        integer_types = (
            int,
            long, )
    else:
        integer_types = (int, )
    check_variable_and_dtype(batch_ref, 'batch_ref',
                             ['float32', 'float64', 'int32', 'int64'],
                             'RNNCell')
    check_type(shape, 'shape', (list, tuple, type(None), integer_types),
               'RNNCell')
    if isinstance(shape, (list, tuple)):
        shapes = map_structure(lambda x: x, shape)
        if isinstance(shape, list):
            for i, _shape in enumerate(shapes):
                check_type(_shape, 'shapes[' + str(i) + ']', integer_types,
                           'RNNCell')
        else:
            check_type(shapes, 'shapes', integer_types, 'RNNCell')
    check_dtype(dtype, 'dtype', ['float32', 'float64'], 'RNNCell')

    # TODO: use inputs and batch_size
    batch_ref = flatten(batch_ref)[0]

    def _is_shape_sequence(seq):
        if sys.version_info < (3, ):
            integer_types = (
                int,
                long, )
        else:
            integer_types = (int, )
        """For shape, list/tuple of integer is the finest-grained objection"""
        if (isinstance(seq, list) or isinstance(seq, tuple)):
            if reduce(lambda flag, x: isinstance(x, integer_types) and flag,
                      seq, True):
                return False
        # TODO: Add check for the illegal
        if isinstance(seq, dict):
            return True
        return (isinstance(seq, collections.Sequence) and
                not isinstance(seq, six.string_types))

    class Shape(object):
        def __init__(self, shape):
            self.shape = shape if shape[0] == -1 else ([-1] + list(shape))

    # nested structure of shapes
    states_shapes = self.state_shape if shape is None else shape
    is_sequence_ori = utils.is_sequence
    utils.is_sequence = _is_shape_sequence
    states_shapes = map_structure(lambda shape: Shape(shape), states_shapes)
    utils.is_sequence = is_sequence_ori

    # nested structure of dtypes
    try:
        states_dtypes = self.state_dtype if dtype is None else dtype
    except NotImplementedError:  # use fp32 as default
        states_dtypes = "float32"
    if len(flatten(states_dtypes)) == 1:
        dtype = flatten(states_dtypes)[0]
        states_dtypes = map_structure(lambda shape: dtype, states_shapes)

    init_states = map_structure(
        lambda shape, dtype: tensor.fill_constant_batch_size_like(
            input=batch_ref,
            shape=shape.shape,
            dtype=dtype,
            value=init_value,
            input_dim_idx=batch_dim_idx), states_shapes, states_dtypes)
    return init_states
```
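The core idea is mapping an initializer over a nested structure of shapes while treating flat integer lists as leaves; a dependency-free sketch (`map_nested` is a hypothetical stand-in for Paddle's `map_structure`):

```python
def map_nested(fn, struct):
    """Apply fn to every leaf shape of a nested list/tuple/dict structure."""
    if isinstance(struct, dict):
        return {k: map_nested(fn, v) for k, v in struct.items()}
    if isinstance(struct, (list, tuple)) and not all(isinstance(x, int) for x in struct):
        return type(struct)(map_nested(fn, v) for v in struct)
    return fn(struct)  # a leaf: a flat shape such as [hidden_size]

state_shape = {'h': [64], 'c': [64]}  # LSTM-style nested state, batch dim implied
init = map_nested(lambda s: ('zeros', [-1] + list(s)), state_shape)
print(init)  # {'h': ('zeros', [-1, 64]), 'c': ('zeros', [-1, 64])}
```

---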
repo: facebookincubator/profilo | path: deps/fmt/doc/build.py | identifier: `Pip.install`
url: https://github.com/facebookincubator/profilo/blob/d3a275d0e7897cc4e3507d543459f3227e85c67f/deps/fmt/doc/build.py#L13-L18

```python
def install(self, package, commit=None):
    "Install package using pip."
    if commit:
        package = 'git+https://github.com/{0}.git@{1}'.format(package, commit)
    print('Installing {0}'.format(package))
    check_call([self.path, 'install', package])
```
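A usage sketch; the `Pip` constructor is not shown in this record, so the calls below only assume an instance whose `path` attribute points at a pip executable:

```python
pip = Pip()  # constructor arguments not shown in this record
pip.install('breathe')
# Installing breathe
pip.install('sphinx-doc/sphinx', commit='deadbeef')
# Installing git+https://github.com/sphinx-doc/sphinx.git@deadbeef
```

---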
repo: cornell-zhang/heterocl | path: python/heterocl/tvm/api.py | identifier: `max_value`
url: https://github.com/cornell-zhang/heterocl/blob/6d9e4b4acc2ee2707b2d25b27298c0335bccedfd/python/heterocl/tvm/api.py#L33-L35

```python
def max_value(dtype):
    """maximum value of dtype"""
    return _api_internal._max_value(dtype)
```

---
repo: google/tink | path: python/tink/core/_primitive_wrapper.py | identifier: `PrimitiveWrapper.primitive_class`
url: https://github.com/google/tink/blob/59bb34495d1cb8f9d9dbc0f0a52c4f9e21491a14/python/tink/core/_primitive_wrapper.py#L45-L47

```python
def primitive_class(self) -> Type[P]:
  """Returns the class of the primitive produced by the wrapper."""
  raise NotImplementedError()
```

---
repo: OGRECave/ogre-next | path: Tools/Wings3DExporter/vector.py | identifier: `Vector.__xor__`
url: https://github.com/OGRECave/ogre-next/blob/287307980e6de8910f04f3cc0994451b075071fd/Tools/Wings3DExporter/vector.py#L43-L45

```python
def __xor__(self, other):
    "3d cross product"
    return self.cross(other)
```
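For reference, the same 3-D cross product computed with NumPy (`a ^ b` on `Vector` delegates to the equivalent `cross`):

```python
import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
print(np.cross(a, b))  # [0. 0. 1.] -- x cross y = z
```

---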
repo: openvinotoolkit/openvino | path: tools/mo/openvino/tools/mo/ops/If.py | identifier: `If.update_if_output_ports_shape`
url: https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/mo/openvino/tools/mo/ops/If.py#L144-L220

```python
def update_if_output_ports_shape(if_node: Node):
    """
    Update shape and values for If output ports.

    :param if_node: The If node to update output ports and shapes
    :return: None
    """
    node_name = if_node.soft_get('name', if_node.id)

    then_outputs = [node for node in if_node.then_graph.get_op_nodes() if node.has('output_id')]
    else_outputs = [node for node in if_node.else_graph.get_op_nodes() if node.has('output_id')]
    outputs_mapping = {}
    outputs_number = len(if_node.out_ports())

    if outputs_number == 0 and len(if_node.out_ports(control_flow=True)) != 0:
        # Some models have if with control flow outputs.
        # These shape inference for such ifs
        # TODO: need to rethink and redo support for control flow edges in if operation
        for node in if_node.out_nodes(control_flow=True).values():
            node.shape = int64_array([])
        return

    for port_id in if_node.out_ports().keys():
        outputs_mapping[port_id] = {}

    # variables then_contains_fake_outputs/else_contains_fake_outputs contains True value
    # if all outputs from then_body/else_body have shape [0]. It means then_body/else_body does not return data
    # and further shape_inference for this branch is not possible.
    # TODO: exclude support fake_outputs from this code when we will support shape_inference with empty tensors
    then_contains_fake_outputs = \
        If.results_mapping_and_finding_fake_outputs(then_outputs, 'then_graph', outputs_mapping)
    else_contains_fake_outputs = \
        If.results_mapping_and_finding_fake_outputs(else_outputs, 'else_graph', outputs_mapping)

    # use_then_shape is True when else_body or when both bodies do not return data. If use_then_shape is True If's
    # outputs will have the same shapes as then_body results
    use_then_shape = else_contains_fake_outputs or not then_contains_fake_outputs

    cond_value = if_node.in_port(0).data.get_value()

    for port_id in outputs_mapping:
        then_else_nodes = outputs_mapping[port_id]
        assert 'then_graph' in then_else_nodes.keys(), 'then_graph does not connect with If.out_port[{0}] ' \
                                                       'in {1} node!'.format(port_id, node_name)
        assert 'else_graph' in then_else_nodes.keys(), 'else_graph does not connect with If.out_port[{0}] ' \
                                                       'in {1} node!'.format(port_id, node_name)
        then_shape = then_else_nodes['then_graph'].in_port(0).data.get_shape()
        then_value = then_else_nodes['then_graph'].in_port(0).data.get_value()
        else_shape = then_else_nodes['else_graph'].in_port(0).data.get_shape()
        else_value = then_else_nodes['else_graph'].in_port(0).data.get_value()

        if is_fully_defined(cond_value):
            if cond_value.item() is True:
                if then_value is not None:
                    if_node.out_port(port_id).data.set_value(then_value)
                else:
                    if_node.out_port(port_id).data.set_shape(then_shape)
            else:
                if else_value is not None:
                    if_node.out_port(port_id).data.set_value(else_value)
                else:
                    if_node.out_port(port_id).data.set_shape(else_shape)
        else:
            if then_contains_fake_outputs ^ else_contains_fake_outputs:
                # if exactly one of the outputs is fake then use another one
                if_node.out_port(port_id).data.set_shape(then_shape if use_then_shape else else_shape)
            else:
                # find "intersection" which is equal to the dimension value if corresponding dimensions are equal
                # and dynamic otherwise
                assert len(then_shape) == len(else_shape), 'Ranks of "then" and "else" output tensors are ' \
                                                           'different for node {} for port {}'.format(node_name,
                                                                                                      port_id)
                output_shape = [d1 if is_fully_defined(d1) and is_fully_defined(d2) and d1 == d2 else
                                dynamic_dimension_value for d1, d2 in zip(then_shape, else_shape)]
                if_node.out_port(port_id).data.set_shape(output_shape)
```
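When the condition is not a compile-time constant, branch output shapes are merged dimension-wise: equal static dimensions survive, everything else becomes dynamic. The rule in isolation, with `-1` as a stand-in for OpenVINO's `dynamic_dimension_value`:

```python
DYNAMIC = -1  # stand-in for dynamic_dimension_value

def merge_branch_shapes(then_shape, else_shape):
    assert len(then_shape) == len(else_shape), 'branch ranks must match'
    return [d1 if d1 != DYNAMIC and d2 != DYNAMIC and d1 == d2 else DYNAMIC
            for d1, d2 in zip(then_shape, else_shape)]

print(merge_branch_shapes([1, 3, 224, 224], [1, 3, 224, 224]))  # [1, 3, 224, 224]
print(merge_branch_shapes([1, 3, 224, 224], [1, 3, 448, 448]))  # [1, 3, -1, -1]
```

---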
repo: wxWidgets/wxPython-Classic | path: src/gtk/_controls.py | identifier: `Slider.GetSelStart`
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L2919-L2921

```python
def GetSelStart(*args, **kwargs):
    """GetSelStart(self) -> int"""
    return _controls_.Slider_GetSelStart(*args, **kwargs)
```

---
repo: Xilinx/Vitis-AI | path: tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tpu_embedding.py | identifier: `TPUEmbedding.generate_send_gradients_op`
url: https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/tpu_embedding.py#L1005-L1050

```python
def generate_send_gradients_op(self,
                               feature_to_gradient_dict,
                               learning_rates=None):
  """Send gradient to TPU embedding.

  Args:
    feature_to_gradient_dict: dict mapping feature names to gradient wrt
      activations.
    learning_rates: dict mapping from learning rate key to dynamic learning
      rate. Defaults to `None`.

  Returns:
    SendTPUEmbeddingGradients Op.

  Raises:
    RuntimeError: If `mode` is not `TRAINING`.
  """
  if self._mode != TRAINING:
    raise RuntimeError('Only in training mode gradients need to '
                       'be sent to TPU embedding; got mode {}.'
                       .format(self._mode))
  if learning_rates is None:
    learning_rates = dict()

  gradients = []
  for table in self._table_to_features_dict:
    features = self._table_to_features_dict[table]
    table_gradients = []
    for feature in features:
      gradient = feature_to_gradient_dict[feature]
      # Expand dims for non-sequence feature to match sequence features.
      if gradient.shape.ndims == 2:
        gradient = array_ops.expand_dims(gradient, 1)
      table_gradients.append(gradient)
    interleaved_table_grads = array_ops.reshape(
        array_ops.concat(table_gradients, axis=1),
        [-1, array_ops.shape(table_gradients[0])[-1]])
    gradients.append(interleaved_table_grads)

  return tpu_ops.send_tpu_embedding_gradients(
      inputs=gradients,
      learning_rates=[
          learning_rates[tag] for tag in self._learning_rate_keys
      ],
      config=self.config_proto.SerializeToString())
```
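The concat-then-reshape step interleaves the per-feature gradients of one table row by row; the same shape manipulation in NumPy with toy tensors:

```python
import numpy as np

# Two features of one table, already expanded to [batch, seq, dim].
g1 = np.full((2, 1, 4), 1.0)  # feature A
g2 = np.full((2, 1, 4), 2.0)  # feature B
interleaved = np.concatenate([g1, g2], axis=1).reshape(-1, 4)
print(interleaved[:, 0])      # [1. 2. 1. 2.] -- A/B rows alternate per example
```

---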
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/masked/maskededit.py | python | MaskedEditMixin._findNextTemplateChar | (self, pos) | return pos | Find the position of the next non-editable character in the mask. | Find the position of the next non-editable character in the mask. | [
"Find",
"the",
"position",
"of",
"the",
"next",
"non",
"-",
"editable",
"character",
"in",
"the",
"mask",
"."
] | def _findNextTemplateChar(self, pos):
""" Find the position of the next non-editable character in the mask."""
while not self._isTemplateChar(pos) and pos < self._masklength:
pos += 1
return pos | [
"def",
"_findNextTemplateChar",
"(",
"self",
",",
"pos",
")",
":",
"while",
"not",
"self",
".",
"_isTemplateChar",
"(",
"pos",
")",
"and",
"pos",
"<",
"self",
".",
"_masklength",
":",
"pos",
"+=",
"1",
"return",
"pos"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/masked/maskededit.py#L4119-L4123 |
|
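The scan above is a plain linear walk over the mask. A self-contained sketch with a hypothetical phone-number mask, where anything other than the '#' placeholder counts as a template (literal) character:

```python
# Standalone sketch of the scan above; the mask and predicate are made up.
MASK = "(###) ###-####"

def is_template_char(pos):
    # The real control consults the compiled mask; here any character
    # other than the '#' placeholder counts as a literal template char.
    return MASK[pos] != "#"

def find_next_template_char(pos):
    while pos < len(MASK) and not is_template_char(pos):
        pos += 1
    return pos

print(find_next_template_char(1))  # 4 -> the ')' after the area code
```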
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/idl/idl/generator.py | python | _CppSourceFileWriter.gen_config_options | (self, spec, header_file_name) | Generate Config Option instances. | Generate Config Option instances. | [
"Generate",
"Config",
"Option",
"instances",
"."
] | def gen_config_options(self, spec, header_file_name):
# type: (ast.IDLAST, str) -> None
"""Generate Config Option instances."""
# pylint: disable=too-many-branches,too-many-statements
has_storage_targets = False
for opt in spec.configs:
if opt.cpp_varname is not None:
has_storage_targets = True
if opt.cpp_vartype is not None:
with self._condition(opt.condition, preprocessor_only=True):
init = ('{%s}' % (opt.default.expr)) if opt.default else ''
self._writer.write_line(
'%s %s%s;' % (opt.cpp_vartype, opt.cpp_varname, init))
self.write_empty_line()
root_opts = [] # type: List[ast.ConfigOption]
sections = {} # type: Dict[str, List[ast.ConfigOption]]
for opt in spec.configs:
if opt.section:
try:
sections[opt.section].append(opt)
except KeyError:
sections[opt.section] = [opt]
else:
root_opts.append(opt)
initializer = spec.globals.configs and spec.globals.configs.initializer
# pylint: disable=consider-using-ternary
blockname = (initializer and initializer.name) or (
'idl_' + hashlib.sha1(header_file_name.encode()).hexdigest())
if initializer and initializer.register:
with self._block(
'Status %s(optionenvironment::OptionSection* options_ptr) {' %
initializer.register, '}'):
self._writer.write_line('auto& options = *options_ptr;')
self._gen_config_options_register(root_opts, sections, True)
else:
with self.gen_namespace_block(''):
with self._block(
'MONGO_MODULE_STARTUP_OPTIONS_REGISTER(%s)(InitializerContext*) {' %
(blockname), '}'):
self._writer.write_line('auto& options = optionenvironment::startupOptions;')
self._gen_config_options_register(root_opts, sections, False)
self.write_empty_line()
if has_storage_targets:
if initializer and initializer.store:
with self._block(
'Status %s(const optionenvironment::Environment& params) {' %
initializer.store, '}'):
self._gen_config_options_store(spec.configs, True)
else:
with self.gen_namespace_block(''):
with self._block(
'MONGO_STARTUP_OPTIONS_STORE(%s)(InitializerContext*) {' % (blockname),
'}'):
# If all options are guarded by non-passing #ifdefs, then params will be unused.
self._writer.write_line(
'[[maybe_unused]] const auto& params = optionenvironment::startupOptionsParsed;'
)
self._gen_config_options_store(spec.configs, False)
self.write_empty_line() | [
"def",
"gen_config_options",
"(",
"self",
",",
"spec",
",",
"header_file_name",
")",
":",
"# type: (ast.IDLAST, str) -> None",
"# pylint: disable=too-many-branches,too-many-statements",
"has_storage_targets",
"=",
"False",
"for",
"opt",
"in",
"spec",
".",
"configs",
":",
"if",
"opt",
".",
"cpp_varname",
"is",
"not",
"None",
":",
"has_storage_targets",
"=",
"True",
"if",
"opt",
".",
"cpp_vartype",
"is",
"not",
"None",
":",
"with",
"self",
".",
"_condition",
"(",
"opt",
".",
"condition",
",",
"preprocessor_only",
"=",
"True",
")",
":",
"init",
"=",
"(",
"'{%s}'",
"%",
"(",
"opt",
".",
"default",
".",
"expr",
")",
")",
"if",
"opt",
".",
"default",
"else",
"''",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'%s %s%s;'",
"%",
"(",
"opt",
".",
"cpp_vartype",
",",
"opt",
".",
"cpp_varname",
",",
"init",
")",
")",
"self",
".",
"write_empty_line",
"(",
")",
"root_opts",
"=",
"[",
"]",
"# type: List[ast.ConfigOption]",
"sections",
"=",
"{",
"}",
"# type: Dict[str, List[ast.ConfigOption]]",
"for",
"opt",
"in",
"spec",
".",
"configs",
":",
"if",
"opt",
".",
"section",
":",
"try",
":",
"sections",
"[",
"opt",
".",
"section",
"]",
".",
"append",
"(",
"opt",
")",
"except",
"KeyError",
":",
"sections",
"[",
"opt",
".",
"section",
"]",
"=",
"[",
"opt",
"]",
"else",
":",
"root_opts",
".",
"append",
"(",
"opt",
")",
"initializer",
"=",
"spec",
".",
"globals",
".",
"configs",
"and",
"spec",
".",
"globals",
".",
"configs",
".",
"initializer",
"# pylint: disable=consider-using-ternary",
"blockname",
"=",
"(",
"initializer",
"and",
"initializer",
".",
"name",
")",
"or",
"(",
"'idl_'",
"+",
"hashlib",
".",
"sha1",
"(",
"header_file_name",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
")",
"if",
"initializer",
"and",
"initializer",
".",
"register",
":",
"with",
"self",
".",
"_block",
"(",
"'Status %s(optionenvironment::OptionSection* options_ptr) {'",
"%",
"initializer",
".",
"register",
",",
"'}'",
")",
":",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'auto& options = *options_ptr;'",
")",
"self",
".",
"_gen_config_options_register",
"(",
"root_opts",
",",
"sections",
",",
"True",
")",
"else",
":",
"with",
"self",
".",
"gen_namespace_block",
"(",
"''",
")",
":",
"with",
"self",
".",
"_block",
"(",
"'MONGO_MODULE_STARTUP_OPTIONS_REGISTER(%s)(InitializerContext*) {'",
"%",
"(",
"blockname",
")",
",",
"'}'",
")",
":",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'auto& options = optionenvironment::startupOptions;'",
")",
"self",
".",
"_gen_config_options_register",
"(",
"root_opts",
",",
"sections",
",",
"False",
")",
"self",
".",
"write_empty_line",
"(",
")",
"if",
"has_storage_targets",
":",
"if",
"initializer",
"and",
"initializer",
".",
"store",
":",
"with",
"self",
".",
"_block",
"(",
"'Status %s(const optionenvironment::Environment& params) {'",
"%",
"initializer",
".",
"store",
",",
"'}'",
")",
":",
"self",
".",
"_gen_config_options_store",
"(",
"spec",
".",
"configs",
",",
"True",
")",
"else",
":",
"with",
"self",
".",
"gen_namespace_block",
"(",
"''",
")",
":",
"with",
"self",
".",
"_block",
"(",
"'MONGO_STARTUP_OPTIONS_STORE(%s)(InitializerContext*) {'",
"%",
"(",
"blockname",
")",
",",
"'}'",
")",
":",
"# If all options are guarded by non-passing #ifdefs, then params will be unused.",
"self",
".",
"_writer",
".",
"write_line",
"(",
"'[[maybe_unused]] const auto& params = optionenvironment::startupOptionsParsed;'",
")",
"self",
".",
"_gen_config_options_store",
"(",
"spec",
".",
"configs",
",",
"False",
")",
"self",
".",
"write_empty_line",
"(",
")"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/idl/generator.py#L2524-L2592 |
||
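The generator above leans on `self._block(...)` and `gen_namespace_block(...)`, context managers that write an opening line, indent, and emit the closing line on exit. A minimal sketch of that writer pattern (the class and method names here are invented):

```python
# Minimal sketch of the block-writer pattern used by the IDL generator.
from contextlib import contextmanager

class CodeWriter:
    def __init__(self):
        self.lines = []
        self.indent = 0

    def write_line(self, text):
        self.lines.append("    " * self.indent + text)

    @contextmanager
    def block(self, opening, closing):
        # Emit the opening line, indent the body, and always close the
        # block even if the body raises.
        self.write_line(opening)
        self.indent += 1
        try:
            yield
        finally:
            self.indent -= 1
            self.write_line(closing)

w = CodeWriter()
with w.block("namespace mongo {", "}  // namespace mongo"):
    with w.block("Status storeOptions() {", "}"):
        w.write_line("return Status::OK();")
print("\n".join(w.lines))
```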
microsoft/onnxruntime | f92e47e95b13a240e37caf7b36577983544f98fc | orttraining/orttraining/python/training/ortmodule/_io.py | python | _parse_outputs_and_extract_names_and_dynamic_axes | (module_output) | return output_names, output_dynamic_axes | Parses through the module output and returns output names and dynamic axes | Parses through the module output and returns output names and dynamic axes | [
"Parses",
"through",
"the",
"module",
"output",
"and",
"returns",
"output",
"names",
"and",
"dynamic",
"axes"
] | def _parse_outputs_and_extract_names_and_dynamic_axes(module_output):
"""Parses through the module output and returns output names and dynamic axes"""
def _populate_output_names_and_dynamic_axes(output, output_names, output_dynamic_axes, output_idx):
# Depth first traversal to traverse through the entire output collecting output names and dynamic axes
if output is None:
return
elif isinstance(output, torch.Tensor):
# Naming the outputs with a hyphen ensures that there can be no input with the same
# name, preventing collisions with other NodeArgs (for example an input to forward called output0)
output_name = f'output-{output_idx[0]}'
output_idx[0] += 1
output_names.append(output_name)
output_dynamic_axes[output_name] = {}
for dim_idx in range(len(output.shape)):
output_dynamic_axes[output_name].update({dim_idx: f'{output_name}_dim{dim_idx}'})
return
if isinstance(output, abc.Sequence):
for value in output:
_populate_output_names_and_dynamic_axes(value, output_names, output_dynamic_axes, output_idx)
elif isinstance(output, abc.Mapping):
for _, value in sorted(output.items()):
_populate_output_names_and_dynamic_axes(value, output_names, output_dynamic_axes, output_idx)
else:
raise wrap_exception(ORTModuleIOError,
TypeError(f'ORTModule does not support the following model output type {type(output)}'))
output_names = []
output_dynamic_axes = {}
output_idx = [0]
_populate_output_names_and_dynamic_axes(module_output, output_names, output_dynamic_axes, output_idx)
return output_names, output_dynamic_axes | [
"def",
"_parse_outputs_and_extract_names_and_dynamic_axes",
"(",
"module_output",
")",
":",
"def",
"_populate_output_names_and_dynamic_axes",
"(",
"output",
",",
"output_names",
",",
"output_dynamic_axes",
",",
"output_idx",
")",
":",
"# Depth first traversal to traverse through the entire output collecting output names and dynamic axes",
"if",
"output",
"is",
"None",
":",
"return",
"elif",
"isinstance",
"(",
"output",
",",
"torch",
".",
"Tensor",
")",
":",
"# Naming the outputs with a hyphen ensures that there can be no input with the same",
"# name, preventing collisions with other NodeArgs (for example an input to forward called output0)",
"output_name",
"=",
"f'output-{output_idx[0]}'",
"output_idx",
"[",
"0",
"]",
"+=",
"1",
"output_names",
".",
"append",
"(",
"output_name",
")",
"output_dynamic_axes",
"[",
"output_name",
"]",
"=",
"{",
"}",
"for",
"dim_idx",
"in",
"range",
"(",
"len",
"(",
"output",
".",
"shape",
")",
")",
":",
"output_dynamic_axes",
"[",
"output_name",
"]",
".",
"update",
"(",
"{",
"dim_idx",
":",
"f'{output_name}_dim{dim_idx}'",
"}",
")",
"return",
"if",
"isinstance",
"(",
"output",
",",
"abc",
".",
"Sequence",
")",
":",
"for",
"value",
"in",
"output",
":",
"_populate_output_names_and_dynamic_axes",
"(",
"value",
",",
"output_names",
",",
"output_dynamic_axes",
",",
"output_idx",
")",
"elif",
"isinstance",
"(",
"output",
",",
"abc",
".",
"Mapping",
")",
":",
"for",
"_",
",",
"value",
"in",
"sorted",
"(",
"output",
".",
"items",
"(",
")",
")",
":",
"_populate_output_names_and_dynamic_axes",
"(",
"value",
",",
"output_names",
",",
"output_dynamic_axes",
",",
"output_idx",
")",
"else",
":",
"raise",
"wrap_exception",
"(",
"ORTModuleIOError",
",",
"TypeError",
"(",
"f'ORTModule does not support the following model output type {type(output)}'",
")",
")",
"output_names",
"=",
"[",
"]",
"output_dynamic_axes",
"=",
"{",
"}",
"output_idx",
"=",
"[",
"0",
"]",
"_populate_output_names_and_dynamic_axes",
"(",
"module_output",
",",
"output_names",
",",
"output_dynamic_axes",
",",
"output_idx",
")",
"return",
"output_names",
",",
"output_dynamic_axes"
] | https://github.com/microsoft/onnxruntime/blob/f92e47e95b13a240e37caf7b36577983544f98fc/orttraining/orttraining/python/training/ortmodule/_io.py#L356-L390 |
|
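A torch-free sketch of the depth-first walk above: every tensor leaf gets a sequential hyphenated name and one dynamic axis per dimension. NumPy arrays stand in for torch tensors:

```python
# Depth-first traversal over a nested output structure, mirroring the
# record above; numpy replaces torch so the sketch is self-contained.
from collections import abc
import numpy as np

def collect_outputs(output, names, dynamic_axes, idx):
    if output is None:
        return
    if isinstance(output, np.ndarray):
        name = f"output-{idx[0]}"
        idx[0] += 1
        names.append(name)
        dynamic_axes[name] = {d: f"{name}_dim{d}" for d in range(output.ndim)}
    elif isinstance(output, abc.Sequence):
        for value in output:
            collect_outputs(value, names, dynamic_axes, idx)
    elif isinstance(output, abc.Mapping):
        for _, value in sorted(output.items()):
            collect_outputs(value, names, dynamic_axes, idx)
    else:
        raise TypeError(f"unsupported output type {type(output)}")

names, axes = [], {}
collect_outputs((np.zeros((2, 3)), {"logits": np.zeros(4)}), names, axes, [0])
print(names)             # ['output-0', 'output-1']
print(axes["output-0"])  # {0: 'output-0_dim0', 1: 'output-0_dim1'}
```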
google-coral/edgetpu | 5020de9386ff370dcc1f63291a2d0f98eeb98adb | edgetpu/basic/basic_engine.py | python | BasicEngine.get_input_tensor_shape | (self) | return self._engine.get_input_tensor_shape() | Gets the shape required for the input tensor.
For models trained for image classification / detection, the shape is always
[1, height, width, channels]. To be used as input for :func:`run_inference`,
this tensor shape must be flattened into a 1-D array with size ``height *
width * channels``. To instead get that 1-D array size, use
:func:`required_input_array_size`.
Returns:
A 1-D array (:obj:`numpy.ndarray`) representing the required input tensor
shape. | Gets the shape required for the input tensor. | [
"Gets",
"the",
"shape",
"required",
"for",
"the",
"input",
"tensor",
"."
] | def get_input_tensor_shape(self):
"""Gets the shape required for the input tensor.
For models trained for image classification / detection, the shape is always
[1, height, width, channels]. To be used as input for :func:`run_inference`,
this tensor shape must be flattened into a 1-D array with size ``height *
width * channels``. To instead get that 1-D array size, use
:func:`required_input_array_size`.
Returns:
A 1-D array (:obj:`numpy.ndarray`) representing the required input tensor
shape.
"""
return self._engine.get_input_tensor_shape() | [
"def",
"get_input_tensor_shape",
"(",
"self",
")",
":",
"return",
"self",
".",
"_engine",
".",
"get_input_tensor_shape",
"(",
")"
] | https://github.com/google-coral/edgetpu/blob/5020de9386ff370dcc1f63291a2d0f98eeb98adb/edgetpu/basic/basic_engine.py#L140-L153 |
|
psnonis/FinBERT | c0c555d833a14e2316a3701e59c0b5156f804b4e | bert-gpu/run_classifier.py | python | file_based_input_fn_builder | (input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None) | return input_fn | Creates an `input_fn` closure to be passed to Estimator. | Creates an `input_fn` closure to be passed to Estimator. | [
"Creates",
"an",
"input_fn",
"closure",
"to",
"be",
"passed",
"to",
"Estimator",
"."
] | def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn | [
"def",
"file_based_input_fn_builder",
"(",
"input_file",
",",
"batch_size",
",",
"seq_length",
",",
"is_training",
",",
"drop_remainder",
",",
"hvd",
"=",
"None",
")",
":",
"name_to_features",
"=",
"{",
"\"input_ids\"",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"seq_length",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"input_mask\"",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"seq_length",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"segment_ids\"",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"seq_length",
"]",
",",
"tf",
".",
"int64",
")",
",",
"\"label_ids\"",
":",
"tf",
".",
"FixedLenFeature",
"(",
"[",
"]",
",",
"tf",
".",
"int64",
")",
",",
"}",
"def",
"_decode_record",
"(",
"record",
",",
"name_to_features",
")",
":",
"\"\"\"Decodes a record to a TensorFlow example.\"\"\"",
"example",
"=",
"tf",
".",
"parse_single_example",
"(",
"record",
",",
"name_to_features",
")",
"# tf.Example only supports tf.int64, but the TPU only supports tf.int32.",
"# So cast all int64 to int32.",
"for",
"name",
"in",
"list",
"(",
"example",
".",
"keys",
"(",
")",
")",
":",
"t",
"=",
"example",
"[",
"name",
"]",
"if",
"t",
".",
"dtype",
"==",
"tf",
".",
"int64",
":",
"t",
"=",
"tf",
".",
"to_int32",
"(",
"t",
")",
"example",
"[",
"name",
"]",
"=",
"t",
"return",
"example",
"def",
"input_fn",
"(",
")",
":",
"\"\"\"The actual input function.\"\"\"",
"# For training, we want a lot of parallel reading and shuffling.",
"# For eval, we want no shuffling and parallel reading doesn't matter.",
"d",
"=",
"tf",
".",
"data",
".",
"TFRecordDataset",
"(",
"input_file",
")",
"if",
"is_training",
":",
"if",
"hvd",
"is",
"not",
"None",
":",
"d",
"=",
"d",
".",
"shard",
"(",
"hvd",
".",
"size",
"(",
")",
",",
"hvd",
".",
"rank",
"(",
")",
")",
"d",
"=",
"d",
".",
"repeat",
"(",
")",
"d",
"=",
"d",
".",
"shuffle",
"(",
"buffer_size",
"=",
"100",
")",
"d",
"=",
"d",
".",
"apply",
"(",
"tf",
".",
"contrib",
".",
"data",
".",
"map_and_batch",
"(",
"lambda",
"record",
":",
"_decode_record",
"(",
"record",
",",
"name_to_features",
")",
",",
"batch_size",
"=",
"batch_size",
",",
"drop_remainder",
"=",
"drop_remainder",
")",
")",
"return",
"d",
"return",
"input_fn"
] | https://github.com/psnonis/FinBERT/blob/c0c555d833a14e2316a3701e59c0b5156f804b4e/bert-gpu/run_classifier.py#L118-L162 |
|
baidu/tera | dbcd28af792d879d961bf9fc7eb60de81b437646 | src/sdk/python/TeraSdk.py | python | Table.__init__ | (self, table) | init | init | [
"init"
] | def __init__(self, table):
""" init """
self.table = table | [
"def",
"__init__",
"(",
"self",
",",
"table",
")",
":",
"self",
".",
"table",
"=",
"table"
] | https://github.com/baidu/tera/blob/dbcd28af792d879d961bf9fc7eb60de81b437646/src/sdk/python/TeraSdk.py#L450-L452 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/stc.py | python | StyledTextCtrl.SetStyleBits | (*args, **kwargs) | return _stc.StyledTextCtrl_SetStyleBits(*args, **kwargs) | SetStyleBits(self, int bits)
Divide each styling byte into lexical class bits (default: 5) and indicator
bits (default: 3). If a lexer requires more than 32 lexical states, then this
is used to expand the possible states. | SetStyleBits(self, int bits) | [
"SetStyleBits",
"(",
"self",
"int",
"bits",
")"
] | def SetStyleBits(*args, **kwargs):
"""
SetStyleBits(self, int bits)
Divide each styling byte into lexical class bits (default: 5) and indicator
bits (default: 3). If a lexer requires more than 32 lexical states, then this
is used to expand the possible states.
"""
return _stc.StyledTextCtrl_SetStyleBits(*args, **kwargs) | [
"def",
"SetStyleBits",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_SetStyleBits",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L2945-L2953 |
|
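With the default split of 5 lexical bits and 3 indicator bits, one style byte packs up to 32 lexical states plus three indicator flags. A quick sketch of the masks implied by `SetStyleBits(5)`; the packed byte below is a made-up value:

```python
# Bit masks implied by a 5/3 split of the style byte (assumed layout:
# lexical class in the low bits, indicators in the high bits).
style_bits = 5
lexical_mask = (1 << style_bits) - 1    # 0b00011111 == 31
indicator_mask = 0xFF & ~lexical_mask   # 0b11100000

style_byte = 0b101_00111                # hypothetical packed byte
print(style_byte & lexical_mask)                     # lexical class 7
print((style_byte & indicator_mask) >> style_bits)   # indicator bits 0b101 == 5
```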
physercoe/starquant | c00cad64d1de2da05081b3dc320ef264c6295e08 | source/engine/strategy_engine.py | python | StrategyEngine.unregister_handler | (self, type_, handler) | unregister handler/subscriber | unregister handler/subscriber | [
"unregister",
"handler",
"/",
"subscriber"
] | def unregister_handler(self, type_, handler):
"""
unregister handler/subscriber
"""
# handlerList = self._handlers[type_]
# if handler in handlerList:
# self._handlers.remove(handler)
# if not handlerList:
# del self._handlers[type_]
pass | [
"def",
"unregister_handler",
"(",
"self",
",",
"type_",
",",
"handler",
")",
":",
"# handlerList = self._handlers[type_]",
"# if handler in handlerList:",
"# self._handlers.remove(handler)",
"# if not handlerList:",
"# del self._handlers[type_]",
"pass"
] | https://github.com/physercoe/starquant/blob/c00cad64d1de2da05081b3dc320ef264c6295e08/source/engine/strategy_engine.py#L1093-L1104 |
||
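The method above is a stub whose intended body survives only in the comments. A hedged sketch of that logic, assuming `self._handlers` maps an event type to a list of handler callables as those comments suggest:

```python
# Hedged sketch of the commented-out unregister logic; the surrounding
# class is invented to make the example self-contained.
class StrategyEngineSketch:
    def __init__(self):
        self._handlers = {}

    def register_handler(self, type_, handler):
        self._handlers.setdefault(type_, []).append(handler)

    def unregister_handler(self, type_, handler):
        """unregister handler/subscriber"""
        handler_list = self._handlers.get(type_)
        if not handler_list:
            return
        if handler in handler_list:
            handler_list.remove(handler)
        if not handler_list:
            del self._handlers[type_]  # drop the type once no handlers remain

engine = StrategyEngineSketch()
cb = lambda event: None
engine.register_handler("TICK", cb)
engine.unregister_handler("TICK", cb)
print(engine._handlers)  # {}
```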
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/imaplib.py | python | IMAP4.list | (self, directory='""', pattern='*') | return self._untagged_response(typ, dat, name) | List mailbox names in directory matching pattern.
(typ, [data]) = <instance>.list(directory='""', pattern='*')
'data' is list of LIST responses. | List mailbox names in directory matching pattern. | [
"List",
"mailbox",
"names",
"in",
"directory",
"matching",
"pattern",
"."
] | def list(self, directory='""', pattern='*'):
"""List mailbox names in directory matching pattern.
(typ, [data]) = <instance>.list(directory='""', pattern='*')
'data' is list of LIST responses.
"""
name = 'LIST'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name) | [
"def",
"list",
"(",
"self",
",",
"directory",
"=",
"'\"\"'",
",",
"pattern",
"=",
"'*'",
")",
":",
"name",
"=",
"'LIST'",
"typ",
",",
"dat",
"=",
"self",
".",
"_simple_command",
"(",
"name",
",",
"directory",
",",
"pattern",
")",
"return",
"self",
".",
"_untagged_response",
"(",
"typ",
",",
"dat",
",",
"name",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/imaplib.py#L486-L495 |
|
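Hedged usage sketch for the stdlib call above; the host and credentials are placeholders. LIST hands back `(typ, data)` where `data` holds the raw LIST response lines:

```python
# Placeholder server and credentials; requires a reachable IMAP host to run.
import imaplib

M = imaplib.IMAP4_SSL("imap.example.com")    # hypothetical server
M.login("user@example.com", "app-password")  # placeholder credentials
typ, data = M.list()                         # defaults: directory '""', pattern '*'
if typ == "OK":
    for line in data:
        print(line)  # e.g. b'(\\HasNoChildren) "/" "INBOX"'
M.logout()
```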
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Code/Tools/waf-1.7.13/waflib/Tools/fc_config.py | python | link_main_routines_tg_method | (self) | The configuration test declares a unique task generator,
so we create other task generators from there for fortran link tests | The configuration test declares a unique task generator,
so we create other task generators from there for fortran link tests | [
"The",
"configuration",
"test",
"declares",
"a",
"unique",
"task",
"generator",
"so",
"we",
"create",
"other",
"task",
"generators",
"from",
"there",
"for",
"fortran",
"link",
"tests"
] | def link_main_routines_tg_method(self):
"""
The configuration test declares a unique task generator,
so we create other task generators from there for fortran link tests
"""
def write_test_file(task):
task.outputs[0].write(task.generator.code)
bld = self.bld
bld(rule=write_test_file, target='main.c', code=MAIN_CODE % self.__dict__)
bld(rule=write_test_file, target='test.f', code=ROUTINES_CODE)
bld(features='fc fcstlib', source='test.f', target='test')
bld(features='c fcprogram', source='main.c', target='app', use='test') | [
"def",
"link_main_routines_tg_method",
"(",
"self",
")",
":",
"def",
"write_test_file",
"(",
"task",
")",
":",
"task",
".",
"outputs",
"[",
"0",
"]",
".",
"write",
"(",
"task",
".",
"generator",
".",
"code",
")",
"bld",
"=",
"self",
".",
"bld",
"bld",
"(",
"rule",
"=",
"write_test_file",
",",
"target",
"=",
"'main.c'",
",",
"code",
"=",
"MAIN_CODE",
"%",
"self",
".",
"__dict__",
")",
"bld",
"(",
"rule",
"=",
"write_test_file",
",",
"target",
"=",
"'test.f'",
",",
"code",
"=",
"ROUTINES_CODE",
")",
"bld",
"(",
"features",
"=",
"'fc fcstlib'",
",",
"source",
"=",
"'test.f'",
",",
"target",
"=",
"'test'",
")",
"bld",
"(",
"features",
"=",
"'c fcprogram'",
",",
"source",
"=",
"'main.c'",
",",
"target",
"=",
"'app'",
",",
"use",
"=",
"'test'",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Tools/fc_config.py#L378-L389 |
||
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | xmlDoc.debugDumpDocument | (self, output) | Dumps debug information for the document, it's recursive | Dumps debug information for the document, it's recursive | [
"Dumps",
"debug",
"information",
"for",
"the",
"document",
"it",
"s",
"recursive"
] | def debugDumpDocument(self, output):
"""Dumps debug information for the document, it's recursive """
libxml2mod.xmlDebugDumpDocument(output, self._o) | [
"def",
"debugDumpDocument",
"(",
"self",
",",
"output",
")",
":",
"libxml2mod",
".",
"xmlDebugDumpDocument",
"(",
"output",
",",
"self",
".",
"_o",
")"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L4085-L4087 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/exceptions.py | python | HashMismatch.__init__ | (self, allowed, gots) | :param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion | :param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion | [
":",
"param",
"allowed",
":",
"A",
"dict",
"of",
"algorithm",
"names",
"pointing",
"to",
"lists",
"of",
"allowed",
"hex",
"digests",
":",
"param",
"gots",
":",
"A",
"dict",
"of",
"algorithm",
"names",
"pointing",
"to",
"hashes",
"we",
"actually",
"got",
"from",
"the",
"files",
"under",
"suspicion"
] | def __init__(self, allowed, gots):
# type: (Dict[str, List[str]], Dict[str, _Hash]) -> None
"""
:param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion
"""
self.allowed = allowed
self.gots = gots | [
"def",
"__init__",
"(",
"self",
",",
"allowed",
",",
"gots",
")",
":",
"# type: (Dict[str, List[str]], Dict[str, _Hash]) -> None",
"self",
".",
"allowed",
"=",
"allowed",
"self",
".",
"gots",
"=",
"gots"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/exceptions.py#L317-L326 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/ipython/py2/IPython/core/formatters.py | python | DisplayFormatter.format | (self, obj, include=None, exclude=None) | return format_dict, md_dict | Return a format data dict for an object.
By default all format types will be computed.
The following MIME types are usually implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* application/pdf
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
obj : object
The Python object whose format data will be computed.
include : list, tuple or set; optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list, tuple or set; optional
A list of format type string (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
Mimetypes present in exclude will take precedence over the ones in include
Returns
-------
(format_dict, metadata_dict) : tuple of two dicts
format_dict is a dictionary of key/value pairs, one of each format that was
generated for the object. The keys are the format types, which
will usually be MIME type strings and the values are JSON'able
data structures containing the raw data for the representation in
that format.
metadata_dict is a dictionary of metadata about each mime-type output.
Its keys will be a strict subset of the keys in format_dict.
Notes
-----
If an object implement `_repr_mimebundle_` as well as various
`_repr_*_`, the data returned by `_repr_mimebundle_` will take
precedence and the corresponding `_repr_*_` for this mimetype will
not be called. | Return a format data dict for an object. | [
"Return",
"a",
"format",
"data",
"dict",
"for",
"an",
"object",
"."
] | def format(self, obj, include=None, exclude=None):
"""Return a format data dict for an object.
By default all format types will be computed.
The following MIME types are usually implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* application/pdf
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
obj : object
The Python object whose format data will be computed.
include : list, tuple or set; optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list, tuple or set; optional
A list of format type string (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
Mimetypes present in exclude will take precedence over the ones in include
Returns
-------
(format_dict, metadata_dict) : tuple of two dicts
format_dict is a dictionary of key/value pairs, one of each format that was
generated for the object. The keys are the format types, which
will usually be MIME type strings and the values are JSON'able
data structures containing the raw data for the representation in
that format.
metadata_dict is a dictionary of metadata about each mime-type output.
Its keys will be a strict subset of the keys in format_dict.
Notes
-----
If an object implement `_repr_mimebundle_` as well as various
`_repr_*_`, the data returned by `_repr_mimebundle_` will take
precedence and the corresponding `_repr_*_` for this mimetype will
not be called.
"""
format_dict = {}
md_dict = {}
if self.ipython_display_formatter(obj):
# object handled itself, don't proceed
return {}, {}
format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
if format_dict or md_dict:
if include:
format_dict = {k:v for k,v in format_dict.items() if k in include}
md_dict = {k:v for k,v in md_dict.items() if k in include}
if exclude:
format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
for format_type, formatter in self.formatters.items():
if format_type in format_dict:
# already got it from mimebundle, don't render again
continue
if include and format_type not in include:
continue
if exclude and format_type in exclude:
continue
md = None
try:
data = formatter(obj)
except:
# FIXME: log the exception
raise
# formatters can return raw data or (data, metadata)
if isinstance(data, tuple) and len(data) == 2:
data, md = data
if data is not None:
format_dict[format_type] = data
if md is not None:
md_dict[format_type] = md
return format_dict, md_dict | [
"def",
"format",
"(",
"self",
",",
"obj",
",",
"include",
"=",
"None",
",",
"exclude",
"=",
"None",
")",
":",
"format_dict",
"=",
"{",
"}",
"md_dict",
"=",
"{",
"}",
"if",
"self",
".",
"ipython_display_formatter",
"(",
"obj",
")",
":",
"# object handled itself, don't proceed",
"return",
"{",
"}",
",",
"{",
"}",
"format_dict",
",",
"md_dict",
"=",
"self",
".",
"mimebundle_formatter",
"(",
"obj",
",",
"include",
"=",
"include",
",",
"exclude",
"=",
"exclude",
")",
"if",
"format_dict",
"or",
"md_dict",
":",
"if",
"include",
":",
"format_dict",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"format_dict",
".",
"items",
"(",
")",
"if",
"k",
"in",
"include",
"}",
"md_dict",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"md_dict",
".",
"items",
"(",
")",
"if",
"k",
"in",
"include",
"}",
"if",
"exclude",
":",
"format_dict",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"format_dict",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"exclude",
"}",
"md_dict",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"md_dict",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"exclude",
"}",
"for",
"format_type",
",",
"formatter",
"in",
"self",
".",
"formatters",
".",
"items",
"(",
")",
":",
"if",
"format_type",
"in",
"format_dict",
":",
"# already got it from mimebundle, don't render again",
"continue",
"if",
"include",
"and",
"format_type",
"not",
"in",
"include",
":",
"continue",
"if",
"exclude",
"and",
"format_type",
"in",
"exclude",
":",
"continue",
"md",
"=",
"None",
"try",
":",
"data",
"=",
"formatter",
"(",
"obj",
")",
"except",
":",
"# FIXME: log the exception",
"raise",
"# formatters can return raw data or (data, metadata)",
"if",
"isinstance",
"(",
"data",
",",
"tuple",
")",
"and",
"len",
"(",
"data",
")",
"==",
"2",
":",
"data",
",",
"md",
"=",
"data",
"if",
"data",
"is",
"not",
"None",
":",
"format_dict",
"[",
"format_type",
"]",
"=",
"data",
"if",
"md",
"is",
"not",
"None",
":",
"md_dict",
"[",
"format_type",
"]",
"=",
"md",
"return",
"format_dict",
",",
"md_dict"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py2/IPython/core/formatters.py#L91-L186 |
|
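The precedence rule in the notes above means any MIME type supplied by `_repr_mimebundle_` wins over the matching `_repr_*_` hook. A sketch of an object exercising both (the class here is invented):

```python
# Sketch of the IPython display protocol precedence described above.
class Report:
    def _repr_mimebundle_(self, include=None, exclude=None):
        data = {"text/html": "<b>report</b>", "text/plain": "report"}
        return data, {}  # (format_dict, metadata_dict)

    def _repr_html_(self):
        # Never consulted for text/html: the mimebundle already provided it.
        return "<i>ignored</i>"

# Inside IPython, display(Report()) would render the bold HTML variant.
bundle, metadata = Report()._repr_mimebundle_()
print(bundle["text/html"])  # <b>report</b>
```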
apache/mesos | 97d9a4063332aae3825d78de71611657e05cf5e2 | support/mesos-split.py | python | find_project | (filename) | return found_project | Find a project using its filename. | Find a project using its filename. | [
"Find",
"a",
"project",
"using",
"its",
"filename",
"."
] | def find_project(filename):
"""Find a project using its filename."""
# Find longest prefix match.
found_path_len = 0
found_project = BASE_PROJECT
for project, path in SUBPROJECTS.items():
if filename.startswith(path) and len(path) > found_path_len:
found_path_len = len(path)
found_project = project
return found_project | [
"def",
"find_project",
"(",
"filename",
")",
":",
"# Find longest prefix match.",
"found_path_len",
"=",
"0",
"found_project",
"=",
"BASE_PROJECT",
"for",
"project",
",",
"path",
"in",
"SUBPROJECTS",
".",
"items",
"(",
")",
":",
"if",
"filename",
".",
"startswith",
"(",
"path",
")",
"and",
"len",
"(",
"path",
")",
">",
"found_path_len",
":",
"found_path_len",
"=",
"len",
"(",
"path",
")",
"found_project",
"=",
"project",
"return",
"found_project"
] | https://github.com/apache/mesos/blob/97d9a4063332aae3825d78de71611657e05cf5e2/support/mesos-split.py#L44-L55 |
|
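A self-contained run of the longest-prefix match above, with a made-up subproject layout standing in for `BASE_PROJECT` and `SUBPROJECTS`:

```python
# Longest-prefix project lookup; the layout below is illustrative only.
BASE_PROJECT = "mesos"
SUBPROJECTS = {"libprocess": "3rdparty/libprocess/",
               "stout": "3rdparty/libprocess/3rdparty/stout/"}

def find_project(filename):
    found_path_len = 0
    found_project = BASE_PROJECT
    for project, path in SUBPROJECTS.items():
        if filename.startswith(path) and len(path) > found_path_len:
            found_path_len = len(path)
            found_project = project
    return found_project

# The longer stout/ prefix wins over the libprocess/ prefix it nests inside.
print(find_project("3rdparty/libprocess/3rdparty/stout/include/stout/os.hpp"))
```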
ValveSoftware/source-sdk-2013 | 0d8dceea4310fde5706b3ce1c70609d72a38efdf | mp/src/thirdparty/protobuf-2.3.0/python/mox.py | python | MockAnything.__eq__ | (self, rhs) | return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue) | Provide custom logic to compare objects. | Provide custom logic to compare objects. | [
"Provide",
"custom",
"logic",
"to",
"compare",
"objects",
"."
] | def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue) | [
"def",
"__eq__",
"(",
"self",
",",
"rhs",
")",
":",
"return",
"(",
"isinstance",
"(",
"rhs",
",",
"MockAnything",
")",
"and",
"self",
".",
"_replay_mode",
"==",
"rhs",
".",
"_replay_mode",
"and",
"self",
".",
"_expected_calls_queue",
"==",
"rhs",
".",
"_expected_calls_queue",
")"
] | https://github.com/ValveSoftware/source-sdk-2013/blob/0d8dceea4310fde5706b3ce1c70609d72a38efdf/mp/src/thirdparty/protobuf-2.3.0/python/mox.py#L314-L319 |
|
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/training/saver.py | python | BaseSaverBuilder._AddSaveOps | (self, filename_tensor, vars_to_save) | return control_flow_ops.with_dependencies([save], filename_tensor) | Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
vars_to_save: A list of _VarToSave objects.
Returns:
A tensor with the filename used to save. | Add ops to save variables that are on the same shard. | [
"Add",
"ops",
"to",
"save",
"variables",
"that",
"are",
"on",
"the",
"same",
"shard",
"."
] | def _AddSaveOps(self, filename_tensor, vars_to_save):
"""Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
vars_to_save: A list of _VarToSave objects.
Returns:
A tensor with the filename used to save.
"""
save = self.save_op(filename_tensor, vars_to_save)
return control_flow_ops.with_dependencies([save], filename_tensor) | [
"def",
"_AddSaveOps",
"(",
"self",
",",
"filename_tensor",
",",
"vars_to_save",
")",
":",
"save",
"=",
"self",
".",
"save_op",
"(",
"filename_tensor",
",",
"vars_to_save",
")",
"return",
"control_flow_ops",
".",
"with_dependencies",
"(",
"[",
"save",
"]",
",",
"filename_tensor",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/training/saver.py#L203-L214 |
|
metashell/metashell | f4177e4854ea00c8dbc722cadab26ef413d798ea | 3rd/templight/compiler-rt/lib/sanitizer_common/scripts/cpplint.py | python | CheckRedundantVirtual | (filename, clean_lines, linenum, error) | Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Check if line contains a redundant "virtual" function-specifier. | [
"Check",
"if",
"line",
"contains",
"a",
"redundant",
"virtual",
"function",
"-",
"specifier",
"."
] | def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a colon on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break | [
"def",
"CheckRedundantVirtual",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"# Look for \"virtual\" on current line.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"virtual",
"=",
"Match",
"(",
"r'^(.*)(\\bvirtual\\b)(.*)$'",
",",
"line",
")",
"if",
"not",
"virtual",
":",
"return",
"# Ignore \"virtual\" keywords that are near access-specifiers. These",
"# are only used in class base-specifier and do not apply to member",
"# functions.",
"if",
"(",
"Search",
"(",
"r'\\b(public|protected|private)\\s+$'",
",",
"virtual",
".",
"group",
"(",
"1",
")",
")",
"or",
"Match",
"(",
"r'^\\s+(public|protected|private)\\b'",
",",
"virtual",
".",
"group",
"(",
"3",
")",
")",
")",
":",
"return",
"# Ignore the \"virtual\" keyword from virtual base classes. Usually",
"# there is a column on the same line in these cases (virtual base",
"# classes are rare in google3 because multiple inheritance is rare).",
"if",
"Match",
"(",
"r'^.*[^:]:[^:].*$'",
",",
"line",
")",
":",
"return",
"# Look for the next opening parenthesis. This is the start of the",
"# parameter list (possibly on the next line shortly after virtual).",
"# TODO(unknown): doesn't work if there are virtual functions with",
"# decltype() or other things that use parentheses, but csearch suggests",
"# that this is rare.",
"end_col",
"=",
"-",
"1",
"end_line",
"=",
"-",
"1",
"start_col",
"=",
"len",
"(",
"virtual",
".",
"group",
"(",
"2",
")",
")",
"for",
"start_line",
"in",
"xrange",
"(",
"linenum",
",",
"min",
"(",
"linenum",
"+",
"3",
",",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"start_line",
"]",
"[",
"start_col",
":",
"]",
"parameter_list",
"=",
"Match",
"(",
"r'^([^(]*)\\('",
",",
"line",
")",
"if",
"parameter_list",
":",
"# Match parentheses to find the end of the parameter list",
"(",
"_",
",",
"end_line",
",",
"end_col",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"start_line",
",",
"start_col",
"+",
"len",
"(",
"parameter_list",
".",
"group",
"(",
"1",
")",
")",
")",
"break",
"start_col",
"=",
"0",
"if",
"end_col",
"<",
"0",
":",
"return",
"# Couldn't find end of parameter list, give up",
"# Look for \"override\" or \"final\" after the parameter list",
"# (possibly on the next few lines).",
"for",
"i",
"in",
"xrange",
"(",
"end_line",
",",
"min",
"(",
"end_line",
"+",
"3",
",",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"i",
"]",
"[",
"end_col",
":",
"]",
"match",
"=",
"Search",
"(",
"r'\\b(override|final)\\b'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/inheritance'",
",",
"4",
",",
"(",
"'\"virtual\" is redundant since function is '",
"'already declared as \"%s\"'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"# Set end_col to check whole lines after we are done with the",
"# first line.",
"end_col",
"=",
"0",
"if",
"Search",
"(",
"r'[^\\w]\\s*$'",
",",
"line",
")",
":",
"break"
] | https://github.com/metashell/metashell/blob/f4177e4854ea00c8dbc722cadab26ef413d798ea/3rd/templight/compiler-rt/lib/sanitizer_common/scripts/cpplint.py#L5621-L5682 |
||
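A toy pass in the spirit of the check above: flag `virtual` on a declaration that also carries `override` or `final`. It only handles the single-line case, whereas the real check matches parentheses across several lines:

```python
# Single-line toy version of the redundant-virtual check above.
import re

lines = [
    "virtual void OnEvent() override;",
    "virtual void Legacy();",
    "void Plain() final;",
]
for num, line in enumerate(lines, 1):
    specifier = re.search(r"\b(override|final)\b", line)
    if specifier and re.search(r"\bvirtual\b", line):
        print(f'line {num}: "virtual" is redundant with "{specifier.group(1)}"')
```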
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py | python | Wm.wm_transient | (self, master=None) | return self.tk.call('wm', 'transient', self._w, master) | Instruct the window manager that this widget is transient
with regard to widget MASTER. | Instruct the window manager that this widget is transient
with regard to widget MASTER. | [
"Instruct",
"the",
"window",
"manager",
"that",
"this",
"widget",
"is",
"transient",
"with",
"regard",
"to",
"widget",
"MASTER",
"."
] | def wm_transient(self, master=None):
"""Instruct the window manager that this widget is transient
with regard to widget MASTER."""
return self.tk.call('wm', 'transient', self._w, master) | [
"def",
"wm_transient",
"(",
"self",
",",
"master",
"=",
"None",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"'wm'",
",",
"'transient'",
",",
"self",
".",
"_w",
",",
"master",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py#L1987-L1990 |
|
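Hedged stdlib sketch: marking a Toplevel transient keeps it grouped with (and typically stacked above) its master window. It needs a display to actually run:

```python
# Minimal transient-dialog example using the stdlib call above.
import tkinter as tk

root = tk.Tk()
root.title("main")

dialog = tk.Toplevel(root)
dialog.title("settings")
dialog.transient(root)  # same call as wm_transient(master=root)

root.mainloop()
```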
pristineio/webrtc-mirror | 7a5bcdffaab90a05bc1146b2b1ea71c004e54d71 | webrtc/rtc_tools/compare_videos.py | python | _ParseArgs | () | return options | Registers the command-line options. | Registers the command-line options. | [
"Registers",
"the",
"command",
"-",
"line",
"options",
"."
] | def _ParseArgs():
"""Registers the command-line options."""
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--label', type='string', default='MY_TEST',
help=('Label of the test, used to identify different '
'tests. Default: %default'))
parser.add_option('--ref_video', type='string',
help='Reference video to compare with (YUV).')
parser.add_option('--test_video', type='string',
help=('Test video to be compared with the reference '
'video (YUV).'))
parser.add_option('--frame_analyzer', type='string',
help='Path to the frame analyzer executable.')
parser.add_option('--barcode_decoder', type='string',
help=('Path to the barcode decoder script. By default, we '
'will assume we can find it in barcode_tools/'
'relative to this directory.'))
parser.add_option('--ffmpeg_path', type='string',
help=('The path to where the ffmpeg executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name ffmpeg[.exe].'))
parser.add_option('--zxing_path', type='string',
help=('The path to where the zxing executable is located. '
'If omitted, it will be assumed to be present in the '
'PATH with the name zxing[.exe].'))
parser.add_option('--stats_file_ref', type='string', default='stats_ref.txt',
help=('Path to the temporary stats file to be created and '
'used for the reference video file. '
'Default: %default'))
parser.add_option('--stats_file_test', type='string',
default='stats_test.txt',
help=('Path to the temporary stats file to be created and '
'used for the test video file. Default: %default'))
parser.add_option('--stats_file', type='string',
help=('DEPRECATED'))
parser.add_option('--yuv_frame_width', type='int', default=640,
help='Width of the YUV file\'s frames. Default: %default')
parser.add_option('--yuv_frame_height', type='int', default=480,
help='Height of the YUV file\'s frames. Default: %default')
options, _ = parser.parse_args()
if options.stats_file:
options.stats_file_test = options.stats_file
print ('WARNING: Using deprecated switch --stats_file. '
'The new flag is --stats_file_test.')
if not options.ref_video:
parser.error('You must provide a path to the reference video!')
if not os.path.exists(options.ref_video):
parser.error('Cannot find the reference video at %s' % options.ref_video)
if not options.test_video:
parser.error('You must provide a path to the test video!')
if not os.path.exists(options.test_video):
parser.error('Cannot find the test video at %s' % options.test_video)
if not options.frame_analyzer:
parser.error('You must provide the path to the frame analyzer executable!')
if not os.path.exists(options.frame_analyzer):
parser.error('Cannot find frame analyzer executable at %s!' %
options.frame_analyzer)
return options | [
"def",
"_ParseArgs",
"(",
")",
":",
"usage",
"=",
"'usage: %prog [options]'",
"parser",
"=",
"optparse",
".",
"OptionParser",
"(",
"usage",
"=",
"usage",
")",
"parser",
".",
"add_option",
"(",
"'--label'",
",",
"type",
"=",
"'string'",
",",
"default",
"=",
"'MY_TEST'",
",",
"help",
"=",
"(",
"'Label of the test, used to identify different '",
"'tests. Default: %default'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--ref_video'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"'Reference video to compare with (YUV).'",
")",
"parser",
".",
"add_option",
"(",
"'--test_video'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"(",
"'Test video to be compared with the reference '",
"'video (YUV).'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--frame_analyzer'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"'Path to the frame analyzer executable.'",
")",
"parser",
".",
"add_option",
"(",
"'--barcode_decoder'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"(",
"'Path to the barcode decoder script. By default, we '",
"'will assume we can find it in barcode_tools/'",
"'relative to this directory.'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--ffmpeg_path'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"(",
"'The path to where the ffmpeg executable is located. '",
"'If omitted, it will be assumed to be present in the '",
"'PATH with the name ffmpeg[.exe].'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--zxing_path'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"(",
"'The path to where the zxing executable is located. '",
"'If omitted, it will be assumed to be present in the '",
"'PATH with the name zxing[.exe].'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--stats_file_ref'",
",",
"type",
"=",
"'string'",
",",
"default",
"=",
"'stats_ref.txt'",
",",
"help",
"=",
"(",
"'Path to the temporary stats file to be created and '",
"'used for the reference video file. '",
"'Default: %default'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--stats_file_test'",
",",
"type",
"=",
"'string'",
",",
"default",
"=",
"'stats_test.txt'",
",",
"help",
"=",
"(",
"'Path to the temporary stats file to be created and '",
"'used for the test video file. Default: %default'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--stats_file'",
",",
"type",
"=",
"'string'",
",",
"help",
"=",
"(",
"'DEPRECATED'",
")",
")",
"parser",
".",
"add_option",
"(",
"'--yuv_frame_width'",
",",
"type",
"=",
"'int'",
",",
"default",
"=",
"640",
",",
"help",
"=",
"'Width of the YUV file\\'s frames. Default: %default'",
")",
"parser",
".",
"add_option",
"(",
"'--yuv_frame_height'",
",",
"type",
"=",
"'int'",
",",
"default",
"=",
"480",
",",
"help",
"=",
"'Height of the YUV file\\'s frames. Default: %default'",
")",
"options",
",",
"_",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"options",
".",
"stats_file",
":",
"options",
".",
"stats_file_test",
"=",
"options",
".",
"stats_file",
"print",
"(",
"'WARNING: Using deprecated switch --stats_file. '",
"'The new flag is --stats_file_test.'",
")",
"if",
"not",
"options",
".",
"ref_video",
":",
"parser",
".",
"error",
"(",
"'You must provide a path to the reference video!'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"ref_video",
")",
":",
"parser",
".",
"error",
"(",
"'Cannot find the reference video at %s'",
"%",
"options",
".",
"ref_video",
")",
"if",
"not",
"options",
".",
"test_video",
":",
"parser",
".",
"error",
"(",
"'You must provide a path to the test video!'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"test_video",
")",
":",
"parser",
".",
"error",
"(",
"'Cannot find the test video at %s'",
"%",
"options",
".",
"test_video",
")",
"if",
"not",
"options",
".",
"frame_analyzer",
":",
"parser",
".",
"error",
"(",
"'You must provide the path to the frame analyzer executable!'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"options",
".",
"frame_analyzer",
")",
":",
"parser",
".",
"error",
"(",
"'Cannot find frame analyzer executable at %s!'",
"%",
"options",
".",
"frame_analyzer",
")",
"return",
"options"
] | https://github.com/pristineio/webrtc-mirror/blob/7a5bcdffaab90a05bc1146b2b1ea71c004e54d71/webrtc/rtc_tools/compare_videos.py#L24-L87 |
|
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/serial/tools/list_ports_windows.py | python | comports | (include_links=False) | return list(iterate_comports()) | Return a list of info objects about serial ports | Return a list of info objects about serial ports | [
"Return",
"a",
"list",
"of",
"info",
"objects",
"about",
"serial",
"ports"
] | def comports(include_links=False):
"""Return a list of info objects about serial ports"""
return list(iterate_comports()) | [
"def",
"comports",
"(",
"include_links",
"=",
"False",
")",
":",
"return",
"list",
"(",
"iterate_comports",
"(",
")",
")"
] | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/serial/tools/list_ports_windows.py#L297-L299 |
|
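Hedged usage sketch for the enumerator above, via pyserial's public entry point; which ports print depends on the machine:

```python
# Enumerate serial ports; each entry is a ListPortInfo with
# device/description/hwid attributes.
from serial.tools import list_ports

for port in list_ports.comports():
    print(port.device, "-", port.description)
```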
cloudfuzz/android-kernel-exploitation | 269d7467e259b85216fec34068933fe535415d1a | gdb/root-me.py | python | set_selinux_task_context | (task) | Set selinux task context
:param task: task_struct address | Set selinux task context | [
"Set",
"selinux",
"task",
"context"
] | def set_selinux_task_context(task):
"""
Set selinux task context
:param task: task_struct address
"""
cred = task["cred"]
security = cred["security"]
security_struct_t = gdb.lookup_type("struct task_security_struct").pointer()
security_struct = security.cast(security_struct_t)
osid = security_struct["osid"]
sid = security_struct["sid"]
write32(osid.address, 0x1) # SECINITSID_KERNEL = 1 = kernel
write32(sid.address, 0x1) | [
"def",
"set_selinux_task_context",
"(",
"task",
")",
":",
"cred",
"=",
"task",
"[",
"\"cred\"",
"]",
"security",
"=",
"cred",
"[",
"\"security\"",
"]",
"security_struct_t",
"=",
"gdb",
".",
"lookup_type",
"(",
"\"struct task_security_struct\"",
")",
".",
"pointer",
"(",
")",
"security_struct",
"=",
"security",
".",
"cast",
"(",
"security_struct_t",
")",
"osid",
"=",
"security_struct",
"[",
"\"osid\"",
"]",
"sid",
"=",
"security_struct",
"[",
"\"sid\"",
"]",
"write32",
"(",
"osid",
".",
"address",
",",
"0x1",
")",
"# SECINITSID_KERNEL = 1 = kernel",
"write32",
"(",
"sid",
".",
"address",
",",
"0x1",
")"
] | https://github.com/cloudfuzz/android-kernel-exploitation/blob/269d7467e259b85216fec34068933fe535415d1a/gdb/root-me.py#L106-L124 |
||
hpi-xnor/BMXNet-v2 | af2b1859eafc5c721b1397cef02f946aaf2ce20d | benchmark/python/sparse/sparse_op.py | python | test_dot_synthetic | () | benchmark mx.nd.dot(sparse_ndarray, dense_ndarray) with given density.
`t_sparse` is the time cost of dot(csr, dns), while `t_dense` is the time cost
of dot(dns, dns), with the same matrix except that it is in default storage type. | benchmark mx.nd.dot(sparse_ndarray, dense_ndarray) with given density.
`t_sparse` is the time cost of dot(csr, dns), while `t_dense` is the time cost
of dot(dns, dns), with the same matrix except that it is in default storage type. | [
"benchmark",
"mx",
".",
"nd",
".",
"dot",
"(",
"sparse_ndarray",
"dense_ndarray",
")",
"with",
"given",
"density",
".",
"t_sparse",
"is",
"the",
"time",
"cost",
"of",
"dot",
"(",
"csr",
"dns",
")",
"while",
"t_dense",
"is",
"the",
"time",
"cost",
"of",
"dot",
"(",
"dns",
"dns",
")",
"with",
"the",
"same",
"matrix",
"except",
"that",
"it",
"is",
"in",
"default",
"storage",
"type",
"."
] | def test_dot_synthetic():
"""benchmark mx.nd.dot(sparse_ndarray, dense_ndarray) with given density.
`t_sparse` is the time cost of dot(csr, dns), while `t_dense` is the time cost
of dot(dns, dns), with the same matrix except that it is in default storage type.
"""
def measure_cost_forward_baseline(repeat, dot, lhs, rhs):
start = time.time()
for i in range(repeat):
dot(lhs, rhs)
end = time.time()
diff = end - start
return diff / repeat
def measure_cost_backward_baseline(repeat, dot, transpose, lhs, rhs):
start = time.time()
for i in range(repeat):
dot(transpose(lhs), rhs)
end = time.time()
diff = end - start
return diff / repeat
def bench_dot_forward(m, k, n, density, ctx, repeat):
set_default_context(ctx)
dns = mx.nd.random.uniform(shape=(k, n)).copyto(ctx)
data_shape = (m, k)
csr_data = rand_ndarray(data_shape, 'csr', density)
dns_data = csr_data.tostype('default')
rhs_dns_np = dns.asnumpy()
lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy()) # csr in scipy
lhs_dns_np = lhs_csr_sp.todense()
data = [dns_data, csr_data]
costs = []
for d in data:
dns.wait_to_read()
d.wait_to_read()
cost = measure_cost(repeat, mx.nd.dot, d, dns)
costs.append(cost)
ratio = costs[0] / costs[1]
costs_baseline = []
cost = measure_cost_forward_baseline(repeat, np.dot, lhs_dns_np, rhs_dns_np)
costs_baseline.append(cost)
cost = measure_cost_forward_baseline(repeat, sp.spmatrix.dot, lhs_csr_sp, rhs_dns_np)
costs_baseline.append(cost)
ratio_baseline = costs_baseline[0] / costs_baseline[1]
fmt = "%0.1f\t\t%s\t%d\t%d\t%d\t%0.2f\t\t\t%0.2f\t%0.5f\t\t%0.2f\t\t\t\t%0.6f\t%0.5f"
print(fmt % (density * 100, str(ctx), n, m, k, ratio, costs[0], costs[1],
ratio_baseline, costs_baseline[0], costs_baseline[1]))
def bench_dot_backward(m, k, n, density, ctx, repeat):
set_default_context(ctx)
dns = mx.nd.random.uniform(shape=(m, n)).copyto(ctx)
data_shape = (m, k)
csr_data = rand_ndarray(data_shape, 'csr', density)
dns_data = csr_data.tostype('default')
rhs_dns_np = dns.asnumpy()
lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy())
lhs_dns_np = lhs_csr_sp.todense()
data = [dns_data, csr_data]
costs = []
for d in data:
dns.wait_to_read()
d.wait_to_read()
cost = measure_cost(repeat, mx.nd.dot, d, dns, transpose_a=True)
costs.append(cost)
ratio = costs[0] / costs[1]
costs_baseline = []
cost = measure_cost_backward_baseline(repeat, np.dot, np.transpose, lhs_dns_np, rhs_dns_np)
costs_baseline.append(cost)
cost = measure_cost_backward_baseline(repeat, sp.spmatrix.dot, sp.spmatrix.transpose, lhs_csr_sp, rhs_dns_np)
costs_baseline.append(cost)
ratio_baseline = costs_baseline[0] / costs_baseline[1]
fmt = "%0.1f\t\t%s\t%d\t%d\t%d\t%0.2f\t\t\t%0.2f\t%0.5f\t\t%0.2f\t\t\t\t%0.6f\t%0.5f"
print(fmt % (density * 100, str(ctx), n, m, k, ratio, costs[0], costs[1],
ratio_baseline, costs_baseline[0], costs_baseline[1]))
print("A = sparse NDArray of shape(m, k)")
print("B = dense NDArray of shape(k, n)")
print("dot_forward\tdot(csr, dns)")
print('density(%)\tcontext\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse'
'\tt_scipy_dense/t_scipy_sparse\tt_scipy_dense\tt_scipy_sparse')
check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
# TODO(haibin) make these runtime options
m = 512
k = [50000, 100000]
n = [64, 128]
density = [1.00, 0.90, 0.70, 0.50, 0.30, 0.20, 0.10, 0.07, 0.05, 0.02, 0.01, 0.005, 0.001]
num_repeat = 10
# contexts = [mx.cpu(), mx.gpu(0)]
contexts = [mx.cpu()]
for i in range(2):
for ctx in contexts:
for den in density:
bench_dot_forward(m, k[i], n[i], den, ctx, num_repeat)
print("dot_backward\tdot(csr.T, dns)")
print('density(%)\tcontext\tn\tm\tk\tt_dense/t_sparse\tt_dense\tt_sparse'
'\tt_scipy_dense/t_scipy_sparse\tt_scipy_dense\tt_scipy_sparse')
for i in range(2):
for ctx in contexts:
for den in density:
bench_dot_backward(m, k[i], n[i], den, ctx, num_repeat) | [
"def",
"test_dot_synthetic",
"(",
")",
":",
"def",
"measure_cost_forward_baseline",
"(",
"repeat",
",",
"dot",
",",
"lhs",
",",
"rhs",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"repeat",
")",
":",
"dot",
"(",
"lhs",
",",
"rhs",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"diff",
"=",
"end",
"-",
"start",
"return",
"diff",
"/",
"repeat",
"def",
"measure_cost_backward_baseline",
"(",
"repeat",
",",
"dot",
",",
"transpose",
",",
"lhs",
",",
"rhs",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"repeat",
")",
":",
"dot",
"(",
"transpose",
"(",
"lhs",
")",
",",
"rhs",
")",
"end",
"=",
"time",
".",
"time",
"(",
")",
"diff",
"=",
"end",
"-",
"start",
"return",
"diff",
"/",
"repeat",
"def",
"bench_dot_forward",
"(",
"m",
",",
"k",
",",
"n",
",",
"density",
",",
"ctx",
",",
"repeat",
")",
":",
"set_default_context",
"(",
"ctx",
")",
"dns",
"=",
"mx",
".",
"nd",
".",
"random",
".",
"uniform",
"(",
"shape",
"=",
"(",
"k",
",",
"n",
")",
")",
".",
"copyto",
"(",
"ctx",
")",
"data_shape",
"=",
"(",
"m",
",",
"k",
")",
"csr_data",
"=",
"rand_ndarray",
"(",
"data_shape",
",",
"'csr'",
",",
"density",
")",
"dns_data",
"=",
"csr_data",
".",
"tostype",
"(",
"'default'",
")",
"rhs_dns_np",
"=",
"dns",
".",
"asnumpy",
"(",
")",
"lhs_csr_sp",
"=",
"sp",
".",
"csr_matrix",
"(",
"dns_data",
".",
"asnumpy",
"(",
")",
")",
"# csr in scipy",
"lhs_dns_np",
"=",
"lhs_csr_sp",
".",
"tostype",
"(",
"'default'",
")",
"data",
"=",
"[",
"dns_data",
",",
"csr_data",
"]",
"costs",
"=",
"[",
"]",
"for",
"d",
"in",
"data",
":",
"dns",
".",
"wait_to_read",
"(",
")",
"d",
".",
"wait_to_read",
"(",
")",
"cost",
"=",
"measure_cost",
"(",
"repeat",
",",
"mx",
".",
"nd",
".",
"dot",
",",
"d",
",",
"dns",
")",
"costs",
".",
"append",
"(",
"cost",
")",
"ratio",
"=",
"costs",
"[",
"0",
"]",
"/",
"costs",
"[",
"1",
"]",
"costs_baseline",
"=",
"[",
"]",
"cost",
"=",
"measure_cost_forward_baseline",
"(",
"repeat",
",",
"np",
".",
"dot",
",",
"lhs_dns_np",
",",
"rhs_dns_np",
")",
"costs_baseline",
".",
"append",
"(",
"cost",
")",
"cost",
"=",
"measure_cost_forward_baseline",
"(",
"repeat",
",",
"sp",
".",
"spmatrix",
".",
"dot",
",",
"lhs_csr_sp",
",",
"rhs_dns_np",
")",
"costs_baseline",
".",
"append",
"(",
"cost",
")",
"ratio_baseline",
"=",
"costs_baseline",
"[",
"0",
"]",
"/",
"costs_baseline",
"[",
"1",
"]",
"fmt",
"=",
"\"%0.1f\\t\\t%s\\t%d\\t%d\\t%d\\t%0.2f\\t\\t\\t%0.2f\\t%0.5f\\t\\t%0.2f\\t\\t\\t\\t%0.6f\\t%0.5f\"",
"print",
"(",
"fmt",
"%",
"(",
"density",
"*",
"100",
",",
"str",
"(",
"ctx",
")",
",",
"n",
",",
"m",
",",
"k",
",",
"ratio",
",",
"costs",
"[",
"0",
"]",
",",
"costs",
"[",
"1",
"]",
",",
"ratio_baseline",
",",
"costs_baseline",
"[",
"0",
"]",
",",
"costs_baseline",
"[",
"1",
"]",
")",
")",
"def",
"bench_dot_backward",
"(",
"m",
",",
"k",
",",
"n",
",",
"density",
",",
"ctx",
",",
"repeat",
")",
":",
"set_default_context",
"(",
"ctx",
")",
"dns",
"=",
"mx",
".",
"nd",
".",
"random",
".",
"uniform",
"(",
"shape",
"=",
"(",
"m",
",",
"n",
")",
")",
".",
"copyto",
"(",
"ctx",
")",
"data_shape",
"=",
"(",
"m",
",",
"k",
")",
"csr_data",
"=",
"rand_ndarray",
"(",
"data_shape",
",",
"'csr'",
",",
"density",
")",
"dns_data",
"=",
"csr_data",
".",
"tostype",
"(",
"'default'",
")",
"rhs_dns_np",
"=",
"dns",
".",
"asnumpy",
"(",
")",
"lhs_csr_sp",
"=",
"sp",
".",
"csr_matrix",
"(",
"dns_data",
".",
"asnumpy",
"(",
")",
")",
"lhs_dns_np",
"=",
"lhs_csr_sp",
".",
"tostype",
"(",
"'default'",
")",
"data",
"=",
"[",
"dns_data",
",",
"csr_data",
"]",
"costs",
"=",
"[",
"]",
"for",
"d",
"in",
"data",
":",
"dns",
".",
"wait_to_read",
"(",
")",
"d",
".",
"wait_to_read",
"(",
")",
"cost",
"=",
"measure_cost",
"(",
"repeat",
",",
"mx",
".",
"nd",
".",
"dot",
",",
"d",
",",
"dns",
",",
"transpose_a",
"=",
"True",
")",
"costs",
".",
"append",
"(",
"cost",
")",
"ratio",
"=",
"costs",
"[",
"0",
"]",
"/",
"costs",
"[",
"1",
"]",
"costs_baseline",
"=",
"[",
"]",
"cost",
"=",
"measure_cost_backward_baseline",
"(",
"repeat",
",",
"np",
".",
"dot",
",",
"np",
".",
"transpose",
",",
"lhs_dns_np",
",",
"rhs_dns_np",
")",
"costs_baseline",
".",
"append",
"(",
"cost",
")",
"cost",
"=",
"measure_cost_backward_baseline",
"(",
"repeat",
",",
"sp",
".",
"spmatrix",
".",
"dot",
",",
"sp",
".",
"spmatrix",
".",
"transpose",
",",
"lhs_csr_sp",
",",
"rhs_dns_np",
")",
"costs_baseline",
".",
"append",
"(",
"cost",
")",
"ratio_baseline",
"=",
"costs_baseline",
"[",
"0",
"]",
"/",
"costs_baseline",
"[",
"1",
"]",
"fmt",
"=",
"\"%0.1f\\t\\t%s\\t%d\\t%d\\t%d\\t%0.2f\\t\\t\\t%0.2f\\t%0.5f\\t\\t%0.2f\\t\\t\\t\\t%0.6f\\t%0.5f\"",
"print",
"(",
"fmt",
"%",
"(",
"density",
"*",
"100",
",",
"str",
"(",
"ctx",
")",
",",
"n",
",",
"m",
",",
"k",
",",
"ratio",
",",
"costs",
"[",
"0",
"]",
",",
"costs",
"[",
"1",
"]",
",",
"ratio_baseline",
",",
"costs_baseline",
"[",
"0",
"]",
",",
"costs_baseline",
"[",
"1",
"]",
")",
")",
"print",
"(",
"\"A = sparse NDArray of shape(m, k)\"",
")",
"print",
"(",
"\"B = dense NDArray of shape(k, n)\"",
")",
"print",
"(",
"\"dot_forward\\tdot(csr, dns)\"",
")",
"print",
"(",
"'density(%)\\tcontext\\tn\\tm\\tk\\tt_dense/t_sparse\\tt_dense\\tt_sparse'",
"'\\tt_scipy_dense/t_scipy_sparse\\tt_scipy_dense\\tt_scipy_sparse'",
")",
"check_call",
"(",
"_LIB",
".",
"MXSetNumOMPThreads",
"(",
"ctypes",
".",
"c_int",
"(",
"args",
".",
"num_omp_threads",
")",
")",
")",
"# TODO(haibin) make these runtime options",
"m",
"=",
"512",
"k",
"=",
"[",
"50000",
",",
"100000",
"]",
"n",
"=",
"[",
"64",
",",
"128",
"]",
"density",
"=",
"[",
"1.00",
",",
"0.90",
",",
"0.70",
",",
"0.50",
",",
"0.30",
",",
"0.20",
",",
"0.10",
",",
"0.07",
",",
"0.05",
",",
"0.02",
",",
"0.01",
",",
"0.005",
",",
"0.001",
"]",
"num_repeat",
"=",
"10",
"# contexts = [mx.cpu(), mx.gpu(0)]",
"contexts",
"=",
"[",
"mx",
".",
"cpu",
"(",
")",
"]",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"for",
"ctx",
"in",
"contexts",
":",
"for",
"den",
"in",
"density",
":",
"bench_dot_forward",
"(",
"m",
",",
"k",
"[",
"i",
"]",
",",
"n",
"[",
"i",
"]",
",",
"den",
",",
"ctx",
",",
"num_repeat",
")",
"print",
"(",
"\"dot_backward\\tdot(csr.T, dns)\"",
")",
"print",
"(",
"'density(%)\\tcontext\\tn\\tm\\tk\\tt_dense/t_sparse\\tt_dense\\tt_sparse'",
"'\\tt_scipy_dense/t_scipy_sparse\\tt_scipy_dense\\tt_scipy_sparse'",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"for",
"ctx",
"in",
"contexts",
":",
"for",
"den",
"in",
"density",
":",
"bench_dot_backward",
"(",
"m",
",",
"k",
"[",
"i",
"]",
",",
"n",
"[",
"i",
"]",
",",
"den",
",",
"ctx",
",",
"num_repeat",
")"
] | https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/benchmark/python/sparse/sparse_op.py#L136-L241 |
||
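The benchmark above calls a `measure_cost` helper that is not defined in this record (it lives elsewhere in sparse_op.py). A minimal sketch of that timing pattern, assuming MXNet's asynchronous execution engine, would be:

```python
import time

# Minimal sketch of the `measure_cost` timing helper used above; the real
# helper is defined elsewhere in sparse_op.py. Forcing a read on the result
# is the key detail: mx.nd.dot is asynchronous, so without wait_to_read()
# the loop would time only operator dispatch, not the actual computation.
def measure_cost(repeat, f, *args, **kwargs):
    start = time.time()
    for _ in range(repeat):
        f(*args, **kwargs).wait_to_read()
    return (time.time() - start) / repeat

# e.g. measure_cost(10, mx.nd.dot, csr_data, dns)
```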
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/boto/boto/ec2/elb/loadbalancer.py | python | LoadBalancer.get_attributes | (self, force=False) | return self._attributes | Gets the LbAttributes. The Attributes will be cached.
:type force: bool
:param force: Ignore cache value and reload.
:rtype: boto.ec2.elb.attributes.LbAttributes
:return: The LbAttributes object | Gets the LbAttributes. The Attributes will be cached. | [
"Gets",
"the",
"LbAttributes",
".",
"The",
"Attributes",
"will",
"be",
"cached",
"."
] | def get_attributes(self, force=False):
"""
Gets the LbAttributes. The Attributes will be cached.
:type force: bool
:param force: Ignore cache value and reload.
:rtype: boto.ec2.elb.attributes.LbAttributes
:return: The LbAttributes object
"""
if not self._attributes or force:
self._attributes = self.connection.get_all_lb_attributes(self.name)
return self._attributes | [
"def",
"get_attributes",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"_attributes",
"or",
"force",
":",
"self",
".",
"_attributes",
"=",
"self",
".",
"connection",
".",
"get_all_lb_attributes",
"(",
"self",
".",
"name",
")",
"return",
"self",
".",
"_attributes"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/ec2/elb/loadbalancer.py#L211-L223 |
|
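The `force` flag above implements a simple cache-or-reload pattern. A self-contained sketch of the same idea (class and loader names here are illustrative, not part of boto):

```python
class CachedAttributes(object):
    def __init__(self, loader):
        self._loader = loader      # any zero-argument callable
        self._attributes = None

    def get_attributes(self, force=False):
        # Load lazily on first access; reload only when force=True.
        if not self._attributes or force:
            self._attributes = self._loader()
        return self._attributes

attrs = CachedAttributes(lambda: {'cross_zone': True})
attrs.get_attributes()             # first call hits the loader
attrs.get_attributes(force=True)   # bypasses the cached value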
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/traitlets/py2/traitlets/traitlets.py | python | Union.__init__ | (self, trait_types, **kwargs) | Construct a Union trait.
This trait allows values that are allowed by at least one of the
specified trait types. A Union traitlet cannot have metadata on
its own, besides the metadata of the listed types.
Parameters
----------
trait_types: sequence
The list of trait types of length at least 1.
Notes
-----
Union([Float(), Bool(), Int()]) attempts to validate the provided values
with the validation function of Float, then Bool, and finally Int. | Construct a Union trait. | [
"Construct",
"a",
"Union",
"trait",
"."
] | def __init__(self, trait_types, **kwargs):
"""Construct a Union trait.
This trait allows values that are allowed by at least one of the
specified trait types. A Union traitlet cannot have metadata on
its own, besides the metadata of the listed types.
Parameters
----------
trait_types: sequence
The list of trait types of length at least 1.
Notes
-----
Union([Float(), Bool(), Int()]) attempts to validate the provided values
with the validation function of Float, then Bool, and finally Int.
"""
self.trait_types = trait_types
self.info_text = " or ".join([tt.info() for tt in self.trait_types])
super(Union, self).__init__(**kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"trait_types",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"trait_types",
"=",
"trait_types",
"self",
".",
"info_text",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"tt",
".",
"info",
"(",
")",
"for",
"tt",
"in",
"self",
".",
"trait_types",
"]",
")",
"super",
"(",
"Union",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py2/traitlets/traitlets.py#L1761-L1780 |
||
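A usage sketch of the Union trait described above, using the public traitlets API:

```python
from traitlets import HasTraits, Union, Float, Bool, Int

class Knob(HasTraits):
    # Candidate traits are tried in order: Float, then Bool, then Int.
    value = Union([Float(), Bool(), Int()])

k = Knob()
k.value = 1.5    # validated by Float
k.value = 7      # an int is coerced by Float, so it is stored as 7.0
# k.value = "x"  # would raise TraitError: no member trait accepts a string
```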
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_controls.py | python | ToolBarBase.FindById | (*args, **kwargs) | return _controls_.ToolBarBase_FindById(*args, **kwargs) | FindById(self, int toolid) -> ToolBarToolBase | FindById(self, int toolid) -> ToolBarToolBase | [
"FindById",
"(",
"self",
"int",
"toolid",
")",
"-",
">",
"ToolBarToolBase"
] | def FindById(*args, **kwargs):
"""FindById(self, int toolid) -> ToolBarToolBase"""
return _controls_.ToolBarBase_FindById(*args, **kwargs) | [
"def",
"FindById",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ToolBarBase_FindById",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L3903-L3905 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/command/sdist.py | python | sdist.checking_metadata | (self) | return self.metadata_check | Callable used for the check sub-command.
Placed here so user_options can view it | Callable used for the check sub-command. | [
"Callable",
"used",
"for",
"the",
"check",
"sub",
"-",
"command",
"."
] | def checking_metadata(self):
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check | [
"def",
"checking_metadata",
"(",
"self",
")",
":",
"return",
"self",
".",
"metadata_check"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/command/sdist.py#L40-L44 |
|
google/llvm-propeller | 45c226984fe8377ebfb2ad7713c680d652ba678d | llvm/utils/collect_and_build_with_pgo.py | python | _looks_like_llvm_dir | (directory) | return 'llvm' in include_listing | Arbitrary set of heuristics to determine if `directory` is an llvm dir.
Errs on the side of false-positives. | Arbitrary set of heuristics to determine if `directory` is an llvm dir. | [
"Arbitrary",
"set",
"of",
"heuristics",
"to",
"determine",
"if",
"directory",
"is",
"an",
"llvm",
"dir",
"."
] | def _looks_like_llvm_dir(directory):
"""Arbitrary set of heuristics to determine if `directory` is an llvm dir.
Errs on the side of false-positives."""
contents = set(os.listdir(directory))
expected_contents = [
'CODE_OWNERS.TXT',
'cmake',
'docs',
'include',
'utils',
]
if not all(c in contents for c in expected_contents):
return False
try:
include_listing = os.listdir(os.path.join(directory, 'include'))
except NotADirectoryError:
return False
return 'llvm' in include_listing | [
"def",
"_looks_like_llvm_dir",
"(",
"directory",
")",
":",
"contents",
"=",
"set",
"(",
"os",
".",
"listdir",
"(",
"directory",
")",
")",
"expected_contents",
"=",
"[",
"'CODE_OWNERS.TXT'",
",",
"'cmake'",
",",
"'docs'",
",",
"'include'",
",",
"'utils'",
",",
"]",
"if",
"not",
"all",
"(",
"c",
"in",
"contents",
"for",
"c",
"in",
"expected_contents",
")",
":",
"return",
"False",
"try",
":",
"include_listing",
"=",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'include'",
")",
")",
"except",
"NotADirectoryError",
":",
"return",
"False",
"return",
"'llvm'",
"in",
"include_listing"
] | https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/llvm/utils/collect_and_build_with_pgo.py#L412-L434 |
|
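A usage sketch for the heuristic above; the checkout path is hypothetical:

```python
import os

candidate = os.path.expanduser('~/src/llvm-project/llvm')  # hypothetical path
if os.path.isdir(candidate) and _looks_like_llvm_dir(candidate):
    print('%s looks like an LLVM source directory' % candidate)
```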
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/SANS/isis_reduction_steps.py | python | UnitsConvert.get_range | (self) | return str(self.wav_low) + '_' + str(self.wav_high) | Get the values of the highest and lowest boundaries
@return low'_'high | Get the values of the highest and lowest boundaries | [
"Get",
"the",
"values",
"of",
"the",
"highest",
"and",
"lowest",
"boundaries"
] | def get_range(self):
"""
Get the values of the highest and lowest boundaries
@return low'_'high
"""
return str(self.wav_low) + '_' + str(self.wav_high) | [
"def",
"get_range",
"(",
"self",
")",
":",
"return",
"str",
"(",
"self",
".",
"wav_low",
")",
"+",
"'_'",
"+",
"str",
"(",
"self",
".",
"wav_high",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/isis_reduction_steps.py#L3106-L3111 |
|
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py | python | QueueBase.dtypes | (self) | return self._dtypes | The list of dtypes for each component of a queue element. | The list of dtypes for each component of a queue element. | [
"The",
"list",
"of",
"dtypes",
"for",
"each",
"component",
"of",
"a",
"queue",
"element",
"."
] | def dtypes(self):
"""The list of dtypes for each component of a queue element."""
return self._dtypes | [
"def",
"dtypes",
"(",
"self",
")",
":",
"return",
"self",
".",
"_dtypes"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py#L206-L208 |
|
psi4/psi4 | be533f7f426b6ccc263904e55122899b16663395 | psi4/driver/qcdb/libmintsbasisset.py | python | BasisSet.constructor_zero_ao_basis | (self) | Constructs a zero AO basis set | Constructs a zero AO basis set | [
"Constructs",
"a",
"zero",
"AO",
"basis",
"set"
] | def constructor_zero_ao_basis(self):
"""Constructs a zero AO basis set"""
if not self.initialized_shared:
self.initialize_singletons()
# Add a dummy atom at the origin, to hold this basis function
self.molecule = Molecule()
self.molecule.add_atom(0, 0.0, 0.0, 0.0, 'X')
# Fill with data representing a single S function, at the origin, with 0 exponent
self.n_uprimitive = 1
self.n_shells = 1
self.PYnprimitive = 1
self.PYnao = 1
self.PYnbf = 1
self.uerd_coefficients = [1.0]
self.n_prim_per_shell = [1]
self.uexponents = [0.0]
self.ucoefficients = [1.0]
self.uoriginal_coefficients = [1.0]
self.shell_first_ao = [0]
self.shell_first_basis_function = [0]
self.ao_to_shell = [0]
self.function_to_shell = [0]
self.function_center = [0]
self.shell_center = [0]
self.center_to_nshell = [0]
self.center_to_shell = [0]
self.puream = False
self.PYmax_am = 0
self.PYmax_nprimitive = 1
self.xyz = [0.0, 0.0, 0.0]
self.name = '(Empty Basis Set)'
self.shells = []
self.shells.append(ShellInfo(0, self.uoriginal_coefficients,
self.uexponents, 'Cartesian', 0, self.xyz, 0)) | [
"def",
"constructor_zero_ao_basis",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"initialized_shared",
":",
"self",
".",
"initialize_singletons",
"(",
")",
"# Add a dummy atom at the origin, to hold this basis function",
"self",
".",
"molecule",
"=",
"Molecule",
"(",
")",
"self",
".",
"molecule",
".",
"add_atom",
"(",
"0",
",",
"0.0",
",",
"0.0",
",",
"0.0",
",",
"'X'",
")",
"# Fill with data representing a single S function, at the origin, with 0 exponent",
"self",
".",
"n_uprimitive",
"=",
"1",
"self",
".",
"n_shells",
"=",
"1",
"self",
".",
"PYnprimitive",
"=",
"1",
"self",
".",
"PYnao",
"=",
"1",
"self",
".",
"PYnbf",
"=",
"1",
"self",
".",
"uerd_coefficients",
"=",
"[",
"1.0",
"]",
"self",
".",
"n_prim_per_shell",
"=",
"[",
"1",
"]",
"self",
".",
"uexponents",
"=",
"[",
"0.0",
"]",
"self",
".",
"ucoefficients",
"=",
"[",
"1.0",
"]",
"self",
".",
"uoriginal_coefficients",
"=",
"[",
"1.0",
"]",
"self",
".",
"shell_first_ao",
"=",
"[",
"0",
"]",
"self",
".",
"shell_first_basis_function",
"=",
"[",
"0",
"]",
"self",
".",
"ao_to_shell",
"=",
"[",
"0",
"]",
"self",
".",
"function_to_shell",
"=",
"[",
"0",
"]",
"self",
".",
"function_center",
"=",
"[",
"0",
"]",
"self",
".",
"shell_center",
"=",
"[",
"0",
"]",
"self",
".",
"center_to_nshell",
"=",
"[",
"0",
"]",
"self",
".",
"center_to_shell",
"=",
"[",
"0",
"]",
"self",
".",
"puream",
"=",
"False",
"self",
".",
"PYmax_am",
"=",
"0",
"self",
".",
"PYmax_nprimitive",
"=",
"1",
"self",
".",
"xyz",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
"self",
".",
"name",
"=",
"'(Empty Basis Set)'",
"self",
".",
"shells",
"=",
"[",
"]",
"self",
".",
"shells",
".",
"append",
"(",
"ShellInfo",
"(",
"0",
",",
"self",
".",
"uoriginal_coefficients",
",",
"self",
".",
"uexponents",
",",
"'Cartesian'",
",",
"0",
",",
"self",
".",
"xyz",
",",
"0",
")",
")"
] | https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/libmintsbasisset.py#L208-L243 |
||
bundy-dns/bundy | 3d41934996b82b0cd2fe22dd74d2abc1daba835d | src/lib/python/bundy/xfrin/diff.py | python | Diff.get_single_update_buffers | (self) | Returns the current buffers of changes not yet passed into the data
source. It is a tuple of the current deletions and additions, which
each are in a form like [('delete', rrset), ('delete', rrset), ...],
and [('add', rrset), ('add', rrset), ..].
Probably useful only for testing and introspection purposes. Don't
modify the lists.
Raises a ValueError if the buffer is not in single_update_mode. | Returns the current buffers of changes not yet passed into the data
source. It is a tuple of the current deletions and additions, which
each are in a form like [('delete', rrset), ('delete', rrset), ...],
and [('add', rrset), ('add', rrset), ..]. | [
"Returns",
"the",
"current",
"buffers",
"of",
"changes",
"not",
"yet",
"passed",
"into",
"the",
"data",
"source",
".",
"It",
"is",
"a",
"tuple",
"of",
"the",
"current",
"deletions",
"and",
"additions",
"which",
"each",
"are",
"in",
"a",
"form",
"like",
"[",
"(",
"delete",
"rrset",
")",
"(",
"delete",
"rrset",
")",
"...",
"]",
"and",
"[",
"(",
"add",
"rrset",
")",
"(",
"add",
"rrset",
")",
"..",
"]",
"."
] | def get_single_update_buffers(self):
"""
Returns the current buffers of changes not yet passed into the data
source. It is a tuple of the current deletions and additions, which
each are in a form like [('delete', rrset), ('delete', rrset), ...],
and [('add', rrset), ('add', rrset), ..].
Probably useful only for testing and introspection purposes. Don't
modify the lists.
Raises a ValueError if the buffer is not in single_update_mode.
"""
if not self.__single_update_mode:
raise ValueError("Separate buffers requested in single-update mode")
else:
return (self.__deletions, self.__additions) | [
"def",
"get_single_update_buffers",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__single_update_mode",
":",
"raise",
"ValueError",
"(",
"\"Separate buffers requested in single-update mode\"",
")",
"else",
":",
"return",
"(",
"self",
".",
"__deletions",
",",
"self",
".",
"__additions",
")"
] | https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/xfrin/diff.py#L369-L384 |
||
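A hedged sketch of the return shape documented above, assuming `diff` was constructed in single_update_mode and the rrset names stand in for bundy RRset objects built elsewhere:

```python
deletions, additions = diff.get_single_update_buffers()
# deletions looks like [('delete', old_soa_rrset), ('delete', some_rrset), ...]
# additions looks like [('add', new_soa_rrset), ('add', some_rrset), ...]
for operation, rrset in deletions + additions:
    print(operation, rrset)
```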
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/keras/saving/saved_model/load.py | python | _restore_layer_activation_loss | (layer) | | | Restore activation loss from SavedModel. | Restore activation loss from SavedModel. | [
"Restore",
"actiation",
"loss",
"from",
"SavedModel",
"."
] | def _restore_layer_activation_loss(layer):
"""Restore actiation loss from SavedModel."""
# Use wrapped activity regularizer function if the layer's activity
# regularizer wasn't created during initialization.
activity_regularizer = getattr(_get_keras_attr(layer),
'activity_regularizer_fn', None)
if activity_regularizer and not layer.activity_regularizer:
try:
layer.activity_regularizer = activity_regularizer
except AttributeError:
# This may happen if a layer wrapper is saved with an activity
# regularizer. The wrapper object's activity regularizer is unsettable.
pass | [
"def",
"_restore_layer_activation_loss",
"(",
"layer",
")",
":",
"# Use wrapped activity regularizer function if the layer's activity",
"# regularizer wasn't created during initialization.",
"activity_regularizer",
"=",
"getattr",
"(",
"_get_keras_attr",
"(",
"layer",
")",
",",
"'activity_regularizer_fn'",
",",
"None",
")",
"if",
"activity_regularizer",
"and",
"not",
"layer",
".",
"activity_regularizer",
":",
"try",
":",
"layer",
".",
"activity_regularizer",
"=",
"activity_regularizer",
"except",
"AttributeError",
":",
"# This may happen if a layer wrapper is saved with an activity",
"# regularizer. The wrapper object's activity regularizer is unsettable.",
"pass"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/saving/saved_model/load.py#L934-L946 |
||
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/python/ops/nn_ops.py | python | bias_add | (value, bias, data_format=None, name=None) | Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`. | Adds `bias` to `value`. | [
"Adds",
"bias",
"to",
"value",
"."
] | def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.op_scope([value, bias], name, "BiasAdd") as name:
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name) | [
"def",
"bias_add",
"(",
"value",
",",
"bias",
",",
"data_format",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"value",
",",
"bias",
"]",
",",
"name",
",",
"\"BiasAdd\"",
")",
"as",
"name",
":",
"value",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"value",
",",
"name",
"=",
"\"input\"",
")",
"bias",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"bias",
",",
"dtype",
"=",
"value",
".",
"dtype",
",",
"name",
"=",
"\"bias\"",
")",
"return",
"gen_nn_ops",
".",
"_bias_add",
"(",
"value",
",",
"bias",
",",
"data_format",
"=",
"data_format",
",",
"name",
"=",
"name",
")"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/nn_ops.py#L368-L391 |
||
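A minimal sketch of this op through its public alias `tf.nn.bias_add`, written for the graph-mode TensorFlow of this era:

```python
import tensorflow as tf

value = tf.constant([[1.0, 2.0],
                     [3.0, 4.0]])   # shape (2, 2); any rank works
bias = tf.constant([0.5, -0.5])     # 1-D, size matches the last dimension

out = tf.nn.bias_add(value, bias)

with tf.Session() as sess:
    print(sess.run(out))            # [[1.5 1.5] [3.5 3.5]]
```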
hughperkins/tf-coriander | 970d3df6c11400ad68405f22b0c42a52374e94ca | tensorflow/python/training/input.py | python | range_input_producer | (limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None) | Produces the integers from 0 to limit-1 in a queue.
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection. | Produces the integers from 0 to limit-1 in a queue. | [
"Produces",
"the",
"integers",
"from",
"0",
"to",
"limit",
"-",
"1",
"in",
"a",
"queue",
"."
] | def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.name_scope(name, "input_producer", [limit]) as name:
range_tensor = math_ops.range(limit)
return input_producer(
range_tensor, [], num_epochs, shuffle, seed, capacity,
shared_name, name, "fraction_of_%d_full" % capacity) | [
"def",
"range_input_producer",
"(",
"limit",
",",
"num_epochs",
"=",
"None",
",",
"shuffle",
"=",
"True",
",",
"seed",
"=",
"None",
",",
"capacity",
"=",
"32",
",",
"shared_name",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"input_producer\"",
",",
"[",
"limit",
"]",
")",
"as",
"name",
":",
"range_tensor",
"=",
"math_ops",
".",
"range",
"(",
"limit",
")",
"return",
"input_producer",
"(",
"range_tensor",
",",
"[",
"]",
",",
"num_epochs",
",",
"shuffle",
",",
"seed",
",",
"capacity",
",",
"shared_name",
",",
"name",
",",
"\"fraction_of_%d_full\"",
"%",
"capacity",
")"
] | https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/training/input.py#L199-L225 |
||
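A usage sketch of the legacy queue-based pipeline this function feeds; in this API the queue runners must be started explicitly:

```python
import tensorflow as tf

index_queue = tf.train.range_input_producer(limit=10, shuffle=True, seed=0)
index = index_queue.dequeue()

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(index))          # one integer from 0..9, shuffled per epoch
    coord.request_stop()
    coord.join(threads)
```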
facebookresearch/ELF | 1f790173095cd910976d9f651b80beb872ec5d12 | vendor/pybind11/tools/clang/cindex.py | python | Type.is_pod | (self) | return conf.lib.clang_isPODType(self) | Determine whether this Type represents plain old data (POD). | Determine whether this Type represents plain old data (POD). | [
"Determine",
"whether",
"this",
"Type",
"represents",
"plain",
"old",
"data",
"(",
"POD",
")",
"."
] | def is_pod(self):
"""Determine whether this Type represents plain old data (POD)."""
return conf.lib.clang_isPODType(self) | [
"def",
"is_pod",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_isPODType",
"(",
"self",
")"
] | https://github.com/facebookresearch/ELF/blob/1f790173095cd910976d9f651b80beb872ec5d12/vendor/pybind11/tools/clang/cindex.py#L2038-L2040 |
|
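A usage sketch via clang.cindex, assuming libclang is installed and discoverable; the translation unit is built from an in-memory buffer:

```python
import clang.cindex as ci

index = ci.Index.create()
tu = index.parse('t.c', unsaved_files=[('t.c', 'struct P { int x; };')])
p = next(c for c in tu.cursor.get_children() if c.spelling == 'P')
print(p.type.is_pod())   # True: a plain struct of ints is POD
```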
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/importlib/_bootstrap_external.py | python | SourceLoader._cache_bytecode | (self, source_path, cache_path, data) | return self.set_data(cache_path, data) | Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
The source path is needed in order to correctly transfer permissions | Optional method which writes data (bytes) to a file path (a str). | [
"Optional",
"method",
"which",
"writes",
"data",
"(",
"bytes",
")",
"to",
"a",
"file",
"path",
"(",
"a",
"str",
")",
"."
] | def _cache_bytecode(self, source_path, cache_path, data):
"""Optional method which writes data (bytes) to a file path (a str).
Implementing this method allows for the writing of bytecode files.
The source path is needed in order to correctly transfer permissions
"""
# For backwards compatibility, we delegate to set_data()
return self.set_data(cache_path, data) | [
"def",
"_cache_bytecode",
"(",
"self",
",",
"source_path",
",",
"cache_path",
",",
"data",
")",
":",
"# For backwards compatibility, we delegate to set_data()",
"return",
"self",
".",
"set_data",
"(",
"cache_path",
",",
"data",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/importlib/_bootstrap_external.py#L758-L766 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/debug/lib/debug_data.py | python | DebugTensorDatum.output_slot | (self) | return self._output_slot | Output slot index from which the tensor value was dumped.
Returns:
(`int`) output slot index watched by the debug op. | Output slot index from which the tensor value was dumped. | [
"Output",
"slot",
"index",
"from",
"which",
"the",
"tensor",
"value",
"was",
"dumped",
"."
] | def output_slot(self):
"""Output slot index from which the tensor value was dumped.
Returns:
(`int`) output slot index watched by the debug op.
"""
return self._output_slot | [
"def",
"output_slot",
"(",
"self",
")",
":",
"return",
"self",
".",
"_output_slot"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/debug/lib/debug_data.py#L404-L411 |
|
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/poplib.py | python | POP3.noop | (self) | return self._shortcmd('NOOP') | Does nothing.
One supposes the response indicates the server is alive. | Does nothing. | [
"Does",
"nothing",
"."
] | def noop(self):
"""Does nothing.
One supposes the response indicates the server is alive.
"""
return self._shortcmd('NOOP') | [
"def",
"noop",
"(",
"self",
")",
":",
"return",
"self",
".",
"_shortcmd",
"(",
"'NOOP'",
")"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/poplib.py#L235-L240 |
|
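A usage sketch; the host and credentials are hypothetical, and NOOP serves as a cheap keep-alive probe:

```python
import poplib

conn = poplib.POP3('mail.example.com')   # hypothetical host
conn.user('alice')                       # hypothetical credentials
conn.pass_('secret')
print(conn.noop())                       # '+OK' from a live server
conn.quit()
```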
cornell-zhang/heterocl | 6d9e4b4acc2ee2707b2d25b27298c0335bccedfd | python/heterocl/tvm/target.py | python | current_target | (allow_none=True) | return Target.current | Returns the current target.
Parameters
----------
allow_none : bool
Whether to allow the current target to be none
Raises
------
RuntimeError if the current target is not set. | Returns the current target. | [
"Returns",
"the",
"current",
"target",
"."
] | def current_target(allow_none=True):
"""Returns the current target.
Parameters
----------
allow_none : bool
Whether to allow the current target to be none
Raises
------
RuntimeError if the current target is not set.
"""
if Target.current:
return Target.current
if not allow_none:
raise RuntimeError(
"Requires a current target in generic function, but it is not set. "
"Please set it using `with TargetObject:`")
return Target.current | [
"def",
"current_target",
"(",
"allow_none",
"=",
"True",
")",
":",
"if",
"Target",
".",
"current",
":",
"return",
"Target",
".",
"current",
"if",
"not",
"allow_none",
":",
"raise",
"RuntimeError",
"(",
"\"Requires a current target in generic function, but it is not set. \"",
"\"Please set it using `with TargetObject:`\"",
")",
"return",
"Target",
".",
"current"
] | https://github.com/cornell-zhang/heterocl/blob/6d9e4b4acc2ee2707b2d25b27298c0335bccedfd/python/heterocl/tvm/target.py#L293-L311 |
|
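A hedged sketch of the `with target:` pattern the error message refers to, assuming a TVM-style constructor such as `target.cuda()` is available in this vendored tvm module:

```python
from heterocl.tvm import target

tgt = target.cuda()                 # assumed TVM-style target constructor
with tgt:                           # pushes tgt as Target.current
    assert target.current_target() is tgt
print(target.current_target(allow_none=True))   # None again outside the block
```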
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/quantize/python/input_to_ops.py | python | InputToOps.__init__ | (self, graph) | Initializes mapping from tensor's name to ops that take it.
Helps find edges between ops faster and avoids iterating over the whole
graph. The mapping is of type Dict[tf.Tensor, Set[tf.Operation]].
Note: while inserting operations into the graph, we do not update the
mapping, assuming that insertion points in the graph are never adjacent.
With that restriction, an out of date mapping still works fine.
Args:
graph: Graph to process. | Initializes mapping from tensor's name to ops that take it. | [
"Initializes",
"mapping",
"from",
"tensor",
"s",
"name",
"to",
"ops",
"that",
"take",
"it",
"."
] | def __init__(self, graph):
"""Initializes mapping from tensor's name to ops that take it.
Helps find edges between ops faster and avoids iterating over the whole
graph. The mapping is of type Dict[tf.Tensor, Set[tf.Operation]].
Note: while inserting operations into the graph, we do not update the
mapping, assuming that insertion points in the graph are never adjacent.
With that restriction, an out of date mapping still works fine.
Args:
graph: Graph to process.
"""
self.mapping = collections.defaultdict(set)
for op in (op for op in graph.get_operations()):
if op.name.startswith(common.SKIPPED_PREFIXES):
continue
for op_input in op.inputs:
self.mapping[op_input].add(op) | [
"def",
"__init__",
"(",
"self",
",",
"graph",
")",
":",
"self",
".",
"mapping",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"for",
"op",
"in",
"(",
"op",
"for",
"op",
"in",
"graph",
".",
"get_operations",
"(",
")",
")",
":",
"if",
"op",
".",
"name",
".",
"startswith",
"(",
"common",
".",
"SKIPPED_PREFIXES",
")",
":",
"continue",
"for",
"op_input",
"in",
"op",
".",
"inputs",
":",
"self",
".",
"mapping",
"[",
"op_input",
"]",
".",
"add",
"(",
"op",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/quantize/python/input_to_ops.py#L28-L46 |
||
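A usage sketch of the mapping this constructor builds, in graph-mode TensorFlow:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, name='x')
    y = tf.identity(x, name='y')
    z = tf.identity(x, name='z')

index = InputToOps(g)        # the class whose constructor is shown above
print(index.mapping[x])      # {y.op, z.op}: every op that consumes tensor x
```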
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_windows.py | python | PrintDialog.__init__ | (self, *args, **kwargs) | __init__(self, Window parent, PrintDialogData data=None) -> PrintDialog | __init__(self, Window parent, PrintDialogData data=None) -> PrintDialog | [
"__init__",
"(",
"self",
"Window",
"parent",
"PrintDialogData",
"data",
"=",
"None",
")",
"-",
">",
"PrintDialog"
] | def __init__(self, *args, **kwargs):
"""__init__(self, Window parent, PrintDialogData data=None) -> PrintDialog"""
_windows_.PrintDialog_swiginit(self,_windows_.new_PrintDialog(*args, **kwargs)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_windows_",
".",
"PrintDialog_swiginit",
"(",
"self",
",",
"_windows_",
".",
"new_PrintDialog",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L5176-L5178 |
||
lyxok1/Tiny-DSOD | 94d15450699bea0dd3720e75e2d273e476174fba | scripts/cpp_lint.py | python | IsErrorSuppressedByNolint | (category, linenum) | return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set())) | Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment. | Returns true if the specified error category is suppressed on this line. | [
"Returns",
"true",
"if",
"the",
"specified",
"error",
"category",
"is",
"suppressed",
"on",
"this",
"line",
"."
] | def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set())) | [
"def",
"IsErrorSuppressedByNolint",
"(",
"category",
",",
"linenum",
")",
":",
"return",
"(",
"linenum",
"in",
"_error_suppressions",
".",
"get",
"(",
"category",
",",
"set",
"(",
")",
")",
"or",
"linenum",
"in",
"_error_suppressions",
".",
"get",
"(",
"None",
",",
"set",
"(",
")",
")",
")"
] | https://github.com/lyxok1/Tiny-DSOD/blob/94d15450699bea0dd3720e75e2d273e476174fba/scripts/cpp_lint.py#L500-L513 |
|
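A hedged sketch of the suppression flow: ParseNolintSuppressions (defined elsewhere in cpplint.py) populates `_error_suppressions` from NOLINT comments, after which this lookup answers per-line queries. The populated map below is illustrative:

```python
# Pretend a file contained:
#   line 42:  int x;            // NOLINT(runtime/int)
#   line 99:  SomeMacro(arg);   // NOLINT
_error_suppressions = {'runtime/int': {42}, None: {99}}

print(IsErrorSuppressedByNolint('runtime/int', 42))     # True: category match
print(IsErrorSuppressedByNolint('whitespace/tab', 99))  # True: bare NOLINT
print(IsErrorSuppressedByNolint('runtime/int', 43))     # False
```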
junhyukoh/caffe-lstm | 598d45456fa2a1b127a644f4aa38daa8fb9fc722 | python/caffe/draw.py | python | draw_net | (caffe_net, rankdir, ext='png') | return get_pydot_graph(caffe_net, rankdir).create(format=ext) | Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
Image representation of the graph, encoded in the requested format. | Draws a caffe net and returns the image string encoded using the given
extension. | [
"Draws",
"a",
"caffe",
"net",
"and",
"returns",
"the",
"image",
"string",
"encoded",
"using",
"the",
"given",
"extension",
"."
] | def draw_net(caffe_net, rankdir, ext='png'):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
Postscript representation of the graph.
"""
return get_pydot_graph(caffe_net, rankdir).create(format=ext) | [
"def",
"draw_net",
"(",
"caffe_net",
",",
"rankdir",
",",
"ext",
"=",
"'png'",
")",
":",
"return",
"get_pydot_graph",
"(",
"caffe_net",
",",
"rankdir",
")",
".",
"create",
"(",
"format",
"=",
"ext",
")"
] | https://github.com/junhyukoh/caffe-lstm/blob/598d45456fa2a1b127a644f4aa38daa8fb9fc722/python/caffe/draw.py#L180-L195 |
|
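A usage sketch: parse a prototxt into a NetParameter and render it; the input path is hypothetical:

```python
from google.protobuf import text_format
from caffe.proto import caffe_pb2

net = caffe_pb2.NetParameter()
with open('deploy.prototxt') as f:       # hypothetical net definition
    text_format.Merge(f.read(), net)

png = draw_net(net, rankdir='LR', ext='png')
with open('net.png', 'wb') as f:
    f.write(png)
```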
devsisters/libquic | 8954789a056d8e7d5fcb6452fd1572ca57eb5c4e | src/third_party/protobuf/python/google/protobuf/internal/python_message.py | python | _ExtensionDict.__init__ | (self, extended_message) | extended_message: Message instance for which we are the Extensions dict. | extended_message: Message instance for which we are the Extensions dict. | [
"extended_message",
":",
"Message",
"instance",
"for",
"which",
"we",
"are",
"the",
"Extensions",
"dict",
"."
] | def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message | [
"def",
"__init__",
"(",
"self",
",",
"extended_message",
")",
":",
"self",
".",
"_extended_message",
"=",
"extended_message"
] | https://github.com/devsisters/libquic/blob/8954789a056d8e7d5fcb6452fd1572ca57eb5c4e/src/third_party/protobuf/python/google/protobuf/internal/python_message.py#L1445-L1449 |
||
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/python2_version/klampt/src/robotsim.py | python | RobotPoser.addIKConstraint | (self, obj) | return _robotsim.RobotPoser_addIKConstraint(self, obj) | addIKConstraint(RobotPoser self, IKObjective obj) | addIKConstraint(RobotPoser self, IKObjective obj) | [
"addIKConstraint",
"(",
"RobotPoser",
"self",
"IKObjective",
"obj",
")"
] | def addIKConstraint(self, obj):
"""
addIKConstraint(RobotPoser self, IKObjective obj)
"""
return _robotsim.RobotPoser_addIKConstraint(self, obj) | [
"def",
"addIKConstraint",
"(",
"self",
",",
"obj",
")",
":",
"return",
"_robotsim",
".",
"RobotPoser_addIKConstraint",
"(",
"self",
",",
"obj",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/src/robotsim.py#L3418-L3425 |
|
dmlc/xgboost | 2775c2a1abd4b5b759ff517617434c8b9aeb4cc0 | demo/guide-python/quantile_data_iterator.py | python | IterForDMatrixDemo.next | (self, input_data) | return 1 | Yield next batch of data. | Yield next batch of data. | [
"Yield",
"next",
"batch",
"of",
"data",
"."
] | def next(self, input_data):
'''Yield next batch of data.'''
if self.it == len(self._data):
# Return 0 when there's no more batch.
return 0
input_data(data=self.data(), label=self.labels(),
weight=self.weights())
self.it += 1
return 1 | [
"def",
"next",
"(",
"self",
",",
"input_data",
")",
":",
"if",
"self",
".",
"it",
"==",
"len",
"(",
"self",
".",
"_data",
")",
":",
"# Return 0 when there's no more batch.",
"return",
"0",
"input_data",
"(",
"data",
"=",
"self",
".",
"data",
"(",
")",
",",
"label",
"=",
"self",
".",
"labels",
"(",
")",
",",
"weight",
"=",
"self",
".",
"weights",
"(",
")",
")",
"self",
".",
"it",
"+=",
"1",
"return",
"1"
] | https://github.com/dmlc/xgboost/blob/2775c2a1abd4b5b759ff517617434c8b9aeb4cc0/demo/guide-python/quantile_data_iterator.py#L75-L83 |
|
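A hedged sketch of how the demo consumes this iterator: DeviceQuantileDMatrix pulls batches through next() and builds the quantized matrix incrementally (requires a GPU-enabled xgboost build):

```python
import xgboost

it = IterForDMatrixDemo()                    # the DataIter shown above
m = xgboost.DeviceQuantileDMatrix(it)
booster = xgboost.train({'tree_method': 'gpu_hist'}, m, num_boost_round=10)
```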
p4lang/behavioral-model | 81ce0163f0770c6b9d6056a28ce2e0cc035bb6e9 | tools/cpplint.py | python | CheckTrailingSemicolon | (filename, clean_lines, linenum, error) | Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Looks for redundant trailing semicolon. | [
"Looks",
"for",
"redundant",
"trailing",
"semicolon",
"."
] | def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we explicitly list the allowed rules rather
# than listing the disallowed ones. These are the places where "};"
# should be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a list of safe macros instead of a list of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the allowed checks wrong means some extra
# semicolons, while the downside for getting disallowed checks wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }") | [
"def",
"CheckTrailingSemicolon",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# Block bodies should not be followed by a semicolon. Due to C++11",
"# brace initialization, there are more places where semicolons are",
"# required than not, so we explicitly list the allowed rules rather",
"# than listing the disallowed ones. These are the places where \"};\"",
"# should be replaced by just \"}\":",
"# 1. Some flavor of block following closing parenthesis:",
"# for (;;) {};",
"# while (...) {};",
"# switch (...) {};",
"# Function(...) {};",
"# if (...) {};",
"# if (...) else if (...) {};",
"#",
"# 2. else block:",
"# if (...) else {};",
"#",
"# 3. const member function:",
"# Function(...) const {};",
"#",
"# 4. Block following some statement:",
"# x = 42;",
"# {};",
"#",
"# 5. Block at the beginning of a function:",
"# Function(...) {",
"# {};",
"# }",
"#",
"# Note that naively checking for the preceding \"{\" will also match",
"# braces inside multi-dimensional arrays, but this is fine since",
"# that expression will not contain semicolons.",
"#",
"# 6. Block following another block:",
"# while (true) {}",
"# {};",
"#",
"# 7. End of namespaces:",
"# namespace {};",
"#",
"# These semicolons seems far more common than other kinds of",
"# redundant semicolons, possibly due to people converting classes",
"# to namespaces. For now we do not warn for this case.",
"#",
"# Try matching case 1 first.",
"match",
"=",
"Match",
"(",
"r'^(.*\\)\\s*)\\{'",
",",
"line",
")",
"if",
"match",
":",
"# Matched closing parenthesis (case 1). Check the token before the",
"# matching opening parenthesis, and don't warn if it looks like a",
"# macro. This avoids these false positives:",
"# - macro that defines a base class",
"# - multi-line macro that defines a base class",
"# - macro that defines the whole class-head",
"#",
"# But we still issue warnings for macros that we know are safe to",
"# warn, specifically:",
"# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P",
"# - TYPED_TEST",
"# - INTERFACE_DEF",
"# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:",
"#",
"# We implement a list of safe macros instead of a list of",
"# unsafe macros, even though the latter appears less frequently in",
"# google code and would have been easier to implement. This is because",
"# the downside for getting the allowed checks wrong means some extra",
"# semicolons, while the downside for getting disallowed checks wrong",
"# would result in compile errors.",
"#",
"# In addition to macros, we also don't want to warn on",
"# - Compound literals",
"# - Lambdas",
"# - alignas specifier with anonymous structs",
"# - decltype",
"closing_brace_pos",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"rfind",
"(",
"')'",
")",
"opening_parenthesis",
"=",
"ReverseCloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"closing_brace_pos",
")",
"if",
"opening_parenthesis",
"[",
"2",
"]",
">",
"-",
"1",
":",
"line_prefix",
"=",
"opening_parenthesis",
"[",
"0",
"]",
"[",
"0",
":",
"opening_parenthesis",
"[",
"2",
"]",
"]",
"macro",
"=",
"Search",
"(",
"r'\\b([A-Z_][A-Z0-9_]*)\\s*$'",
",",
"line_prefix",
")",
"func",
"=",
"Match",
"(",
"r'^(.*\\])\\s*$'",
",",
"line_prefix",
")",
"if",
"(",
"(",
"macro",
"and",
"macro",
".",
"group",
"(",
"1",
")",
"not",
"in",
"(",
"'TEST'",
",",
"'TEST_F'",
",",
"'MATCHER'",
",",
"'MATCHER_P'",
",",
"'TYPED_TEST'",
",",
"'EXCLUSIVE_LOCKS_REQUIRED'",
",",
"'SHARED_LOCKS_REQUIRED'",
",",
"'LOCKS_EXCLUDED'",
",",
"'INTERFACE_DEF'",
")",
")",
"or",
"(",
"func",
"and",
"not",
"Search",
"(",
"r'\\boperator\\s*\\[\\s*\\]'",
",",
"func",
".",
"group",
"(",
"1",
")",
")",
")",
"or",
"Search",
"(",
"r'\\b(?:struct|union)\\s+alignas\\s*$'",
",",
"line_prefix",
")",
"or",
"Search",
"(",
"r'\\bdecltype$'",
",",
"line_prefix",
")",
"or",
"Search",
"(",
"r'\\s+=\\s*$'",
",",
"line_prefix",
")",
")",
":",
"match",
"=",
"None",
"if",
"(",
"match",
"and",
"opening_parenthesis",
"[",
"1",
"]",
">",
"1",
"and",
"Search",
"(",
"r'\\]\\s*$'",
",",
"clean_lines",
".",
"elided",
"[",
"opening_parenthesis",
"[",
"1",
"]",
"-",
"1",
"]",
")",
")",
":",
"# Multi-line lambda-expression",
"match",
"=",
"None",
"else",
":",
"# Try matching cases 2-3.",
"match",
"=",
"Match",
"(",
"r'^(.*(?:else|\\)\\s*const)\\s*)\\{'",
",",
"line",
")",
"if",
"not",
"match",
":",
"# Try matching cases 4-6. These are always matched on separate lines.",
"#",
"# Note that we can't simply concatenate the previous line to the",
"# current line and do a single match, otherwise we may output",
"# duplicate warnings for the blank line case:",
"# if (cond) {",
"# // blank line",
"# }",
"prevline",
"=",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
"if",
"prevline",
"and",
"Search",
"(",
"r'[;{}]\\s*$'",
",",
"prevline",
")",
":",
"match",
"=",
"Match",
"(",
"r'^(\\s*)\\{'",
",",
"line",
")",
"# Check matching closing brace",
"if",
"match",
":",
"(",
"endline",
",",
"endlinenum",
",",
"endpos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"if",
"endpos",
">",
"-",
"1",
"and",
"Match",
"(",
"r'^\\s*;'",
",",
"endline",
"[",
"endpos",
":",
"]",
")",
":",
"# Current {} pair is eligible for semicolon check, and we have found",
"# the redundant semicolon, output warning here.",
"#",
"# Note: because we are scanning forward for opening braces, and",
"# outputting warnings for the matching closing brace, if there are",
"# nested blocks with trailing semicolons, we will get the error",
"# messages in reversed order.",
"# We need to check the line forward for NOLINT",
"raw_lines",
"=",
"clean_lines",
".",
"raw_lines",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"endlinenum",
"-",
"1",
"]",
",",
"endlinenum",
"-",
"1",
",",
"error",
")",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"endlinenum",
"]",
",",
"endlinenum",
",",
"error",
")",
"error",
"(",
"filename",
",",
"endlinenum",
",",
"'readability/braces'",
",",
"4",
",",
"\"You don't need a ; after a }\"",
")"
] | https://github.com/p4lang/behavioral-model/blob/81ce0163f0770c6b9d6056a28ce2e0cc035bb6e9/tools/cpplint.py#L4351-L4495 |
||
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/autopep8.py | python | FixPEP8.fix_e401 | (self, result) | Put imports on separate lines. | Put imports on separate lines. | [
"Put",
"imports",
"on",
"separate",
"lines",
"."
] | def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed | [
"def",
"fix_e401",
"(",
"self",
",",
"result",
")",
":",
"line_index",
"=",
"result",
"[",
"'line'",
"]",
"-",
"1",
"target",
"=",
"self",
".",
"source",
"[",
"line_index",
"]",
"offset",
"=",
"result",
"[",
"'column'",
"]",
"-",
"1",
"if",
"not",
"target",
".",
"lstrip",
"(",
")",
".",
"startswith",
"(",
"'import'",
")",
":",
"return",
"[",
"]",
"indentation",
"=",
"re",
".",
"split",
"(",
"pattern",
"=",
"r'\\bimport\\b'",
",",
"string",
"=",
"target",
",",
"maxsplit",
"=",
"1",
")",
"[",
"0",
"]",
"fixed",
"=",
"(",
"target",
"[",
":",
"offset",
"]",
".",
"rstrip",
"(",
"'\\t ,'",
")",
"+",
"'\\n'",
"+",
"indentation",
"+",
"'import '",
"+",
"target",
"[",
"offset",
":",
"]",
".",
"lstrip",
"(",
"'\\t ,'",
")",
")",
"self",
".",
"source",
"[",
"line_index",
"]",
"=",
"fixed"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/autopep8.py#L752-L765 |
||
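The same fix through autopep8's public entry point, selecting only E401 (a dict for `options` is accepted by recent autopep8 releases):

```python
import autopep8

print(autopep8.fix_code('import os, sys\n', options={'select': ['E401']}))
# import os
# import sys
```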
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/oauthlib/oauth2/rfc6749/request_validator.py | python | RequestValidator.save_authorization_code | (self, client_id, code, request, *args, **kwargs) | Persist the authorization_code.
The code should at minimum be stored with:
- the client_id (client_id)
- the redirect URI used (request.redirect_uri)
- a resource owner / user (request.user)
- the authorized scopes (request.scopes)
- the client state, if given (code.get('state'))
The 'code' argument is actually a dictionary, containing at least a
'code' key with the actual authorization code:
{'code': 'sdf345jsdf0934f'}
It may also have a 'state' key containing a nonce for the client, if it
chose to send one. That value should be saved and used in
'validate_code'.
It may also have a 'claims' parameter which, when present, will be a dict
deserialized from JSON as described at
http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
This value should be saved in this method and used again in 'validate_code'.
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant and, optionally, state.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant | Persist the authorization_code. | [
"Persist",
"the",
"authorization_code",
"."
] | def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be stored with:
- the client_id (client_id)
- the redirect URI used (request.redirect_uri)
- a resource owner / user (request.user)
- the authorized scopes (request.scopes)
- the client state, if given (code.get('state'))
The 'code' argument is actually a dictionary, containing at least a
'code' key with the actual authorization code:
{'code': 'sdf345jsdf0934f'}
It may also have a 'state' key containing a nonce for the client, if it
chose to send one. That value should be saved and used in
'validate_code'.
It may also have a 'claims' parameter which, when present, will be a dict
deserialized from JSON as described at
http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
This value should be saved in this method and used again in 'validate_code'.
:param client_id: Unicode client identifier
:param code: A dict of the authorization code grant and, optionally, state.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.') | [
"def",
"save_authorization_code",
"(",
"self",
",",
"client_id",
",",
"code",
",",
"request",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Subclasses must implement this method.'",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/oauthlib/oauth2/rfc6749/request_validator.py#L208-L239 |
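A minimal in-memory sketch of what an implementation might persist; the `InMemoryValidator` class and its `_codes` dict are assumptions, only the method signature comes from oauthlib:

```python
from oauthlib.oauth2 import RequestValidator

class InMemoryValidator(RequestValidator):
    def __init__(self):
        self._codes = {}  # illustrative store keyed by the code string

    def save_authorization_code(self, client_id, code, request, *args, **kwargs):
        # Persist at least the fields the docstring above calls out.
        self._codes[code['code']] = {
            'client_id': client_id,
            'redirect_uri': request.redirect_uri,
            'user': request.user,
            'scopes': request.scopes,
            'state': code.get('state'),
        }
```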
||
cksystemsgroup/scal | fa2208a97a77d65f4e90f85fef3404c27c1f2ac2 | tools/cpplint.py | python | _IsTestFilename | (filename) | Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise. | Determines if the given filename has a suffix that identifies it as a test. | [
"Determines",
"if",
"the",
"given",
"filename",
"has",
"a",
"suffix",
"that",
"identifies",
"it",
"as",
"a",
"test",
"."
] | def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False | [
"def",
"_IsTestFilename",
"(",
"filename",
")",
":",
"if",
"(",
"filename",
".",
"endswith",
"(",
"'_test.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_unittest.cc'",
")",
"or",
"filename",
".",
"endswith",
"(",
"'_regtest.cc'",
")",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | https://github.com/cksystemsgroup/scal/blob/fa2208a97a77d65f4e90f85fef3404c27c1f2ac2/tools/cpplint.py#L4528-L4542 |
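The same check fits in one expression, since `str.endswith` accepts a tuple of suffixes; a sketch, not the cpplint source:

```python
def is_test_filename(filename):
    # Equivalent one-liner for the suffix test above.
    return filename.endswith(('_test.cc', '_unittest.cc', '_regtest.cc'))

assert is_test_filename('foo_unittest.cc')
assert not is_test_filename('foo.cc')
```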
||
deepmind/reverb | ef3c8f0be1b720a741d2dee335e15e44668c291a | reverb/client.py | python | Writer.create_item | (self, table: str, num_timesteps: int, priority: float) | Creates an item and sends it to the ReverbService.
This method is what effectively makes data available for sampling. See the
docstring of `append` for an illustrative example of the behavior.
Note: The item is not always immediately pushed. To ensure items
are pushed to the service, call `writer.flush()` or `writer.close()`.
Args:
table: Name of the priority table to insert the item into.
num_timesteps: The number of most recently added timesteps that the new
item should reference.
priority: The priority used for determining the sample probability of the
new item.
Raises:
ValueError: If num_timesteps is < 1.
StatusNotOk: If num_timesteps is > the timesteps currently available
in the buffer. | Creates an item and sends it to the ReverbService. | [
"Creates",
"an",
"item",
"and",
"sends",
"it",
"to",
"the",
"ReverbService",
"."
] | def create_item(self, table: str, num_timesteps: int, priority: float):
"""Creates an item and sends it to the ReverbService.
This method is what effectively makes data available for sampling. See the
docstring of `append` for an illustrative example of the behavior.
Note: The item is not always immediately pushed. To ensure items
are pushed to the service, call `writer.flush()` or `writer.close()`.
Args:
table: Name of the priority table to insert the item into.
num_timesteps: The number of most recently added timesteps that the new
item should reference.
priority: The priority used for determining the sample probability of the
new item.
Raises:
ValueError: If num_timesteps is < 1.
StatusNotOk: If num_timesteps is > the timesteps currently available
in the buffer.
"""
if num_timesteps < 1:
raise ValueError('num_timesteps (%d) must be a positive integer' % num_timesteps)
self._writer.CreateItem(table, num_timesteps, priority) | [
"def",
"create_item",
"(",
"self",
",",
"table",
":",
"str",
",",
"num_timesteps",
":",
"int",
",",
"priority",
":",
"float",
")",
":",
"if",
"num_timesteps",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'num_timesteps (%d) must be a positive integer'",
")",
"self",
".",
"_writer",
".",
"CreateItem",
"(",
"table",
",",
"num_timesteps",
",",
"priority",
")"
] | https://github.com/deepmind/reverb/blob/ef3c8f0be1b720a741d2dee335e15e44668c291a/reverb/client.py#L153-L176 |
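A hedged usage sketch against the legacy writer API documented above; the server address, table name, and payload are assumptions:

```python
import reverb  # assumes the dm-reverb package and a reachable server

client = reverb.Client('localhost:8000')  # hypothetical address
with client.writer(max_sequence_length=100) as writer:
    for step in range(3):
        writer.append({'obs': step})
    # Make the three most recent timesteps sampleable from 'my_table'.
    writer.create_item(table='my_table', num_timesteps=3, priority=1.0)
```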
||
google/nucleus | 68d3947fafba1337f294c0668a6e1c7f3f1273e3 | nucleus/io/genomics_writer.py | python | GenomicsWriter.__exit__ | (self, unused_type, unused_value, unused_traceback) | Exit a `with` block. Typically, this will close the file. | Exit a `with` block. Typically, this will close the file. | [
"Exit",
"a",
"with",
"block",
".",
"Typically",
"this",
"will",
"close",
"the",
"file",
"."
] | def __exit__(self, unused_type, unused_value, unused_traceback):
"""Exit a `with` block. Typically, this will close the file.""" | [
"def",
"__exit__",
"(",
"self",
",",
"unused_type",
",",
"unused_value",
",",
"unused_traceback",
")",
":"
] | https://github.com/google/nucleus/blob/68d3947fafba1337f294c0668a6e1c7f3f1273e3/nucleus/io/genomics_writer.py#L81-L82 |
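The contract this abstract `__exit__` fills is the standard context-manager protocol; a toy stand-in with illustrative names:

```python
class ToyWriter(object):
    def __enter__(self):
        return self

    def __exit__(self, unused_type, unused_value, unused_traceback):
        print('closing underlying file')  # a real writer closes here
        return False  # do not swallow exceptions

with ToyWriter() as writer:
    pass  # prints 'closing underlying file' when the block exits
```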
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqt/mantidqt/widgets/sliceviewer/model.py | python | SliceViewerModel.export_cuts_to_workspace_matrix | (self, slicepoint, bin_params, limits: tuple,
transpose: bool, dimension_indices: Sequence[int], cut: str) | return help_msg | Export 1D cuts in the X/Y direction for the extent. Signature matches other export functions.
slicepoint, bin_params are unused
:param limits: An optional ND sequence containing limits for plotting dimensions. If
not provided the full extent of each dimension is used
:param transpose: If true then the limits are transposed .w.r.t. the data
:param cut: A string denoting which cut to export. Options=c,x,y. | Export 1D cuts in the X/Y direction for the extent. Signature matches other export functions.
slicepoint, bin_params are unused
:param limits: An optional ND sequence containing limits for plotting dimensions. If
not provided the full extent of each dimension is used
:param transpose: If true then the limits are transposed .w.r.t. the data
:param cut: A string denoting which cut to export. Options=c,x,y. | [
"Export",
"1D",
"cuts",
"in",
"the",
"X",
"/",
"Y",
"direction",
"for",
"the",
"extent",
".",
"Signature",
"matches",
"other",
"export",
"functions",
".",
"slicepoint",
"bin_params",
"are",
"unused",
":",
"param",
"limits",
":",
"An",
"optional",
"ND",
"sequence",
"containing",
"limits",
"for",
"plotting",
"dimensions",
".",
"If",
"not",
"provided",
"the",
"full",
"extent",
"of",
"each",
"dimension",
"is",
"used",
":",
"param",
"transpose",
":",
"If",
"true",
"then",
"the",
"limits",
"are",
"transposed",
".",
"w",
".",
"r",
".",
"t",
".",
"the",
"data",
":",
"param",
"cut",
":",
"A",
"string",
"denoting",
"which",
"cut",
"to",
"export",
".",
"Options",
"=",
"c",
"x",
"y",
"."
] | def export_cuts_to_workspace_matrix(self, slicepoint, bin_params, limits: tuple,
transpose: bool, dimension_indices: Sequence[int], cut: str):
"""
Export 1D cuts in the X/Y direction for the extent. Signature matches other export functions.
slicepoint, bin_params are unused
:param limits: An optional ND sequence containing limits for plotting dimensions. If
not provided the full extent of each dimension is used
:param transpose: If true then the limits are transposed .w.r.t. the data
:param cut: A string denoting which cut to export. Options=c,x,y.
"""
workspace = self._get_ws()
if transpose:
# swap back to model order
limits = limits[1], limits[0]
yaxis = workspace.getAxis(1)
(xmin, xmax), (ymin, ymax) = limits[0], limits[1]
xcut_name, ycut_name, help_msg = self._cut_names(cut)
if transpose:
xcut_name, ycut_name = ycut_name, xcut_name
if yaxis.isSpectra() or yaxis.isNumeric():
extract_cuts_matrix(workspace, xmin, xmax, ymin, ymax, xcut_name, ycut_name)
else:
help_msg = 'Unknown Y axis type. Unable to perform cuts'
return help_msg | [
"def",
"export_cuts_to_workspace_matrix",
"(",
"self",
",",
"slicepoint",
",",
"bin_params",
",",
"limits",
":",
"tuple",
",",
"transpose",
":",
"bool",
",",
"dimension_indices",
":",
"Sequence",
"[",
"int",
"]",
",",
"cut",
":",
"str",
")",
":",
"workspace",
"=",
"self",
".",
"_get_ws",
"(",
")",
"if",
"transpose",
":",
"# swap back to model order",
"limits",
"=",
"limits",
"[",
"1",
"]",
",",
"limits",
"[",
"0",
"]",
"yaxis",
"=",
"workspace",
".",
"getAxis",
"(",
"1",
")",
"(",
"xmin",
",",
"xmax",
")",
",",
"(",
"ymin",
",",
"ymax",
")",
"=",
"limits",
"[",
"0",
"]",
",",
"limits",
"[",
"1",
"]",
"xcut_name",
",",
"ycut_name",
",",
"help_msg",
"=",
"self",
".",
"_cut_names",
"(",
"cut",
")",
"if",
"transpose",
":",
"xcut_name",
",",
"ycut_name",
"=",
"ycut_name",
",",
"xcut_name",
"if",
"yaxis",
".",
"isSpectra",
"(",
")",
"or",
"yaxis",
".",
"isNumeric",
"(",
")",
":",
"extract_cuts_matrix",
"(",
"workspace",
",",
"xmin",
",",
"xmax",
",",
"ymin",
",",
"ymax",
",",
"xcut_name",
",",
"ycut_name",
")",
"else",
":",
"help_msg",
"=",
"'Unknown Y axis type. Unable to perform cuts'",
"return",
"help_msg"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/sliceviewer/model.py#L415-L440 |
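The limit/name re-orientation step in isolation, as a plain-Python sketch; the function and argument names are ours, not mantid's:

```python
def orient_for_model(limits, cut_names, transpose):
    # Mirror the method above: swap (x, y) limits back into model order
    # and swap the x/y cut names when the view is transposed.
    if transpose:
        limits = limits[1], limits[0]
        cut_names = cut_names[1], cut_names[0]
    return limits, cut_names

print(orient_for_model(((0, 1), (2, 3)), ('ws_cut_x', 'ws_cut_y'), True))
# -> (((2, 3), (0, 1)), ('ws_cut_y', 'ws_cut_x'))
```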
|
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/general_fitting/general_fitting_model.py | python | GeneralFittingModel._update_fit_functions_after_sequential_fit | (self, workspaces: list, functions: list) | Updates the fit functions after a sequential fit has been run on the Sequential fitting tab. | Updates the fit functions after a sequential fit has been run on the Sequential fitting tab. | [
"Updates",
"the",
"fit",
"functions",
"after",
"a",
"sequential",
"fit",
"has",
"been",
"run",
"on",
"the",
"Sequential",
"fitting",
"tab",
"."
] | def _update_fit_functions_after_sequential_fit(self, workspaces: list, functions: list) -> None:
"""Updates the fit functions after a sequential fit has been run on the Sequential fitting tab."""
if self.fitting_context.simultaneous_fitting_mode:
self._update_simultaneous_fit_function_after_sequential(workspaces, functions)
else:
super()._update_fit_functions_after_sequential_fit(workspaces, functions) | [
"def",
"_update_fit_functions_after_sequential_fit",
"(",
"self",
",",
"workspaces",
":",
"list",
",",
"functions",
":",
"list",
")",
"->",
"None",
":",
"if",
"self",
".",
"fitting_context",
".",
"simultaneous_fitting_mode",
":",
"self",
".",
"_update_simultaneous_fit_function_after_sequential",
"(",
"workspaces",
",",
"functions",
")",
"else",
":",
"super",
"(",
")",
".",
"_update_fit_functions_after_sequential_fit",
"(",
"workspaces",
",",
"functions",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/general_fitting/general_fitting_model.py#L538-L543 |
||
scribusproject/scribus | 41ec7c775a060912cf251682a8b1437f753f80f4 | codegen/cheetah/Cheetah/FileUtils.py | python | replaceStrInFiles | (files, theStr, repl) | return FindAndReplace(files, pattern, repl).results() | Replace all instances of 'theStr' with 'repl' for each file in the 'files'
list. Returns a dictionary with data about the matches found.
This is like string.replace() on a multi-file basis.
This function is a wrapper around the FindAndReplace class. See its
docstring for more details. | Replace all instances of 'theStr' with 'repl' for each file in the 'files'
list. Returns a dictionary with data about the matches found. | [
"Replace",
"all",
"instances",
"of",
"theStr",
"with",
"repl",
"for",
"each",
"file",
"in",
"the",
"files",
"list",
".",
"Returns",
"a",
"dictionary",
"with",
"data",
"about",
"the",
"matches",
"found",
"."
] | def replaceStrInFiles(files, theStr, repl):
"""Replace all instances of 'theStr' with 'repl' for each file in the 'files'
list. Returns a dictionary with data about the matches found.
This is like string.replace() on a multi-file basis.
This function is a wrapper around the FindAndReplace class. See its
docstring for more details."""
pattern = _escapeRegexChars(theStr)
return FindAndReplace(files, pattern, repl).results() | [
"def",
"replaceStrInFiles",
"(",
"files",
",",
"theStr",
",",
"repl",
")",
":",
"pattern",
"=",
"_escapeRegexChars",
"(",
"theStr",
")",
"return",
"FindAndReplace",
"(",
"files",
",",
"pattern",
",",
"repl",
")",
".",
"results",
"(",
")"
] | https://github.com/scribusproject/scribus/blob/41ec7c775a060912cf251682a8b1437f753f80f4/codegen/cheetah/Cheetah/FileUtils.py#L21-L32 |
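The same idea without Cheetah's FindAndReplace machinery, sketched with the standard library; the in-place rewrite details are assumptions:

```python
import re

def replace_str_in_files(paths, the_str, repl):
    # Escape the literal so regex metacharacters are matched verbatim,
    # then substitute in place, returning a per-file match count.
    pattern = re.compile(re.escape(the_str))
    counts = {}
    for path in paths:
        with open(path) as handle:
            text = handle.read()
        new_text, n = pattern.subn(repl, text)
        if n:
            with open(path, 'w') as handle:
                handle.write(new_text)
        counts[path] = n
    return counts
```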
|
giuspen/cherrytree | 84712f206478fcf9acf30174009ad28c648c6344 | pygtk2/modules/core.py | python | CherryTree.nodes_add_from_notecase_file | (self, action) | Add Nodes Parsing a NoteCase File | Add Nodes Parsing a NoteCase File | [
"Add",
"Nodes",
"Parsing",
"a",
"NoteCase",
"File"
] | def nodes_add_from_notecase_file(self, action):
"""Add Nodes Parsing a NoteCase File"""
filepath = support.dialog_file_select(filter_pattern=["*.ncd"],
filter_name=_("NoteCase Document"),
curr_folder=self.pick_dir_import,
parent=self.window)
if not filepath: return
self.pick_dir_import = os.path.dirname(filepath)
try:
file_descriptor = open(filepath, 'r')
notecase_string = file_descriptor.read()
file_descriptor.close()
except:
support.dialog_error("Error importing the file %s" % filepath, self.window)
raise
return
notecase = imports.NotecaseHandler(self)
cherrytree_string = notecase.get_cherrytree_xml(notecase_string)
self.nodes_add_from_cherrytree_data(cherrytree_string) | [
"def",
"nodes_add_from_notecase_file",
"(",
"self",
",",
"action",
")",
":",
"filepath",
"=",
"support",
".",
"dialog_file_select",
"(",
"filter_pattern",
"=",
"[",
"\"*.ncd\"",
"]",
",",
"filter_name",
"=",
"_",
"(",
"\"NoteCase Document\"",
")",
",",
"curr_folder",
"=",
"self",
".",
"pick_dir_import",
",",
"parent",
"=",
"self",
".",
"window",
")",
"if",
"not",
"filepath",
":",
"return",
"self",
".",
"pick_dir_import",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filepath",
")",
"try",
":",
"file_descriptor",
"=",
"open",
"(",
"filepath",
",",
"'r'",
")",
"notecase_string",
"=",
"file_descriptor",
".",
"read",
"(",
")",
"file_descriptor",
".",
"close",
"(",
")",
"except",
":",
"support",
".",
"dialog_error",
"(",
"\"Error importing the file %s\"",
"%",
"filepath",
",",
"self",
".",
"window",
")",
"raise",
"return",
"notecase",
"=",
"imports",
".",
"NotecaseHandler",
"(",
"self",
")",
"cherrytree_string",
"=",
"notecase",
".",
"get_cherrytree_xml",
"(",
"notecase_string",
")",
"self",
".",
"nodes_add_from_cherrytree_data",
"(",
"cherrytree_string",
")"
] | https://github.com/giuspen/cherrytree/blob/84712f206478fcf9acf30174009ad28c648c6344/pygtk2/modules/core.py#L884-L902 |
||
moflow/moflow | 2dfb27c799c90c6caf1477508eca3eec616ef7d2 | bap/libtracewrap/libtrace/protobuf/python/google/protobuf/text_format.py | python | _Tokenizer.ConsumeInt32 | (self) | return result | Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed. | Consumes a signed 32bit integer number. | [
"Consumes",
"a",
"signed",
"32bit",
"integer",
"number",
"."
] | def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result | [
"def",
"ConsumeInt32",
"(",
"self",
")",
":",
"try",
":",
"result",
"=",
"ParseInteger",
"(",
"self",
".",
"token",
",",
"is_signed",
"=",
"True",
",",
"is_long",
"=",
"False",
")",
"except",
"ValueError",
",",
"e",
":",
"raise",
"self",
".",
"_ParseError",
"(",
"str",
"(",
"e",
")",
")",
"self",
".",
"NextToken",
"(",
")",
"return",
"result"
] | https://github.com/moflow/moflow/blob/2dfb27c799c90c6caf1477508eca3eec616ef7d2/bap/libtracewrap/libtrace/protobuf/python/google/protobuf/text_format.py#L395-L409 |
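The bounds-checked parse the tokenizer delegates to, approximated outside protobuf; the function name is ours and base-0 `int()` stands in for ParseInteger:

```python
def parse_int32(token):
    value = int(token, 0)  # base 0 accepts decimal, hex and octal forms
    if not -2**31 <= value <= 2**31 - 1:
        raise ValueError('%d is out of range for a signed 32bit integer' % value)
    return value

print(parse_int32('-42'), parse_int32('0x7fffffff'))  # -42 2147483647
```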
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/plat-mac/pimp.py | python | PimpPackage_binary.installPackageOnly | (self, output=None) | return None | Install a single source package.
If output is given it should be a file-like object and it
will receive a log of what happened. | Install a single source package. | [
"Install",
"a",
"single",
"source",
"package",
"."
] | def installPackageOnly(self, output=None):
"""Install a single source package.
If output is given it should be a file-like object and it
will receive a log of what happened."""
if 'Install-command' in self._dict:
return "%s: Binary package cannot have Install-command" % self.fullname()
if 'Pre-install-command' in self._dict:
if _cmd(output, '/tmp', self._dict['Pre-install-command']):
return "pre-install %s: running \"%s\" failed" % \
(self.fullname(), self._dict['Pre-install-command'])
self.beforeInstall()
# Install by unpacking
filename = os.path.split(self.archiveFilename)[1]
for ext, unpackerClass, arg in ARCHIVE_FORMATS:
if filename[-len(ext):] == ext:
break
else:
return "%s: unknown extension for archive file: %s" % (self.fullname(), filename)
self.basename = filename[:-len(ext)]
install_renames = []
for k, newloc in self._db.preferences.installLocations:
if not newloc:
continue
if k == "--install-lib":
oldloc = DEFAULT_INSTALLDIR
else:
return "%s: Don't know installLocation %s" % (self.fullname(), k)
install_renames.append((oldloc, newloc))
unpacker = unpackerClass(arg, dir="/", renames=install_renames)
rv = unpacker.unpack(self.archiveFilename, output=output, package=self)
if rv:
return rv
self.afterInstall()
if 'Post-install-command' in self._dict:
if _cmd(output, '/tmp', self._dict['Post-install-command']):
return "%s: post-install: running \"%s\" failed" % \
(self.fullname(), self._dict['Post-install-command'])
return None | [
"def",
"installPackageOnly",
"(",
"self",
",",
"output",
"=",
"None",
")",
":",
"if",
"'Install-command'",
"in",
"self",
".",
"_dict",
":",
"return",
"\"%s: Binary package cannot have Install-command\"",
"%",
"self",
".",
"fullname",
"(",
")",
"if",
"'Pre-install-command'",
"in",
"self",
".",
"_dict",
":",
"if",
"_cmd",
"(",
"output",
",",
"'/tmp'",
",",
"self",
".",
"_dict",
"[",
"'Pre-install-command'",
"]",
")",
":",
"return",
"\"pre-install %s: running \\\"%s\\\" failed\"",
"%",
"(",
"self",
".",
"fullname",
"(",
")",
",",
"self",
".",
"_dict",
"[",
"'Pre-install-command'",
"]",
")",
"self",
".",
"beforeInstall",
"(",
")",
"# Install by unpacking",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"self",
".",
"archiveFilename",
")",
"[",
"1",
"]",
"for",
"ext",
",",
"unpackerClass",
",",
"arg",
"in",
"ARCHIVE_FORMATS",
":",
"if",
"filename",
"[",
"-",
"len",
"(",
"ext",
")",
":",
"]",
"==",
"ext",
":",
"break",
"else",
":",
"return",
"\"%s: unknown extension for archive file: %s\"",
"%",
"(",
"self",
".",
"fullname",
"(",
")",
",",
"filename",
")",
"self",
".",
"basename",
"=",
"filename",
"[",
":",
"-",
"len",
"(",
"ext",
")",
"]",
"install_renames",
"=",
"[",
"]",
"for",
"k",
",",
"newloc",
"in",
"self",
".",
"_db",
".",
"preferences",
".",
"installLocations",
":",
"if",
"not",
"newloc",
":",
"continue",
"if",
"k",
"==",
"\"--install-lib\"",
":",
"oldloc",
"=",
"DEFAULT_INSTALLDIR",
"else",
":",
"return",
"\"%s: Don't know installLocation %s\"",
"%",
"(",
"self",
".",
"fullname",
"(",
")",
",",
"k",
")",
"install_renames",
".",
"append",
"(",
"(",
"oldloc",
",",
"newloc",
")",
")",
"unpacker",
"=",
"unpackerClass",
"(",
"arg",
",",
"dir",
"=",
"\"/\"",
",",
"renames",
"=",
"install_renames",
")",
"rv",
"=",
"unpacker",
".",
"unpack",
"(",
"self",
".",
"archiveFilename",
",",
"output",
"=",
"output",
",",
"package",
"=",
"self",
")",
"if",
"rv",
":",
"return",
"rv",
"self",
".",
"afterInstall",
"(",
")",
"if",
"'Post-install-command'",
"in",
"self",
".",
"_dict",
":",
"if",
"_cmd",
"(",
"output",
",",
"'/tmp'",
",",
"self",
".",
"_dict",
"[",
"'Post-install-command'",
"]",
")",
":",
"return",
"\"%s: post-install: running \\\"%s\\\" failed\"",
"%",
"(",
"self",
".",
"fullname",
"(",
")",
",",
"self",
".",
"_dict",
"[",
"'Post-install-command'",
"]",
")",
"return",
"None"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/plat-mac/pimp.py#L797-L844 |
|
MythTV/mythtv | d282a209cb8be85d036f85a62a8ec971b67d45f4 | mythtv/contrib/imports/mirobridge/mirobridge/mirobridge_interpreter_6_0_0.py | python | MiroInterpreter.do_downloads | (self, line) | downloads -- Selects the downloads tab. | downloads -- Selects the downloads tab. | [
"downloads",
"--",
"Selects",
"the",
"downloads",
"tab",
"."
] | def do_downloads(self, line):
"""downloads -- Selects the downloads tab."""
self.tab = FakeTab("statictab", "downloadtab")
self.tab_changed() | [
"def",
"do_downloads",
"(",
"self",
",",
"line",
")",
":",
"self",
".",
"tab",
"=",
"FakeTab",
"(",
"\"statictab\"",
",",
"\"downloadtab\"",
")",
"self",
".",
"tab_changed",
"(",
")"
] | https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/contrib/imports/mirobridge/mirobridge/mirobridge_interpreter_6_0_0.py#L632-L635 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_core.py | python | ZipFSHandler.FindNext | (*args, **kwargs) | return _core_.ZipFSHandler_FindNext(*args, **kwargs) | FindNext(self) -> String | FindNext(self) -> String | [
"FindNext",
"(",
"self",
")",
"-",
">",
"String"
] | def FindNext(*args, **kwargs):
"""FindNext(self) -> String"""
return _core_.ZipFSHandler_FindNext(*args, **kwargs) | [
"def",
"FindNext",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"ZipFSHandler_FindNext",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L2520-L2522 |
|
HKUST-Aerial-Robotics/Fast-Planner | 2ddd7793eecd573dbb5b47e2c985aa06606df3cf | uav_simulator/Utils/quadrotor_msgs/src/quadrotor_msgs/msg/_PPROutputData.py | python | PPROutputData.deserialize | (self, str) | unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str`` | unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str`` | [
"unpack",
"serialized",
"message",
"in",
"str",
"into",
"this",
"message",
"instance",
":",
"param",
"str",
":",
"byte",
"array",
"of",
"serialized",
"message",
"str"
] | def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 106
(_x.quad_time, _x.des_thrust, _x.des_roll, _x.des_pitch, _x.des_yaw, _x.est_roll, _x.est_pitch, _x.est_yaw, _x.est_angvel_x, _x.est_angvel_y, _x.est_angvel_z, _x.est_acc_x, _x.est_acc_y, _x.est_acc_z,) = _struct_H13d.unpack(str[start:end])
start = end
end += 8
self.pwm = _struct_4H.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) | [
"def",
"deserialize",
"(",
"self",
",",
"str",
")",
":",
"try",
":",
"if",
"self",
".",
"header",
"is",
"None",
":",
"self",
".",
"header",
"=",
"std_msgs",
".",
"msg",
".",
"Header",
"(",
")",
"end",
"=",
"0",
"_x",
"=",
"self",
"start",
"=",
"end",
"end",
"+=",
"12",
"(",
"_x",
".",
"header",
".",
"seq",
",",
"_x",
".",
"header",
".",
"stamp",
".",
"secs",
",",
"_x",
".",
"header",
".",
"stamp",
".",
"nsecs",
",",
")",
"=",
"_struct_3I",
".",
"unpack",
"(",
"str",
"[",
"start",
":",
"end",
"]",
")",
"start",
"=",
"end",
"end",
"+=",
"4",
"(",
"length",
",",
")",
"=",
"_struct_I",
".",
"unpack",
"(",
"str",
"[",
"start",
":",
"end",
"]",
")",
"start",
"=",
"end",
"end",
"+=",
"length",
"if",
"python3",
":",
"self",
".",
"header",
".",
"frame_id",
"=",
"str",
"[",
"start",
":",
"end",
"]",
".",
"decode",
"(",
"'utf-8'",
")",
"else",
":",
"self",
".",
"header",
".",
"frame_id",
"=",
"str",
"[",
"start",
":",
"end",
"]",
"_x",
"=",
"self",
"start",
"=",
"end",
"end",
"+=",
"106",
"(",
"_x",
".",
"quad_time",
",",
"_x",
".",
"des_thrust",
",",
"_x",
".",
"des_roll",
",",
"_x",
".",
"des_pitch",
",",
"_x",
".",
"des_yaw",
",",
"_x",
".",
"est_roll",
",",
"_x",
".",
"est_pitch",
",",
"_x",
".",
"est_yaw",
",",
"_x",
".",
"est_angvel_x",
",",
"_x",
".",
"est_angvel_y",
",",
"_x",
".",
"est_angvel_z",
",",
"_x",
".",
"est_acc_x",
",",
"_x",
".",
"est_acc_y",
",",
"_x",
".",
"est_acc_z",
",",
")",
"=",
"_struct_H13d",
".",
"unpack",
"(",
"str",
"[",
"start",
":",
"end",
"]",
")",
"start",
"=",
"end",
"end",
"+=",
"8",
"self",
".",
"pwm",
"=",
"_struct_4H",
".",
"unpack",
"(",
"str",
"[",
"start",
":",
"end",
"]",
")",
"return",
"self",
"except",
"struct",
".",
"error",
"as",
"e",
":",
"raise",
"genpy",
".",
"DeserializationError",
"(",
"e",
")"
] | https://github.com/HKUST-Aerial-Robotics/Fast-Planner/blob/2ddd7793eecd573dbb5b47e2c985aa06606df3cf/uav_simulator/Utils/quadrotor_msgs/src/quadrotor_msgs/msg/_PPROutputData.py#L148-L179 |
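The fixed-layout decode pattern above, reduced to the standard `struct` module: a 3×uint32 header followed by a length-prefixed UTF-8 string; the field values are made up:

```python
import struct

buf = struct.pack('<3I', 7, 1700000000, 500)   # seq, secs, nsecs
buf += struct.pack('<I', 5) + b'world'         # length-prefixed frame_id

seq, secs, nsecs = struct.unpack_from('<3I', buf, 0)
(length,) = struct.unpack_from('<I', buf, 12)
frame_id = buf[16:16 + length].decode('utf-8')
print(seq, secs, nsecs, frame_id)  # 7 1700000000 500 world
```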
||
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py | python | _ScalarToVoidShape | (op) | return [] | Shape function for ops that take a scalar and produce no outputs. | Shape function for ops that take a scalar and produce no outputs. | [
"Shape",
"function",
"for",
"ops",
"that",
"take",
"a",
"scalar",
"and",
"produce",
"no",
"outputs",
"."
] | def _ScalarToVoidShape(op):
"""Shape function for ops that take a scalar and produce no outputs."""
op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
return [] | [
"def",
"_ScalarToVoidShape",
"(",
"op",
")",
":",
"op",
".",
"inputs",
"[",
"0",
"]",
".",
"get_shape",
"(",
")",
".",
"merge_with",
"(",
"tensor_shape",
".",
"scalar",
"(",
")",
")",
"return",
"[",
"]"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py#L1044-L1047 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/package_index.py | python | _splituser | (host) | return (user if delim else None), host | splituser('user[:passwd]@host[:port]')
--> 'user[:passwd]', 'host[:port]'. | splituser('user[:passwd] | [
"splituser",
"(",
"user",
"[",
":",
"passwd",
"]"
] | def _splituser(host):
"""splituser('user[:passwd]@host[:port]')
--> 'user[:passwd]', 'host[:port]'."""
user, delim, host = host.rpartition('@')
return (user if delim else None), host | [
"def",
"_splituser",
"(",
"host",
")",
":",
"user",
",",
"delim",
",",
"host",
"=",
"host",
".",
"rpartition",
"(",
"'@'",
")",
"return",
"(",
"user",
"if",
"delim",
"else",
"None",
")",
",",
"host"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/setuptools/package_index.py#L1097-L1101 |
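A self-contained copy for illustration; `rpartition` splits on the last '@', so passwords containing '@' still parse:

```python
def splituser(host):
    user, delim, host = host.rpartition('@')
    return (user if delim else None), host

print(splituser('alice:p@ss@example.com:8080'))  # ('alice:p@ss', 'example.com:8080')
print(splituser('example.com'))                  # (None, 'example.com')
```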
|
adnanaziz/epicode | e81d4387d2ae442d21631dfc958690d424e1d84d | cpp/cpplint.py | python | ProcessLine | (filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, error, extra_check_functions=[]) | Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error | Processes a single line in the file. | [
"Processes",
"a",
"single",
"line",
"in",
"the",
"file",
"."
] | def ProcessLine(filename, file_extension,
clean_lines, line, include_state, function_state,
class_state, error, extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, class_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
error)
CheckForNonStandardConstructs(filename, clean_lines, line,
class_state, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error) | [
"def",
"ProcessLine",
"(",
"filename",
",",
"file_extension",
",",
"clean_lines",
",",
"line",
",",
"include_state",
",",
"function_state",
",",
"class_state",
",",
"error",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"raw_lines",
"=",
"clean_lines",
".",
"raw_lines",
"ParseNolintSuppressions",
"(",
"filename",
",",
"raw_lines",
"[",
"line",
"]",
",",
"line",
",",
"error",
")",
"CheckForFunctionLengths",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"function_state",
",",
"error",
")",
"CheckForMultilineCommentsAndStrings",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckStyle",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"file_extension",
",",
"class_state",
",",
"error",
")",
"CheckLanguage",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"file_extension",
",",
"include_state",
",",
"error",
")",
"CheckForNonStandardConstructs",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"class_state",
",",
"error",
")",
"CheckPosixThreading",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckInvalidIncrement",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"CheckMakePairUsesDeduction",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")",
"for",
"check_fn",
"in",
"extra_check_functions",
":",
"check_fn",
"(",
"filename",
",",
"clean_lines",
",",
"line",
",",
"error",
")"
] | https://github.com/adnanaziz/epicode/blob/e81d4387d2ae442d21631dfc958690d424e1d84d/cpp/cpplint.py#L3119-L3153 |
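A sketch of what an entry in `extra_check_functions` could look like; the check itself is invented, and the 5-argument `error(...)` call mirrors the calls visible in the other cpplint rows:

```python
def check_no_tabs(filename, clean_lines, linenum, error):
    # Hypothetical extra check: flag hard tabs on the raw source line.
    if '\t' in clean_lines.raw_lines[linenum]:
        error(filename, linenum, 'whitespace/tab', 1,
              'Tab found; prefer spaces for indentation')
```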
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | PrePyControl | (*args, **kwargs) | return val | PrePyControl() -> PyControl | PrePyControl() -> PyControl | [
"PrePyControl",
"()",
"-",
">",
"PyControl"
] | def PrePyControl(*args, **kwargs):
"""PrePyControl() -> PyControl"""
val = _controls_.new_PrePyControl(*args, **kwargs)
return val | [
"def",
"PrePyControl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"val",
"=",
"_controls_",
".",
"new_PrePyControl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"val"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L5998-L6001 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/customtreectrl.py | python | CustomTreeCtrl.PrependItem | (self, parent, text, ct_type=0, wnd=None, image=-1, selImage=-1, data=None, separator=False) | return self.DoInsertItem(parent, 0, text, ct_type, wnd, image, selImage, data, separator) | Prepends an item as a first child of parent.
:param `parent`: an instance of :class:`GenericTreeItem` representing the
item's parent;
:param string `text`: the item text label;
:param integer `ct_type`: the item type (see :meth:`~CustomTreeCtrl.SetItemType` for a list of valid
item types);
:param `wnd`: if not ``None``, a non-toplevel window to show next to the item, any
subclass of :class:`Window` except top-level windows;
:param integer `image`: an index within the normal image list specifying the image to
use for the item in unselected state;
:param integer `selImage`: an index within the normal image list specifying the image to
use for the item in selected state; if `image` > -1 and `selImage` is -1, the
same image is used for both selected and unselected items;
:param object `data`: associate the given Python object `data` with the item;
:param bool `separator`: ``True`` if the item is a separator, ``False`` otherwise.
:return: An instance of :class:`GenericTreeItem` upon successful insertion.
:see: :meth:`~CustomTreeCtrl.DoInsertItem` for possible exceptions generated by this method. | Prepends an item as a first child of parent. | [
"Prepends",
"an",
"item",
"as",
"a",
"first",
"child",
"of",
"parent",
"."
] | def PrependItem(self, parent, text, ct_type=0, wnd=None, image=-1, selImage=-1, data=None, separator=False):
"""
Prepends an item as a first child of parent.
:param `parent`: an instance of :class:`GenericTreeItem` representing the
item's parent;
:param string `text`: the item text label;
:param integer `ct_type`: the item type (see :meth:`~CustomTreeCtrl.SetItemType` for a list of valid
item types);
:param `wnd`: if not ``None``, a non-toplevel window to show next to the item, any
subclass of :class:`Window` except top-level windows;
:param integer `image`: an index within the normal image list specifying the image to
use for the item in unselected state;
:param integer `selImage`: an index within the normal image list specifying the image to
use for the item in selected state; if `image` > -1 and `selImage` is -1, the
same image is used for both selected and unselected items;
:param object `data`: associate the given Python object `data` with the item;
:param bool `separator`: ``True`` if the item is a separator, ``False`` otherwise.
:return: An instance of :class:`GenericTreeItem` upon successful insertion.
:see: :meth:`~CustomTreeCtrl.DoInsertItem` for possible exceptions generated by this method.
"""
return self.DoInsertItem(parent, 0, text, ct_type, wnd, image, selImage, data, separator) | [
"def",
"PrependItem",
"(",
"self",
",",
"parent",
",",
"text",
",",
"ct_type",
"=",
"0",
",",
"wnd",
"=",
"None",
",",
"image",
"=",
"-",
"1",
",",
"selImage",
"=",
"-",
"1",
",",
"data",
"=",
"None",
",",
"separator",
"=",
"False",
")",
":",
"return",
"self",
".",
"DoInsertItem",
"(",
"parent",
",",
"0",
",",
"text",
",",
"ct_type",
",",
"wnd",
",",
"image",
",",
"selImage",
",",
"data",
",",
"separator",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L4985-L5009 |
|
infinit/memo | 3a8394d0f647efe03ccb8bfe885a7279cb8be8a6 | elle/drake/src/drake/__init__.py | python | Builder.hash | (self) | return None | A hash for this builder | A hash for this builder | [
"A",
"hash",
"for",
"this",
"builder"
] | def hash(self):
"""A hash for this builder"""
return None | [
"def",
"hash",
"(",
"self",
")",
":",
"return",
"None"
] | https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L2050-L2052 |
|
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/tools/gyp/pylib/gyp/xcodeproj_file.py | python | XCObject.VerifyHasRequiredProperties | (self) | Ensure that all properties identified as required by the schema are
set. | Ensure that all properties identified as required by the schema are
set. | [
"Ensure",
"that",
"all",
"properties",
"identified",
"as",
"required",
"by",
"the",
"schema",
"are",
"set",
"."
] | def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property) | [
"def",
"VerifyHasRequiredProperties",
"(",
"self",
")",
":",
"# TODO(mark): A stronger verification mechanism is needed. Some",
"# subclasses need to perform validation beyond what the schema can enforce.",
"for",
"property",
",",
"attributes",
"in",
"self",
".",
"_schema",
".",
"iteritems",
"(",
")",
":",
"(",
"is_list",
",",
"property_type",
",",
"is_strong",
",",
"is_required",
")",
"=",
"attributes",
"[",
"0",
":",
"4",
"]",
"if",
"is_required",
"and",
"not",
"property",
"in",
"self",
".",
"_properties",
":",
"raise",
"KeyError",
"(",
"self",
".",
"__class__",
".",
"__name__",
"+",
"' requires '",
"+",
"property",
")"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/gyp/pylib/gyp/xcodeproj_file.py#L861-L871 |
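The required-key validation pattern on its own, without the gyp schema; the 4-tuple layout is copied from the attributes unpacking above, and the sample schema is ours:

```python
def verify_required(properties, schema):
    for name, attributes in schema.items():
        is_required = attributes[3]  # (is_list, type, is_strong, is_required)
        if is_required and name not in properties:
            raise KeyError('object requires ' + name)

verify_required({'name': 'app'},
                {'name': (False, str, False, True),
                 'path': (False, str, False, False)})  # passes silently
```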
||
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/parallel/_auto_parallel_context.py | python | _AutoParallelContext.set_strategy_ckpt_save_file | (self, strategy_ckpt_save_file) | Set strategy checkpoint save path.
Args:
strategy_ckpt_save_file (bool): Path to save parallel strategy checkpoint. | Set strategy checkpoint save path. | [
"Set",
"strategy",
"checkpoint",
"save",
"path",
"."
] | def set_strategy_ckpt_save_file(self, strategy_ckpt_save_file):
"""
Set strategy checkpoint save path.
Args:
strategy_ckpt_save_file (bool): Path to save parallel strategy checkpoint.
"""
self.check_context_handle()
dir_path = os.path.dirname(strategy_ckpt_save_file)
if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
self._context_handle.set_strategy_ckpt_save_file(strategy_ckpt_save_file) | [
"def",
"set_strategy_ckpt_save_file",
"(",
"self",
",",
"strategy_ckpt_save_file",
")",
":",
"self",
".",
"check_context_handle",
"(",
")",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"strategy_ckpt_save_file",
")",
"if",
"dir_path",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_path",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_path",
")",
"self",
".",
"_context_handle",
".",
"set_strategy_ckpt_save_file",
"(",
"strategy_ckpt_save_file",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/parallel/_auto_parallel_context.py#L484-L495 |
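The ensure-parent-directory step in isolation; `exist_ok=True` also closes the race between the existence check and `makedirs` in the snippet above, and the path is hypothetical:

```python
import os

def ensure_parent_dir(file_path):
    dir_path = os.path.dirname(file_path)
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)

ensure_parent_dir('/tmp/parallel/strategy.ckpt')  # creates /tmp/parallel
```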
||
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py | python | newComment | (content) | return xmlNode(_obj=ret) | Creation of a new node containing a comment. | Creation of a new node containing a comment. | [
"Creation",
"of",
"a",
"new",
"node",
"containing",
"a",
"comment",
"."
] | def newComment(content):
"""Creation of a new node containing a comment. """
ret = libxml2mod.xmlNewComment(content)
if ret is None:raise treeError('xmlNewComment() failed')
return xmlNode(_obj=ret) | [
"def",
"newComment",
"(",
"content",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlNewComment",
"(",
"content",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlNewComment() failed'",
")",
"return",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py#L891-L895 |
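A hedged usage sketch with the libxml2 Python bindings, assuming they are installed; the document content is made up:

```python
import libxml2

doc = libxml2.newDoc("1.0")
doc.addChild(libxml2.newComment("generated file - do not edit"))
print(doc.serialize())   # ...<!--generated file - do not edit-->
doc.freeDoc()            # libxml2 nodes are freed manually
```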
|
lyxok1/Tiny-DSOD | 94d15450699bea0dd3720e75e2d273e476174fba | scripts/cpp_lint.py | python | CheckForFunctionLengths | (filename, clean_lines, linenum,
function_state, error) | Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found. | Reports for long function bodies. | [
"Reports",
"for",
"long",
"function",
"bodies",
"."
] | def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() | [
"def",
"CheckForFunctionLengths",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"function_state",
",",
"error",
")",
":",
"lines",
"=",
"clean_lines",
".",
"lines",
"line",
"=",
"lines",
"[",
"linenum",
"]",
"raw",
"=",
"clean_lines",
".",
"raw_lines",
"raw_line",
"=",
"raw",
"[",
"linenum",
"]",
"joined_line",
"=",
"''",
"starting_func",
"=",
"False",
"regexp",
"=",
"r'(\\w(\\w|::|\\*|\\&|\\s)*)\\('",
"# decls * & space::name( ...",
"match_result",
"=",
"Match",
"(",
"regexp",
",",
"line",
")",
"if",
"match_result",
":",
"# If the name is all caps and underscores, figure it's a macro and",
"# ignore it, unless it's TEST or TEST_F.",
"function_name",
"=",
"match_result",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
")",
"[",
"-",
"1",
"]",
"if",
"function_name",
"==",
"'TEST'",
"or",
"function_name",
"==",
"'TEST_F'",
"or",
"(",
"not",
"Match",
"(",
"r'[A-Z_]+$'",
",",
"function_name",
")",
")",
":",
"starting_func",
"=",
"True",
"if",
"starting_func",
":",
"body_found",
"=",
"False",
"for",
"start_linenum",
"in",
"xrange",
"(",
"linenum",
",",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
":",
"start_line",
"=",
"lines",
"[",
"start_linenum",
"]",
"joined_line",
"+=",
"' '",
"+",
"start_line",
".",
"lstrip",
"(",
")",
"if",
"Search",
"(",
"r'(;|})'",
",",
"start_line",
")",
":",
"# Declarations and trivial functions",
"body_found",
"=",
"True",
"break",
"# ... ignore",
"elif",
"Search",
"(",
"r'{'",
",",
"start_line",
")",
":",
"body_found",
"=",
"True",
"function",
"=",
"Search",
"(",
"r'((\\w|:)*)\\('",
",",
"line",
")",
".",
"group",
"(",
"1",
")",
"if",
"Match",
"(",
"r'TEST'",
",",
"function",
")",
":",
"# Handle TEST... macros",
"parameter_regexp",
"=",
"Search",
"(",
"r'(\\(.*\\))'",
",",
"joined_line",
")",
"if",
"parameter_regexp",
":",
"# Ignore bad syntax",
"function",
"+=",
"parameter_regexp",
".",
"group",
"(",
"1",
")",
"else",
":",
"function",
"+=",
"'()'",
"function_state",
".",
"Begin",
"(",
"function",
")",
"break",
"if",
"not",
"body_found",
":",
"# No body for the function (or evidence of a non-function) was found.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/fn_size'",
",",
"5",
",",
"'Lint failed to find start of function body.'",
")",
"elif",
"Match",
"(",
"r'^\\}\\s*$'",
",",
"line",
")",
":",
"# function end",
"function_state",
".",
"Check",
"(",
"error",
",",
"filename",
",",
"linenum",
")",
"function_state",
".",
"End",
"(",
")",
"elif",
"not",
"Match",
"(",
"r'^\\s*$'",
",",
"line",
")",
":",
"function_state",
".",
"Count",
"(",
")"
] | https://github.com/lyxok1/Tiny-DSOD/blob/94d15450699bea0dd3720e75e2d273e476174fba/scripts/cpp_lint.py#L2388-L2455 |
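The declaration heuristic from the check above, exercised on a few sample lines of our own:

```python
import re

regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # same pattern as the check above
for line in ('void Foo::Bar(int x) {', 'MY_MACRO(arg);', 'TEST(Suite, Name) {'):
    match = re.match(regexp, line)
    name = match.group(1).split()[-1]
    # ALL_CAPS names are treated as macros, except TEST / TEST_F.
    is_macro = (name not in ('TEST', 'TEST_F')
                and re.match(r'[A-Z_]+$', name) is not None)
    print(line, '->', name, '(macro)' if is_macro else '(function start)')
```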
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/framework/python/framework/checkpoint_utils.py | python | init_from_checkpoint | (checkpoint_dir, assignment_map) | Using assignment map initializes current variables with loaded tensors.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching variable
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with variable from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with variable from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Create variables.
with tf.compat.v1.variable_scope('test'):
m = tf.compat.v1.get_variable('my_var')
with tf.compat.v1.variable_scope('test2'):
var2 = tf.compat.v1.get_variable('my_var')
var3 = tf.compat.v1.get_variable(name="my1", shape=[100, 100],
partitioner=lambda shape, dtype: [5, 1])
...
# Specify which variables to initialize from checkpoint.
init_from_checkpoint(checkpoint_dir, {
'some_var': 'test/my_var',
'some_scope/': 'test2/'})
...
# Or use `Variable` objects to identify what to initialize.
init_from_checkpoint(checkpoint_dir, {
'some_scope/var2': var2,
})
# Initialize partitioned variables
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': 'part_var',
})
# Or specifying the list of `Variable` objects.
init_from_checkpoint(checkpoint_dir, {
'some_var_from_ckpt': var3._get_variable_list(),
})
...
# Initialize variables as usual.
session.run(tf.get_all_variables())
```
Args:
checkpoint_dir: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph. | Using assignment map initializes current variables with loaded tensors. | [
"Using",
"assignment",
"map",
"initializes",
"current",
"variables",
"with",
"loaded",
"tensors",
"."
] | def init_from_checkpoint(checkpoint_dir, assignment_map):
"""Using assignment map initializes current variables with loaded tensors.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching variable
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with variable from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with variable from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Create variables.
with tf.compat.v1.variable_scope('test'):
m = tf.compat.v1.get_variable('my_var')
with tf.compat.v1.variable_scope('test2'):
var2 = tf.compat.v1.get_variable('my_var')
var3 = tf.compat.v1.get_variable(name="my1", shape=[100, 100],
partitioner=lambda shape, dtype: [5, 1])
...
# Specify which variables to initialize from checkpoint.
init_from_checkpoint(checkpoint_dir, {
'some_var': 'test/my_var',
'some_scope/': 'test2/'})
...
# Or use `Variable` objects to identify what to initialize.
init_from_checkpoint(checkpoint_dir, {
'some_scope/var2': var2,
    })
    # Initialize partitioned variables
    init_from_checkpoint(checkpoint_dir, {
        'some_var_from_ckpt': 'part_var',
    })
    # Or specifying the list of `Variable` objects.
    init_from_checkpoint(checkpoint_dir, {
        'some_var_from_ckpt': var3._get_variable_list(),
    })
    ...
    # Initialize variables as usual.
    session.run(tf.get_all_variables())
  ```

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of the variables in the
      checkpoint and values are current variables or names of current variables
      (in default graph).

  Raises:
    tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
    ValueError: If missing variables in current graph.
  """
  filepattern = _get_checkpoint_filename(checkpoint_dir)
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  for tensor_name_in_ckpt, current_var_or_name in six.iteritems(assignment_map):
    var = None
    # Check if this is Variable object or list of Variable objects (in case of
    # partitioned variables).
    is_var = lambda x: isinstance(x, variables.Variable)
    if is_var(current_var_or_name) or (
        isinstance(current_var_or_name, list)
        and all(is_var(v) for v in current_var_or_name)):
      var = current_var_or_name
    else:
      var_scope = vs._get_default_variable_store()
      # Check if this variable is in var_store.
      var = var_scope._vars.get(current_var_or_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        var = _collect_partitioned_variable(current_var_or_name, var_scope)
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the checkpoint.
      if tensor_name_in_ckpt not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
            tensor_name_in_ckpt, checkpoint_dir, variable_map
        ))
      if is_var(var):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(
            variable_map[tensor_name_in_ckpt]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
              ))
        var_name = var.name
      else:
        var_name = ",".join([v.name for v in var])
      _set_variable_or_list_initializer(var, filepattern, tensor_name_in_ckpt)
      logging.info("Initialize variable %s from checkpoint %s with %s" % (
          var_name, checkpoint_dir, tensor_name_in_ckpt
      ))
    else:
      scopes = ""
      # TODO(vihanjain): Support list of 'current_var_or_name' here.
      if "/" in current_var_or_name:
        scopes = current_var_or_name[:current_var_or_name.rindex("/")]
      if not tensor_name_in_ckpt.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name {} should map to scope only "
            "{}. Should be 'scope/': 'other_scope/'.".format(
                scopes, tensor_name_in_ckpt))
      # If scope to scope mapping was provided, find all variables in the scope
      # and create variable to variable mapping.
      scope_variables = set()
      for var_name in var_scope._vars:
        if not scopes or var_name.startswith(scopes + "/"):
          # Consume /part_ if partitioned variable.
          if "/part_" in var_name:
            var_name = var_name[:var_name.index("/part_")]
          scope_variables.add(var_name)
      for var_name in scope_variables:
        # Lookup name with specified prefix and suffix from current variable.
        # If tensor_name given is '/' (root), don't use it for full name.
        full_tensor_name = var_name[len(scopes):]
        if current_var_or_name != "/":
          full_tensor_name = full_tensor_name[1:]
        if tensor_name_in_ckpt != "/":
          full_tensor_name = tensor_name_in_ckpt + full_tensor_name
        if full_tensor_name not in variable_map:
          raise ValueError(
              "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                  full_tensor_name, var_name[len(scopes) + 1:],
                  tensor_name_in_ckpt, checkpoint_dir
              ))
        var = var_scope._vars.get(var_name, None)
        if var is None:
          var = _collect_partitioned_variable(var_name, var_scope)
        _set_variable_or_list_initializer(var, filepattern, full_tensor_name)
        logging.info("Initialize variable %s from checkpoint %s with %s" % (
            var_name, checkpoint_dir, full_tensor_name
        )) | [
"def",
"init_from_checkpoint",
"(",
"checkpoint_dir",
",",
"assignment_map",
")",
":",
"filepattern",
"=",
"_get_checkpoint_filename",
"(",
"checkpoint_dir",
")",
"reader",
"=",
"load_checkpoint",
"(",
"checkpoint_dir",
")",
"variable_map",
"=",
"reader",
".",
"get_variable_to_shape_map",
"(",
")",
"for",
"tensor_name_in_ckpt",
",",
"current_var_or_name",
"in",
"six",
".",
"iteritems",
"(",
"assignment_map",
")",
":",
"var",
"=",
"None",
"# Check if this is Variable object or list of Variable objects (in case of",
"# partitioned variables).",
"is_var",
"=",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"variables",
".",
"Variable",
")",
"if",
"is_var",
"(",
"current_var_or_name",
")",
"or",
"(",
"isinstance",
"(",
"current_var_or_name",
",",
"list",
")",
"and",
"all",
"(",
"is_var",
"(",
"v",
")",
"for",
"v",
"in",
"current_var_or_name",
")",
")",
":",
"var",
"=",
"current_var_or_name",
"else",
":",
"var_scope",
"=",
"vs",
".",
"_get_default_variable_store",
"(",
")",
"# Check if this variable is in var_store.",
"var",
"=",
"var_scope",
".",
"_vars",
".",
"get",
"(",
"current_var_or_name",
",",
"None",
")",
"# Also check if variable is partitioned as list.",
"if",
"var",
"is",
"None",
":",
"var",
"=",
"_collect_partitioned_variable",
"(",
"current_var_or_name",
",",
"var_scope",
")",
"if",
"var",
"is",
"not",
"None",
":",
"# If 1 to 1 mapping was provided, find variable in the checkpoint.",
"if",
"tensor_name_in_ckpt",
"not",
"in",
"variable_map",
":",
"raise",
"ValueError",
"(",
"\"Tensor %s is not found in %s checkpoint %s\"",
"%",
"(",
"tensor_name_in_ckpt",
",",
"checkpoint_dir",
",",
"variable_map",
")",
")",
"if",
"is_var",
"(",
"var",
")",
":",
"# Additional at-call-time checks.",
"if",
"not",
"var",
".",
"get_shape",
"(",
")",
".",
"is_compatible_with",
"(",
"variable_map",
"[",
"tensor_name_in_ckpt",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Shape of variable %s (%s) doesn't match with shape of \"",
"\"tensor %s (%s) from checkpoint reader.\"",
"%",
"(",
"var",
".",
"name",
",",
"str",
"(",
"var",
".",
"get_shape",
"(",
")",
")",
",",
"tensor_name_in_ckpt",
",",
"str",
"(",
"variable_map",
"[",
"tensor_name_in_ckpt",
"]",
")",
")",
")",
"var_name",
"=",
"var",
".",
"name",
"else",
":",
"var_name",
"=",
"\",\"",
".",
"join",
"(",
"[",
"v",
".",
"name",
"for",
"v",
"in",
"var",
"]",
")",
"_set_variable_or_list_initializer",
"(",
"var",
",",
"filepattern",
",",
"tensor_name_in_ckpt",
")",
"logging",
".",
"info",
"(",
"\"Initialize variable %s from checkpoint %s with %s\"",
"%",
"(",
"var_name",
",",
"checkpoint_dir",
",",
"tensor_name_in_ckpt",
")",
")",
"else",
":",
"scopes",
"=",
"\"\"",
"# TODO(vihanjain): Support list of 'current_var_or_name' here.",
"if",
"\"/\"",
"in",
"current_var_or_name",
":",
"scopes",
"=",
"current_var_or_name",
"[",
":",
"current_var_or_name",
".",
"rindex",
"(",
"\"/\"",
")",
"]",
"if",
"not",
"tensor_name_in_ckpt",
".",
"endswith",
"(",
"\"/\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Assignment map with scope only name {} should map to scope only \"",
"\"{}. Should be 'scope/': 'other_scope/'.\"",
".",
"format",
"(",
"scopes",
",",
"tensor_name_in_ckpt",
")",
")",
"# If scope to scope mapping was provided, find all variables in the scope",
"# and create variable to variable mapping.",
"scope_variables",
"=",
"set",
"(",
")",
"for",
"var_name",
"in",
"var_scope",
".",
"_vars",
":",
"if",
"not",
"scopes",
"or",
"var_name",
".",
"startswith",
"(",
"scopes",
"+",
"\"/\"",
")",
":",
"# Consume /part_ if partitioned variable.",
"if",
"\"/part_\"",
"in",
"var_name",
":",
"var_name",
"=",
"var_name",
"[",
":",
"var_name",
".",
"index",
"(",
"\"/part_\"",
")",
"]",
"scope_variables",
".",
"add",
"(",
"var_name",
")",
"for",
"var_name",
"in",
"scope_variables",
":",
"# Lookup name with specified prefix and suffix from current variable.",
"# If tensor_name given is '/' (root), don't use it for full name.",
"full_tensor_name",
"=",
"var_name",
"[",
"len",
"(",
"scopes",
")",
":",
"]",
"if",
"current_var_or_name",
"!=",
"\"/\"",
":",
"full_tensor_name",
"=",
"full_tensor_name",
"[",
"1",
":",
"]",
"if",
"tensor_name_in_ckpt",
"!=",
"\"/\"",
":",
"full_tensor_name",
"=",
"tensor_name_in_ckpt",
"+",
"full_tensor_name",
"if",
"full_tensor_name",
"not",
"in",
"variable_map",
":",
"raise",
"ValueError",
"(",
"\"Tensor %s (%s in %s) is not found in %s checkpoint\"",
"%",
"(",
"full_tensor_name",
",",
"var_name",
"[",
"len",
"(",
"scopes",
")",
"+",
"1",
":",
"]",
",",
"tensor_name_in_ckpt",
",",
"checkpoint_dir",
")",
")",
"var",
"=",
"var_scope",
".",
"_vars",
".",
"get",
"(",
"var_name",
",",
"None",
")",
"if",
"var",
"is",
"None",
":",
"var",
"=",
"_collect_partitioned_variable",
"(",
"var_name",
",",
"var_scope",
")",
"_set_variable_or_list_initializer",
"(",
"var",
",",
"filepattern",
",",
"full_tensor_name",
")",
"logging",
".",
"info",
"(",
"\"Initialize variable %s from checkpoint %s with %s\"",
"%",
"(",
"var_name",
",",
"checkpoint_dir",
",",
"full_tensor_name",
")",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/framework/python/framework/checkpoint_utils.py#L154-L302 |
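Note: the row above documents `init_from_checkpoint`, whose `assignment_map` accepts both variable-to-variable and scope-to-scope entries. Below is a minimal TF 1.x usage sketch; the checkpoint path and scope names are hypothetical, not taken from the row.

```python
import tensorflow as tf  # TF 1.x, where tf.contrib.framework is available

# Build a variable under a new scope name.
with tf.variable_scope('new_scope'):
    var = tf.get_variable('var', shape=[10])

# Map the checkpoint scope 'old_scope/' onto the current 'new_scope/'.
tf.contrib.framework.init_from_checkpoint('/tmp/model_dir',
                                          {'old_scope/': 'new_scope/'})

with tf.Session() as sess:
    # init_from_checkpoint overrode the initializer ops, so a normal
    # initialization run now restores the mapped tensors from the checkpoint.
    sess.run(tf.global_variables_initializer())
```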
||
Tencent/CMONGO | c40380caa14e05509f46993aa8b8da966b09b0b5 | buildscripts/cpplint.py | python | CheckForBadCharacters | (filename, lines, error) | Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found. | Logs an error for each line containing bad characters. | [
"Logs",
"an",
"error",
"for",
"each",
"line",
"containing",
"bad",
"characters",
"."
] | def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't). Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes. These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for linenum, line in enumerate(lines):
    if u'\ufffd' in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') | [
"def",
"CheckForBadCharacters",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"u'\\ufffd'",
"in",
"line",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/utf8'",
",",
"5",
",",
"'Line contains invalid UTF-8 (or Unicode replacement character).'",
")",
"if",
"'\\0'",
"in",
"line",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/nul'",
",",
"5",
",",
"'Line contains NUL byte.'",
")"
] | https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/buildscripts/cpplint.py#L1806-L1828 |
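Note: as a quick illustration of the `error` callback contract used above (cpplint callbacks take `filename, linenum, category, confidence, message`), here is a hypothetical driver that collects diagnostics from in-memory lines instead of a real file.

```python
issues = []

def collect_error(filename, linenum, category, confidence, message):
    # Same positional contract as cpplint's error-reporting function.
    issues.append((filename, linenum, category, message))

# A replacement character and a NUL byte should each raise a diagnostic.
CheckForBadCharacters('example.cc', [u'ok;', u'bad \ufffd', u'nul \0'],
                      collect_error)
print(issues)  # two entries: readability/utf8 and readability/nul
```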
||
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/framework/python/framework/tensor_util.py | python | remove_squeezable_dimensions | (predictions, labels, name=None) | Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed. | Squeeze last dim if ranks of `predictions` and `labels` differ by 1. | [
"Squeeze",
"last",
"dim",
"if",
"ranks",
"of",
"predictions",
"and",
"labels",
"differ",
"by",
"1",
"."
] | def remove_squeezable_dimensions(predictions, labels, name=None):
  """Squeeze last dim if ranks of `predictions` and `labels` differ by 1.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    name: Name of the op.

  Returns:
    Tuple of `predictions` and `labels`, possibly with last dim squeezed.
  """
  with ops.name_scope(name, 'remove_squeezable_dimensions',
                      [predictions, labels]):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.get_shape()
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      rank_diff = predictions_rank - labels_rank
      if rank_diff == -1:
        labels = array_ops.squeeze(labels, [-1])
      elif rank_diff == 1:
        predictions = array_ops.squeeze(predictions, [-1])
      return predictions, labels
    # Use dynamic rank.
    rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = control_flow_ops.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = control_flow_ops.cond(
          math_ops.equal(-1, rank_diff),
          lambda: array_ops.squeeze(labels, [-1]),
          lambda: labels)
    return predictions, labels | [
"def",
"remove_squeezable_dimensions",
"(",
"predictions",
",",
"labels",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"'remove_squeezable_dimensions'",
",",
"[",
"predictions",
",",
"labels",
"]",
")",
":",
"predictions",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"predictions",
")",
"labels",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"labels",
")",
"predictions_shape",
"=",
"predictions",
".",
"get_shape",
"(",
")",
"predictions_rank",
"=",
"predictions_shape",
".",
"ndims",
"labels_shape",
"=",
"labels",
".",
"get_shape",
"(",
")",
"labels_rank",
"=",
"labels_shape",
".",
"ndims",
"if",
"(",
"labels_rank",
"is",
"not",
"None",
")",
"and",
"(",
"predictions_rank",
"is",
"not",
"None",
")",
":",
"# Use static rank.",
"rank_diff",
"=",
"predictions_rank",
"-",
"labels_rank",
"if",
"rank_diff",
"==",
"-",
"1",
":",
"labels",
"=",
"array_ops",
".",
"squeeze",
"(",
"labels",
",",
"[",
"-",
"1",
"]",
")",
"elif",
"rank_diff",
"==",
"1",
":",
"predictions",
"=",
"array_ops",
".",
"squeeze",
"(",
"predictions",
",",
"[",
"-",
"1",
"]",
")",
"return",
"predictions",
",",
"labels",
"# Use dynamic rank.",
"rank_diff",
"=",
"array_ops",
".",
"rank",
"(",
"predictions",
")",
"-",
"array_ops",
".",
"rank",
"(",
"labels",
")",
"if",
"(",
"predictions_rank",
"is",
"None",
")",
"or",
"(",
"predictions_shape",
".",
"dims",
"[",
"-",
"1",
"]",
".",
"is_compatible_with",
"(",
"1",
")",
")",
":",
"predictions",
"=",
"control_flow_ops",
".",
"cond",
"(",
"math_ops",
".",
"equal",
"(",
"1",
",",
"rank_diff",
")",
",",
"lambda",
":",
"array_ops",
".",
"squeeze",
"(",
"predictions",
",",
"[",
"-",
"1",
"]",
")",
",",
"lambda",
":",
"predictions",
")",
"if",
"(",
"labels_rank",
"is",
"None",
")",
"or",
"(",
"labels_shape",
".",
"dims",
"[",
"-",
"1",
"]",
".",
"is_compatible_with",
"(",
"1",
")",
")",
":",
"labels",
"=",
"control_flow_ops",
".",
"cond",
"(",
"math_ops",
".",
"equal",
"(",
"-",
"1",
",",
"rank_diff",
")",
",",
"lambda",
":",
"array_ops",
".",
"squeeze",
"(",
"labels",
",",
"[",
"-",
"1",
"]",
")",
",",
"lambda",
":",
"labels",
")",
"return",
"predictions",
",",
"labels"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/framework/python/framework/tensor_util.py#L84-L129 |
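Note: a short sketch of the squeezing behavior documented above, assuming TF 1.x, where the function is re-exported from `tf.contrib.framework`.

```python
import tensorflow as tf
from tensorflow.contrib.framework import remove_squeezable_dimensions

predictions = tf.placeholder(tf.float32, shape=[None])   # rank 1
labels = tf.placeholder(tf.int64, shape=[None, 1])       # rank 2, trailing dim 1
predictions, labels = remove_squeezable_dimensions(predictions, labels)
# Ranks now match: labels had its trailing size-1 dimension squeezed away.
print(predictions.get_shape().ndims, labels.get_shape().ndims)  # 1 1
```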
||
cvmfs/cvmfs | 4637bdb5153178eadf885c1acf37bdc5c685bf8a | cpplint.py | python | _CppLintState.SetVerboseLevel | (self, level) | return last_verbose_level | Sets the module's verbosity, and returns the previous setting. | Sets the module's verbosity, and returns the previous setting. | [
"Sets",
"the",
"module",
"s",
"verbosity",
"and",
"returns",
"the",
"previous",
"setting",
"."
] | def SetVerboseLevel(self, level):
  """Sets the module's verbosity, and returns the previous setting."""
  last_verbose_level = self.verbose_level
  self.verbose_level = level
  return last_verbose_level | [
"def",
"SetVerboseLevel",
"(",
"self",
",",
"level",
")",
":",
"last_verbose_level",
"=",
"self",
".",
"verbose_level",
"self",
".",
"verbose_level",
"=",
"level",
"return",
"last_verbose_level"
] | https://github.com/cvmfs/cvmfs/blob/4637bdb5153178eadf885c1acf37bdc5c685bf8a/cpplint.py#L779-L783 |
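Note: returning the previous setting enables a save-and-restore idiom, sketched below. In cpplint itself the class instance is held in a module-global state object; the standalone instance here is illustrative only.

```python
state = _CppLintState()
old_level = state.SetVerboseLevel(5)  # returns the previous setting
try:
    pass  # ... run checks at the raised verbosity ...
finally:
    state.SetVerboseLevel(old_level)  # put the old verbosity back
```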
||
pwsafe/pwsafe | b5e4fe0c266feba12bbf2e5b3c3cbf61b3fd4e8b | Misc/sighlp_cmp.py | python | compare_folders | (folder1, folder2) | return True | Compares two folders and all files and subfolders in them.
:param folder1: Path to folder 1.
:param folder2: Path to folder 2.
:return: True on success, else an exception is raised. | Compares two folders and all files and subfolders in them.
:param folder1: Path to folder 1.
:param folder2: Path to folder 2.
:return: True on success, else an exception is raised. | [
"Compares",
"two",
"folders",
"and",
"all",
"files",
"and",
"subfolders",
"in",
"them",
".",
":",
"param",
"folder1",
":",
"Path",
"to",
"folder",
"1",
".",
":",
"param",
"folder2",
":",
"Path",
"to",
"folder",
"2",
".",
":",
"return",
":",
"True",
"on",
"success",
"else",
"an",
"exception",
"is",
"raised",
"."
] | def compare_folders(folder1, folder2):
    """
    Compares two folders and all files and subfolders in them.
    :param folder1: Path to folder 1.
    :param folder2: Path to folder 2.
    :return: True on success, else an exception is raised.
    """
    global verbosity_level, dir_names_to_ignore, file_names_to_ignore
    cond_print('Comparing "{}" to "{}" ...'.format(folder1, folder2), verbosity_level > VERBOSITY_LEVEL_SILENT)
    folder1_dir_list = []
    folder1_file_list = []
    folder2_dir_list = []
    folder2_file_list = []
    # Get all directories & files as lists for folder1
    for root, dirs, files in os.walk(folder1):
        for ignored_dir in dir_names_to_ignore:
            if ignored_dir in dirs:
                dirs.remove(ignored_dir)
                cond_print('Ignoring directory "{}".'.format(os.path.join(root, ignored_dir)), verbosity_level > VERBOSITY_LEVEL_SILENT)
        for dir_ in dirs:
            full_dir_path = os.path.join(root, dir_)
            clean_dir_path = full_dir_path.replace(folder1, '', 1)
            folder1_dir_list.append(clean_dir_path)
        for file_ in files:
            full_file_path = os.path.join(root, file_)
            if file_ in file_names_to_ignore:
                cond_print('Ignoring file "{}".'.format(full_file_path), verbosity_level > VERBOSITY_LEVEL_SILENT)
                continue
            clean_file_path = full_file_path.replace(folder1, '', 1)
            folder1_file_list.append(clean_file_path)
    # Get all directories & files as lists for the local path
    for root, dirs, files in os.walk(folder2):
        for ignored_dir in dir_names_to_ignore:
            if ignored_dir in dirs:
                dirs.remove(ignored_dir)
                cond_print('Ignoring directory "{}".'.format(os.path.join(root, ignored_dir)), verbosity_level > VERBOSITY_LEVEL_SILENT)
        for dir_ in dirs:
            full_dir_path = os.path.join(root, dir_)
            clean_dir_path = full_dir_path.replace(folder2, '', 1)
            folder2_dir_list.append(clean_dir_path)
        for file_ in files:
            full_file_path = os.path.join(root, file_)
            if file_ in file_names_to_ignore:
                cond_print('Ignoring file "{}".'.format(full_file_path), verbosity_level > VERBOSITY_LEVEL_SILENT)
                continue
            clean_file_path = full_file_path.replace(folder2, '', 1)
            folder2_file_list.append(clean_file_path)
    folder1_dir_list.sort()
    folder1_file_list.sort()
    folder2_dir_list.sort()
    folder2_file_list.sort()
    # Now we have four lists: One directory list per root folder, one file list per root folder, all cleaned from their
    # specific prefix and sorted alphabetically. The lists must have the same length and contain the same entries in
    # the same order, else the root folders are not the same from our logic's point of view. For files, also compare
    # the hash.
    if len(folder1_dir_list) != len(folder2_dir_list):
        for folder1_dir in folder1_dir_list:
            if folder1_dir not in folder2_dir_list:
                cond_print('Folder 1 directory "{}" has no match in folder 2.'.format(folder1_dir), verbosity_level > VERBOSITY_LEVEL_SILENT)
        for folder2_dir in folder2_dir_list:
            if folder2_dir not in folder1_dir_list:
                cond_print('Folder 2 directory "{}" has no match in folder 1.'.format(folder2_dir), verbosity_level > VERBOSITY_LEVEL_SILENT)
        raise RuntimeError('Directory structure is not equal. Aborting.')
    for folder1_dir, folder2_dir in zip(folder1_dir_list, folder2_dir_list):
        if folder1_dir != folder2_dir:
            raise RuntimeError('Directory name not equal: "{}" in folder 1, "{}" on folder 2. Aborting'.format(folder1_dir, folder2_dir))
        cond_print('Comparison passed: "{}" and "{}".'.format(folder1_dir, folder2_dir), verbosity_level > VERBOSITY_LEVEL_NORMAL)
    # Compare files via name & hash
    if len(folder1_file_list) != len(folder2_file_list):
        for folder1_file in folder1_file_list:
            if folder1_file not in folder2_file_list:
                print('folder1 file "{}" has no match on folder2.'.format(folder1_file))
        for folder2_file in folder2_file_list:
            if folder2_file not in folder1_file_list:
                print('folder2 file "{}" has no match in folder1.'.format(folder2_file))
        raise RuntimeError('File count different. Aborting.')
    for folder1_file, folder2_file in zip(folder1_file_list, folder2_file_list):
        if folder1_file != folder2_file:
            raise RuntimeError('File name not equal: "{}" in folder1, "{}" on folder2. Aborting'.format(folder1_file, folder2_file))
        folder1_file_hash = get_sha512_hashdigest(os.path.join(folder1, folder1_file))
        folder2_file_hash = get_sha512_hashdigest(os.path.join(folder2, folder2_file))
        if folder1_file_hash != folder2_file_hash:
            raise RuntimeError('Hash mismatch: "{}" for folder 1 file "{}", "{}" for folder 2 file "{}".'.format(folder1_file_hash, folder1_file, folder2_file_hash, folder2_file))
        cond_print('Comparison passed: "{}" and "{}" [{}].'.format(folder1_file, folder2_file, folder1_file_hash), verbosity_level > VERBOSITY_LEVEL_NORMAL)
    return True | [
"def",
"compare_folders",
"(",
"folder1",
",",
"folder2",
")",
":",
"global",
"verbosity_level",
",",
"dir_names_to_ignore",
",",
"file_names_to_ignore",
"cond_print",
"(",
"'Comparing \"{}\" to \"{}\" ...'",
".",
"format",
"(",
"folder1",
",",
"folder2",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"folder1_dir_list",
"=",
"[",
"]",
"folder1_file_list",
"=",
"[",
"]",
"folder2_dir_list",
"=",
"[",
"]",
"folder2_file_list",
"=",
"[",
"]",
"# Get all directories & files as lists for folder1",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"folder1",
")",
":",
"for",
"ignored_dir",
"in",
"dir_names_to_ignore",
":",
"if",
"ignored_dir",
"in",
"dirs",
":",
"dirs",
".",
"remove",
"(",
"ignored_dir",
")",
"cond_print",
"(",
"'Ignoring directory \"{}\".'",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"ignored_dir",
")",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"for",
"dir_",
"in",
"dirs",
":",
"full_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dir_",
")",
"clean_dir_path",
"=",
"full_dir_path",
".",
"replace",
"(",
"folder1",
",",
"''",
",",
"1",
")",
"folder1_dir_list",
".",
"append",
"(",
"clean_dir_path",
")",
"for",
"file_",
"in",
"files",
":",
"full_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file_",
")",
"if",
"file_",
"in",
"file_names_to_ignore",
":",
"cond_print",
"(",
"'Ignoring file \"{}\".'",
".",
"format",
"(",
"full_file_path",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"continue",
"clean_file_path",
"=",
"full_file_path",
".",
"replace",
"(",
"folder1",
",",
"''",
",",
"1",
")",
"folder1_file_list",
".",
"append",
"(",
"clean_file_path",
")",
"# Get all directories & files as lists for the local path",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"folder2",
")",
":",
"for",
"ignored_dir",
"in",
"dir_names_to_ignore",
":",
"if",
"ignored_dir",
"in",
"dirs",
":",
"dirs",
".",
"remove",
"(",
"ignored_dir",
")",
"cond_print",
"(",
"'Ignoring directory \"{}\".'",
".",
"format",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"ignored_dir",
")",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"for",
"dir_",
"in",
"dirs",
":",
"full_dir_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"dir_",
")",
"clean_dir_path",
"=",
"full_dir_path",
".",
"replace",
"(",
"folder2",
",",
"''",
",",
"1",
")",
"folder2_dir_list",
".",
"append",
"(",
"clean_dir_path",
")",
"for",
"file_",
"in",
"files",
":",
"full_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file_",
")",
"if",
"file_",
"in",
"file_names_to_ignore",
":",
"cond_print",
"(",
"'Ignoring file \"{}\".'",
".",
"format",
"(",
"full_file_path",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"continue",
"clean_file_path",
"=",
"full_file_path",
".",
"replace",
"(",
"folder2",
",",
"''",
",",
"1",
")",
"folder2_file_list",
".",
"append",
"(",
"clean_file_path",
")",
"folder1_dir_list",
".",
"sort",
"(",
")",
"folder1_file_list",
".",
"sort",
"(",
")",
"folder2_dir_list",
".",
"sort",
"(",
")",
"folder2_file_list",
".",
"sort",
"(",
")",
"# Now we have four lists: One directory list per root folder, one file list per root folder, all cleaned from their",
"# specific prefix and sorted alphabetically. The lists must have the same length and contain the same entries in",
"# the same order, else the root folders are not the same from our logic's point of view. For files, also compare",
"# the hash.",
"if",
"len",
"(",
"folder1_dir_list",
")",
"!=",
"len",
"(",
"folder2_dir_list",
")",
":",
"for",
"folder1_dir",
"in",
"folder1_dir_list",
":",
"if",
"folder1_dir",
"not",
"in",
"folder2_dir_list",
":",
"cond_print",
"(",
"'Folder 1 directory \"{}\" has no match in folder 2.'",
".",
"format",
"(",
"folder1_dir",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"for",
"folder2_dir",
"in",
"folder2_dir_list",
":",
"if",
"folder2_dir",
"not",
"in",
"folder1_dir_list",
":",
"cond_print",
"(",
"'Folder 2 directory \"{}\" has no match in folder 1.'",
".",
"format",
"(",
"folder2_dir",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_SILENT",
")",
"raise",
"RuntimeError",
"(",
"'Directory structure is not equal. Aborting.'",
")",
"for",
"folder1_dir",
",",
"folder2_dir",
"in",
"zip",
"(",
"folder1_dir_list",
",",
"folder2_dir_list",
")",
":",
"if",
"folder1_dir",
"!=",
"folder2_dir",
":",
"raise",
"RuntimeError",
"(",
"'Directory name not equal: \"{}\" in folder 1, \"{}\" on folder 2. Aborting'",
".",
"format",
"(",
"folder1_dir",
",",
"folder2_dir",
")",
")",
"cond_print",
"(",
"'Comparison passed: \"{}\" and \"{}\".'",
".",
"format",
"(",
"folder1_dir",
",",
"folder2_dir",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_NORMAL",
")",
"# Compare files via name & hash",
"if",
"len",
"(",
"folder1_file_list",
")",
"!=",
"len",
"(",
"folder2_file_list",
")",
":",
"for",
"folder1_file",
"in",
"folder1_file_list",
":",
"if",
"folder1_file",
"not",
"in",
"folder2_file_list",
":",
"print",
"(",
"'folder1 file \"{}\" has no match on folder2.'",
".",
"format",
"(",
"folder1_file",
")",
")",
"for",
"folder2_file",
"in",
"folder2_file_list",
":",
"if",
"folder2_file",
"not",
"in",
"folder1_file_list",
":",
"print",
"(",
"'folder2 file \"{}\" has no match in folder1.'",
".",
"format",
"(",
"folder2_file",
")",
")",
"raise",
"RuntimeError",
"(",
"'File count different. Aborting.'",
")",
"for",
"folder1_file",
",",
"folder2_file",
"in",
"zip",
"(",
"folder1_file_list",
",",
"folder2_file_list",
")",
":",
"if",
"folder1_file",
"!=",
"folder2_file",
":",
"raise",
"RuntimeError",
"(",
"'File name not equal: \"{}\" in folder1, \"{}\" on folder2. Aborting'",
".",
"format",
"(",
"folder1_file",
",",
"folder2_file",
")",
")",
"folder1_file_hash",
"=",
"get_sha512_hashdigest",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder1",
",",
"folder1_file",
")",
")",
"folder2_file_hash",
"=",
"get_sha512_hashdigest",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder2",
",",
"folder2_file",
")",
")",
"if",
"folder1_file_hash",
"!=",
"folder2_file_hash",
":",
"raise",
"RuntimeError",
"(",
"'Hash mismatch: \"{}\" for folder 1 file \"{}\", \"{}\" for folder 2 file \"{}\".'",
".",
"format",
"(",
"folder1_file_hash",
",",
"folder1_file",
",",
"folder2_file_hash",
",",
"folder2_file",
")",
")",
"cond_print",
"(",
"'Comparison passed: \"{}\" and \"{}\" [{}].'",
".",
"format",
"(",
"folder1_file",
",",
"folder2_file",
",",
"folder1_file_hash",
")",
",",
"verbosity_level",
">",
"VERBOSITY_LEVEL_NORMAL",
")",
"return",
"True"
] | https://github.com/pwsafe/pwsafe/blob/b5e4fe0c266feba12bbf2e5b3c3cbf61b3fd4e8b/Misc/sighlp_cmp.py#L132-L216 |
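Note: a hypothetical invocation of the comparison helper above; the paths are placeholders. The function either returns `True` or raises `RuntimeError` describing the first mismatch it finds.

```python
try:
    if compare_folders('/tmp/build_a', '/tmp/build_b'):
        print('Folders are identical.')
except RuntimeError as err:
    print('Comparison failed: {}'.format(err))
```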