nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---
hughperkins/tf-coriander
|
970d3df6c11400ad68405f22b0c42a52374e94ca
|
tensorflow/python/framework/errors.py
|
python
|
DataLossError.__init__
|
(self, node_def, op, message)
|
Creates a `DataLossError`.
|
Creates a `DataLossError`.
|
[
"Creates",
"a",
"DataLossError",
"."
] |
def __init__(self, node_def, op, message):
"""Creates a `DataLossError`."""
super(DataLossError, self).__init__(node_def, op, message, DATA_LOSS)
|
[
"def",
"__init__",
"(",
"self",
",",
"node_def",
",",
"op",
",",
"message",
")",
":",
"super",
"(",
"DataLossError",
",",
"self",
")",
".",
"__init__",
"(",
"node_def",
",",
"op",
",",
"message",
",",
"DATA_LOSS",
")"
] |
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/framework/errors.py#L409-L411
|
||
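A minimal usage sketch (not part of the record above): TensorFlow exposes this class publicly as `tf.errors.DataLossError`, and the `(node_def, op, message)` constructor shown in the record can be exercised directly. The corrupt-file scenario in the comment is a typical trigger, offered here as an assumption rather than something the record demonstrates.

```python
# Sketch, assuming a TensorFlow install that exposes tf.errors.
# DataLossError is typically raised when a checkpoint or TFRecord file is
# truncated or corrupt; here we construct and catch one directly, using the
# (node_def, op, message) signature shown in the record above.
import tensorflow as tf

try:
    raise tf.errors.DataLossError(None, None, "truncated record")
except tf.errors.DataLossError as err:
    print(err.message)  # -> "truncated record"
```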
pytorch/pytorch
|
7176c92687d3cc847cc046bf002269c6949a21c2
|
torch/optim/lr_scheduler.py
|
python
|
LambdaLR.load_state_dict
|
(self, state_dict)
|
Loads the scheduler's state.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
|
Loads the scheduler's state.
|
[
"Loads",
"the",
"schedulers",
"state",
"."
] |
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
lr_lambdas = state_dict.pop('lr_lambdas')
self.__dict__.update(state_dict)
# Restore state_dict keys in order to prevent side effects
# https://github.com/pytorch/pytorch/issues/32756
state_dict['lr_lambdas'] = lr_lambdas
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
self.lr_lambdas[idx].__dict__.update(fn)
|
[
"def",
"load_state_dict",
"(",
"self",
",",
"state_dict",
")",
":",
"lr_lambdas",
"=",
"state_dict",
".",
"pop",
"(",
"'lr_lambdas'",
")",
"self",
".",
"__dict__",
".",
"update",
"(",
"state_dict",
")",
"# Restore state_dict keys in order to prevent side effects",
"# https://github.com/pytorch/pytorch/issues/32756",
"state_dict",
"[",
"'lr_lambdas'",
"]",
"=",
"lr_lambdas",
"for",
"idx",
",",
"fn",
"in",
"enumerate",
"(",
"lr_lambdas",
")",
":",
"if",
"fn",
"is",
"not",
"None",
":",
"self",
".",
"lr_lambdas",
"[",
"idx",
"]",
".",
"__dict__",
".",
"update",
"(",
"fn",
")"
] |
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/optim/lr_scheduler.py#L227-L245
|
||
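A sketch of the save/load pairing this docstring asks for. The model, optimizer, and schedule below are illustrative assumptions, not part of the record.

```python
# Minimal sketch: the docstring says to save/load the optimizer state
# alongside the scheduler state, so the checkpoint carries both.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda epoch: 0.95 ** epoch)

checkpoint = {
    "optimizer": optimizer.state_dict(),
    "scheduler": scheduler.state_dict(),
}

# ...later, restore both together, as the docstring advises.
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
```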
openmm/openmm
|
cb293447c4fc8b03976dfe11399f107bab70f3d9
|
docs-source/sphinx/autonumber.py
|
python
|
get_chapter
|
(node, depth, section_numbers)
|
return ".".join(str(i) for i in chapter)
|
Get the numerical position of the chapter in which node resides
args:
node:
A docutils node whose chapter we want the number of
depth:
How many levels deep into the toctree is a "chapter"
section_numbers:
The output of chapter_numbers_by_section(env)
|
Get the numerical position of the chapter in which node resides
|
[
"Get",
"the",
"numerical",
"position",
"of",
"the",
"chapter",
"in",
"which",
"node",
"resides"
] |
def get_chapter(node, depth, section_numbers):
"""Get the numerical position of the chapter in which node resides
args:
node:
A docutils node whose chapter we want the number of
depth:
How many levels deep into the toctree is a "chapter"
section_numbers:
The output of chapter_numbers_by_section(env)
"""
parent = node.parent
chapter = None
while chapter is None:
if isinstance(parent, section):
chapter = parent
parent = parent.parent
src = str(Path(chapter.source).with_suffix(""))
chapter_id = chapter.attributes["ids"][0]
key = src + ":" + chapter_id
try:
chapter = section_numbers[key][:depth]
except KeyError:
# The above will fail if the section is at the top of a file;
# There doesn't seem to be a way to get the top section label
# in chapter_numbers_by_section, so we'll just assume that if
# the above fails, we're looking for a section with no label:
key = src + ":"
warn(f"Assuming {repr(chapter_id)} is a top level section")
chapter = section_numbers[key][:depth]
return ".".join(str(i) for i in chapter)
|
[
"def",
"get_chapter",
"(",
"node",
",",
"depth",
",",
"section_numbers",
")",
":",
"parent",
"=",
"node",
".",
"parent",
"chapter",
"=",
"None",
"while",
"chapter",
"is",
"None",
":",
"if",
"isinstance",
"(",
"parent",
",",
"section",
")",
":",
"chapter",
"=",
"parent",
"parent",
"=",
"parent",
".",
"parent",
"src",
"=",
"str",
"(",
"Path",
"(",
"chapter",
".",
"source",
")",
".",
"with_suffix",
"(",
"\"\"",
")",
")",
"chapter_id",
"=",
"chapter",
".",
"attributes",
"[",
"\"ids\"",
"]",
"[",
"0",
"]",
"key",
"=",
"src",
"+",
"\":\"",
"+",
"chapter_id",
"try",
":",
"chapter",
"=",
"section_numbers",
"[",
"key",
"]",
"[",
":",
"depth",
"]",
"except",
"KeyError",
":",
"# The above will fail if the section is at the top of a file;",
"# There doesn't seem to be a way to get the top section label",
"# in chapter_numbers_by_section, so we'll just assume that if",
"# the above fails, we're looking for a section with no label:",
"key",
"=",
"src",
"+",
"\":\"",
"warn",
"(",
"f\"Assuming {repr(chapter_id)} is a top level section\"",
")",
"chapter",
"=",
"section_numbers",
"[",
"key",
"]",
"[",
":",
"depth",
"]",
"return",
"\".\"",
".",
"join",
"(",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"chapter",
")"
] |
https://github.com/openmm/openmm/blob/cb293447c4fc8b03976dfe11399f107bab70f3d9/docs-source/sphinx/autonumber.py#L45-L75
|
|
BlzFans/wke
|
b0fa21158312e40c5fbd84682d643022b6c34a93
|
cygwin/lib/python2.6/imghdr.py
|
python
|
test_rgb
|
(h, f)
|
SGI image library
|
SGI image library
|
[
"SGI",
"image",
"library"
] |
def test_rgb(h, f):
"""SGI image library"""
if h[:2] == '\001\332':
return 'rgb'
|
[
"def",
"test_rgb",
"(",
"h",
",",
"f",
")",
":",
"if",
"h",
"[",
":",
"2",
"]",
"==",
"'\\001\\332'",
":",
"return",
"'rgb'"
] |
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/imghdr.py#L71-L74
|
||
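Note the record's path is a Python 2.6 stdlib copy, so the magic-number literal `'\001\332'` is a byte string. A sketch of the same check adapted to Python 3, where `imghdr` tests receive `bytes`:

```python
# Sketch only: under Python 3 the header is bytes, so the SGI magic number
# compares against b'\x01\xda' (octal 001 332 in the Python 2 literal).
def test_rgb(h, f):
    """SGI image library (bytes-based Python 3 variant)."""
    if h[:2] == b'\x01\xda':
        return 'rgb'

print(test_rgb(b'\x01\xda' + b'\x00' * 30, None))  # -> 'rgb'
```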
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py
|
python
|
WorkingSet.__init__
|
(self, entries=None)
|
Create working set from list of path entries (default=sys.path)
|
Create working set from list of path entries (default=sys.path)
|
[
"Create",
"working",
"set",
"from",
"list",
"of",
"path",
"entries",
"(",
"default",
"=",
"sys",
".",
"path",
")"
] |
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
|
[
"def",
"__init__",
"(",
"self",
",",
"entries",
"=",
"None",
")",
":",
"self",
".",
"entries",
"=",
"[",
"]",
"self",
".",
"entry_keys",
"=",
"{",
"}",
"self",
".",
"by_key",
"=",
"{",
"}",
"self",
".",
"callbacks",
"=",
"[",
"]",
"if",
"entries",
"is",
"None",
":",
"entries",
"=",
"sys",
".",
"path",
"for",
"entry",
"in",
"entries",
":",
"self",
".",
"add_entry",
"(",
"entry",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py#L556-L567
|
||
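A minimal usage sketch: with `entries=None` the working set is built over `sys.path`, and the resulting object is iterable over the active distributions. `pkg_resources` ships with setuptools.

```python
# Sketch: default WorkingSet over sys.path, then list what it found.
from pkg_resources import WorkingSet

ws = WorkingSet()   # entries=None -> uses sys.path
for dist in ws:     # WorkingSet iterates over its active distributions
    print(dist.project_name, dist.version)
```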
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/contrib/distributions/python/ops/sample_stats.py
|
python
|
percentile
|
(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None)
|
Compute the `q`-th percentile of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'lower' interpolation
x = [1., 2., 3., 4.]
percentile(x, q=30., interpolation='lower')
==> 1.0
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100.)
==> 4.0
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100., axis=[0])
==> [3., 4.]
```
Compare to `numpy.percentile`.
Args:
x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar `Tensor` in `[0, 100]`. The percentile.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
The axis that holds independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {"lower", "higher", "nearest"}. Default: "nearest"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1.
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity.
If False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is "percentile"
Returns:
A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
`axis` is `None`, a scalar.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
|
Compute the `q`-th percentile of `x`.
|
[
"Compute",
"the",
"q",
"-",
"th",
"percentile",
"of",
"x",
"."
] |
def percentile(x,
q,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute the `q`-th percentile of `x`.
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the
way from the minimum to the maximum in a sorted copy of `x`.
The values and distances of the two nearest neighbors as well as the
`interpolation` parameter will determine the percentile if the normalized
ranking does not match the location of `q` exactly.
This function is the same as the median if `q = 50`, the same as the minimum
if `q = 0` and the same as the maximum if `q = 100`.
```python
# Get 30th percentile with default ('nearest') interpolation.
x = [1., 2., 3., 4.]
percentile(x, q=30.)
==> 2.0
# Get 30th percentile with 'lower' interpolation
x = [1., 2., 3., 4.]
percentile(x, q=30., interpolation='lower')
==> 1.0
# Get 100th percentile (maximum). By default, this is computed over every dim
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100.)
==> 4.0
# Treat the leading dim as indexing samples, and find the 100th quantile (max)
# over all such samples.
x = [[1., 2.]
[3., 4.]]
percentile(x, q=100., axis=[0])
==> [3., 4.]
```
Compare to `numpy.percentile`.
Args:
x: Floating point `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
q: Scalar `Tensor` in `[0, 100]`. The percentile.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values.
The axis that holds independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {"lower", "higher", "nearest"}. Default: "nearest"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points `i < j`:
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1.
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity.
If False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is "percentile"
Returns:
A `(N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if
`axis` is `None`, a scalar.
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
"""
name = name or "percentile"
allowed_interpolations = {"lower", "higher", "nearest"}
if interpolation is None:
interpolation = "nearest"
else:
if interpolation not in allowed_interpolations:
raise ValueError("Argument 'interpolation' must be in %s. Found %s" %
(allowed_interpolations, interpolation))
with ops.name_scope(name, [x, q]):
x = ops.convert_to_tensor(x, name="x")
q = math_ops.to_float(q, name="q")
_get_static_ndims(q, expect_ndims=0)
if validate_args:
q = control_flow_ops.with_dependencies([
check_ops.assert_rank(q, 0), check_ops.assert_greater_equal(q, 0.),
check_ops.assert_less_equal(q, 100.)
], q)
if axis is None:
y = array_ops.reshape(x, [-1])
else:
axis = ops.convert_to_tensor(axis, name="axis")
check_ops.assert_integer(axis)
axis_ndims = _get_static_ndims(
axis, expect_static=True, expect_ndims_no_more_than=1)
axis_const = tensor_util.constant_value(axis)
if axis_const is None:
raise ValueError(
"Expected argument 'axis' to be statically available. Found: %s" %
axis)
axis = axis_const
if axis_ndims == 0:
axis = [axis]
axis = [int(a) for a in axis]
x_ndims = _get_static_ndims(
x, expect_static=True, expect_ndims_at_least=1)
axis = _make_static_axis_non_negative(axis, x_ndims)
y = _move_dims_to_flat_end(x, axis, x_ndims)
frac_at_q_or_above = 1. - q / 100.
d = math_ops.to_float(array_ops.shape(y)[-1])
if interpolation == "lower":
index = math_ops.ceil((d - 1) * frac_at_q_or_above)
elif interpolation == "higher":
index = math_ops.floor((d - 1) * frac_at_q_or_above)
elif interpolation == "nearest":
index = math_ops.round((d - 1) * frac_at_q_or_above)
# Sort everything, not just the top 'k' entries, which allows multiple calls
# to sort only once (under the hood) and use CSE.
sorted_y = _sort_tensor(y)
# result.shape = B
result = sorted_y[..., math_ops.to_int32(index)]
result.set_shape(y.get_shape()[:-1])
if keep_dims:
if axis is None:
# ones_vec = [1, 1,..., 1], total length = len(S) + len(B).
ones_vec = array_ops.ones(
shape=[_get_best_effort_ndims(x)], dtype=dtypes.int32)
result *= array_ops.ones(ones_vec, dtype=x.dtype)
else:
result = _insert_back_keep_dims(result, axis)
return result
|
[
"def",
"percentile",
"(",
"x",
",",
"q",
",",
"axis",
"=",
"None",
",",
"interpolation",
"=",
"None",
",",
"keep_dims",
"=",
"False",
",",
"validate_args",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"name",
"=",
"name",
"or",
"\"percentile\"",
"allowed_interpolations",
"=",
"{",
"\"lower\"",
",",
"\"higher\"",
",",
"\"nearest\"",
"}",
"if",
"interpolation",
"is",
"None",
":",
"interpolation",
"=",
"\"nearest\"",
"else",
":",
"if",
"interpolation",
"not",
"in",
"allowed_interpolations",
":",
"raise",
"ValueError",
"(",
"\"Argument 'interpolation' must be in %s. Found %s\"",
"%",
"(",
"allowed_interpolations",
",",
"interpolation",
")",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"[",
"x",
",",
"q",
"]",
")",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"\"x\"",
")",
"q",
"=",
"math_ops",
".",
"to_float",
"(",
"q",
",",
"name",
"=",
"\"q\"",
")",
"_get_static_ndims",
"(",
"q",
",",
"expect_ndims",
"=",
"0",
")",
"if",
"validate_args",
":",
"q",
"=",
"control_flow_ops",
".",
"with_dependencies",
"(",
"[",
"check_ops",
".",
"assert_rank",
"(",
"q",
",",
"0",
")",
",",
"check_ops",
".",
"assert_greater_equal",
"(",
"q",
",",
"0.",
")",
",",
"check_ops",
".",
"assert_less_equal",
"(",
"q",
",",
"100.",
")",
"]",
",",
"q",
")",
"if",
"axis",
"is",
"None",
":",
"y",
"=",
"array_ops",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
"]",
")",
"else",
":",
"axis",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"axis",
",",
"name",
"=",
"\"axis\"",
")",
"check_ops",
".",
"assert_integer",
"(",
"axis",
")",
"axis_ndims",
"=",
"_get_static_ndims",
"(",
"axis",
",",
"expect_static",
"=",
"True",
",",
"expect_ndims_no_more_than",
"=",
"1",
")",
"axis_const",
"=",
"tensor_util",
".",
"constant_value",
"(",
"axis",
")",
"if",
"axis_const",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Expected argument 'axis' to be statically available. Found: %s\"",
"%",
"axis",
")",
"axis",
"=",
"axis_const",
"if",
"axis_ndims",
"==",
"0",
":",
"axis",
"=",
"[",
"axis",
"]",
"axis",
"=",
"[",
"int",
"(",
"a",
")",
"for",
"a",
"in",
"axis",
"]",
"x_ndims",
"=",
"_get_static_ndims",
"(",
"x",
",",
"expect_static",
"=",
"True",
",",
"expect_ndims_at_least",
"=",
"1",
")",
"axis",
"=",
"_make_static_axis_non_negative",
"(",
"axis",
",",
"x_ndims",
")",
"y",
"=",
"_move_dims_to_flat_end",
"(",
"x",
",",
"axis",
",",
"x_ndims",
")",
"frac_at_q_or_above",
"=",
"1.",
"-",
"q",
"/",
"100.",
"d",
"=",
"math_ops",
".",
"to_float",
"(",
"array_ops",
".",
"shape",
"(",
"y",
")",
"[",
"-",
"1",
"]",
")",
"if",
"interpolation",
"==",
"\"lower\"",
":",
"index",
"=",
"math_ops",
".",
"ceil",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"elif",
"interpolation",
"==",
"\"higher\"",
":",
"index",
"=",
"math_ops",
".",
"floor",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"elif",
"interpolation",
"==",
"\"nearest\"",
":",
"index",
"=",
"math_ops",
".",
"round",
"(",
"(",
"d",
"-",
"1",
")",
"*",
"frac_at_q_or_above",
")",
"# Sort everything, not just the top 'k' entries, which allows multiple calls",
"# to sort only once (under the hood) and use CSE.",
"sorted_y",
"=",
"_sort_tensor",
"(",
"y",
")",
"# result.shape = B",
"result",
"=",
"sorted_y",
"[",
"...",
",",
"math_ops",
".",
"to_int32",
"(",
"index",
")",
"]",
"result",
".",
"set_shape",
"(",
"y",
".",
"get_shape",
"(",
")",
"[",
":",
"-",
"1",
"]",
")",
"if",
"keep_dims",
":",
"if",
"axis",
"is",
"None",
":",
"# ones_vec = [1, 1,..., 1], total length = len(S) + len(B).",
"ones_vec",
"=",
"array_ops",
".",
"ones",
"(",
"shape",
"=",
"[",
"_get_best_effort_ndims",
"(",
"x",
")",
"]",
",",
"dtype",
"=",
"dtypes",
".",
"int32",
")",
"result",
"*=",
"array_ops",
".",
"ones",
"(",
"ones_vec",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"else",
":",
"result",
"=",
"_insert_back_keep_dims",
"(",
"result",
",",
"axis",
")",
"return",
"result"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/distributions/python/ops/sample_stats.py#L40-L183
|
||
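The docstring says "Compare to `numpy.percentile`"; here is that comparison for the docstring's own first example, as a sketch. Recent NumPy spells the keyword `method`; older releases call it `interpolation`.

```python
# Sketch of the NumPy counterpart to the docstring's example.
import numpy as np

x = np.array([1., 2., 3., 4.])
print(np.percentile(x, 30, method='nearest'))  # 2.0, matching the example
print(np.percentile(x, 30, method='lower'))    # 1.0
```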
microsoft/CNTK
|
e9396480025b9ca457d26b6f33dd07c474c6aa04
|
bindings/python/cntk/io/__init__.py
|
python
|
UserMinibatchSource.is_infinite
|
(self)
|
return False
|
Should return true if the user has not specified any limit on the number of sweeps and samples.
|
Should return true if the user has not specified any limit on the number of sweeps and samples.
|
[
"Should",
"return",
"true",
"if",
"the",
"user",
"has",
"not",
"specified",
"any",
"limit",
"on",
"the",
"number",
"of",
"sweeps",
"and",
"samples",
"."
] |
def is_infinite(self):
'''
Should return true if the user has not specified any limit on the number of sweeps and samples.
'''
return False
|
[
"def",
"is_infinite",
"(",
"self",
")",
":",
"return",
"False"
] |
https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/io/__init__.py#L520-L524
|
|
FreeCAD/FreeCAD
|
ba42231b9c6889b89e064d6d563448ed81e376ec
|
src/Mod/Arch/importIFCmulticore.py
|
python
|
createProduct
|
(ifcproduct,brep)
|
return obj
|
creates an Arch object from an IFC product
|
creates an Arch object from an IFC product
|
[
"creates",
"an",
"Arch",
"object",
"from",
"an",
"IFC",
"product"
] |
def createProduct(ifcproduct,brep):
"""creates an Arch object from an IFC product"""
import Part
shape = Part.Shape()
shape.importBrepFromString(brep,False)
shape.scale(1000.0) # IfcOpenShell outputs in meters
if ifcproduct.is_a("IfcSpace"):
obj = Arch.makeSpace()
else:
obj = Arch.makeComponent()
obj.Shape = shape
objects[ifcproduct.id()] = obj
setAttributes(obj,ifcproduct)
setProperties(obj,ifcproduct)
createLayer(obj,ifcproduct)
createMaterial(obj,ifcproduct)
createModelStructure(obj,ifcproduct)
setRelationships(obj,ifcproduct)
setColor(obj,ifcproduct)
return obj
|
[
"def",
"createProduct",
"(",
"ifcproduct",
",",
"brep",
")",
":",
"import",
"Part",
"shape",
"=",
"Part",
".",
"Shape",
"(",
")",
"shape",
".",
"importBrepFromString",
"(",
"brep",
",",
"False",
")",
"shape",
".",
"scale",
"(",
"1000.0",
")",
"# IfcOpenShell outputs in meters",
"if",
"ifcproduct",
".",
"is_a",
"(",
"\"IfcSpace\"",
")",
":",
"obj",
"=",
"Arch",
".",
"makeSpace",
"(",
")",
"else",
":",
"obj",
"=",
"Arch",
".",
"makeComponent",
"(",
")",
"obj",
".",
"Shape",
"=",
"shape",
"objects",
"[",
"ifcproduct",
".",
"id",
"(",
")",
"]",
"=",
"obj",
"setAttributes",
"(",
"obj",
",",
"ifcproduct",
")",
"setProperties",
"(",
"obj",
",",
"ifcproduct",
")",
"createLayer",
"(",
"obj",
",",
"ifcproduct",
")",
"createMaterial",
"(",
"obj",
",",
"ifcproduct",
")",
"createModelStructure",
"(",
"obj",
",",
"ifcproduct",
")",
"setRelationships",
"(",
"obj",
",",
"ifcproduct",
")",
"setColor",
"(",
"obj",
",",
"ifcproduct",
")",
"return",
"obj"
] |
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Arch/importIFCmulticore.py#L157-L179
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/scipy/sparse/linalg/matfuncs.py
|
python
|
_onenormest_product
|
(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None)
|
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
|
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
|
Efficiently estimate the 1-norm of the matrix product of the args.
|
[
"Efficiently",
"estimate",
"the",
"1",
"-",
"norm",
"of",
"the",
"matrix",
"product",
"of",
"the",
"args",
"."
] |
def _onenormest_product(operator_seq,
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
"""
Efficiently estimate the 1-norm of the matrix product of the args.
Parameters
----------
operator_seq : linear operator sequence
Matrices whose 1-norm of product is to be computed.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
Larger values take longer and use more memory
but give more accurate output.
itmax : int, optional
Use at most this many iterations.
compute_v : bool, optional
Request a norm-maximizing linear operator input vector if True.
compute_w : bool, optional
Request a norm-maximizing linear operator output vector if True.
structure : str, optional
A string describing the structure of all operators.
Only `upper_triangular` is currently supported.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
"""
return scipy.sparse.linalg.onenormest(
ProductOperator(*operator_seq, structure=structure))
|
[
"def",
"_onenormest_product",
"(",
"operator_seq",
",",
"t",
"=",
"2",
",",
"itmax",
"=",
"5",
",",
"compute_v",
"=",
"False",
",",
"compute_w",
"=",
"False",
",",
"structure",
"=",
"None",
")",
":",
"return",
"scipy",
".",
"sparse",
".",
"linalg",
".",
"onenormest",
"(",
"ProductOperator",
"(",
"*",
"operator_seq",
",",
"structure",
"=",
"structure",
")",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/sparse/linalg/matfuncs.py#L304-L343
|
|
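The helper above is private SciPy code, but the same effect is available through public pieces: compose `LinearOperator`s (multiplication yields the product operator) and pass the result to `onenormest`. The matrix sizes below are arbitrary assumptions.

```python
# Sketch using public SciPy only: estimate the 1-norm of A @ B without
# ever forming the dense product.
import numpy as np
from scipy.sparse.linalg import aslinearoperator, onenormest

rng = np.random.default_rng(0)
A = rng.random((50, 50))
B = rng.random((50, 50))

product = aslinearoperator(A) * aslinearoperator(B)  # composition operator
print(onenormest(product))       # estimate (an underestimate in general)
print(np.linalg.norm(A @ B, 1))  # exact induced 1-norm, for comparison
```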
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/python/ops/control_flow_ops.py
|
python
|
CondContext._init_from_proto
|
(self, context_def, import_scope=None)
|
Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
|
Creates a new `CondContext` from protocol buffer.
|
[
"Creates",
"a",
"new",
"CondContext",
"from",
"protocol",
"buffer",
"."
] |
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.CondContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(
context_def.context_name, import_scope)
self._pred = g.as_graph_element(ops.prepend_name_scope(
context_def.pred_name, import_scope))
self._pivot = g.as_graph_element(ops.prepend_name_scope(
context_def.pivot_name, import_scope))
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
|
[
"def",
"_init_from_proto",
"(",
"self",
",",
"context_def",
",",
"import_scope",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"context_def",
",",
"control_flow_pb2",
".",
"CondContextDef",
")",
"# Create from context_def.",
"g",
"=",
"ops",
".",
"get_default_graph",
"(",
")",
"self",
".",
"_name",
"=",
"ops",
".",
"prepend_name_scope",
"(",
"context_def",
".",
"context_name",
",",
"import_scope",
")",
"self",
".",
"_pred",
"=",
"g",
".",
"as_graph_element",
"(",
"ops",
".",
"prepend_name_scope",
"(",
"context_def",
".",
"pred_name",
",",
"import_scope",
")",
")",
"self",
".",
"_pivot",
"=",
"g",
".",
"as_graph_element",
"(",
"ops",
".",
"prepend_name_scope",
"(",
"context_def",
".",
"pivot_name",
",",
"import_scope",
")",
")",
"self",
".",
"_branch",
"=",
"context_def",
".",
"branch",
"super",
"(",
"CondContext",
",",
"self",
")",
".",
"__init__",
"(",
"values_def",
"=",
"context_def",
".",
"values_def",
",",
"import_scope",
"=",
"import_scope",
")"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/control_flow_ops.py#L1544-L1562
|
||
FreeCAD/FreeCAD
|
ba42231b9c6889b89e064d6d563448ed81e376ec
|
src/Mod/Path/PathScripts/PathSlot.py
|
python
|
ObjectSlot._getOppMidPoints
|
(self, same)
|
return (p1, p2)
|
_getOppMidPoints(same)...
Find mid-points between ends of equal, opposing edges passed in tuple (edge1, edge2).
|
_getOppMidPoints(same)...
Find mid-points between ends of equal, opposing edges passed in tuple (edge1, edge2).
|
[
"_getOppMidPoints",
"(",
"same",
")",
"...",
"Find",
"mid",
"-",
"points",
"between",
"ends",
"of",
"equal",
"oppossing",
"edges",
"passed",
"in",
"tuple",
"(",
"edge1",
"edge2",
")",
"."
] |
def _getOppMidPoints(self, same):
"""_getOppMidPoints(same)...
Find mid-points between ends of equal, opposing edges passed in tuple (edge1, edge2)."""
com1 = same[0].CenterOfMass
com2 = same[1].CenterOfMass
p1 = FreeCAD.Vector(com1.x, com1.y, 0.0)
p2 = FreeCAD.Vector(com2.x, com2.y, 0.0)
return (p1, p2)
|
[
"def",
"_getOppMidPoints",
"(",
"self",
",",
"same",
")",
":",
"com1",
"=",
"same",
"[",
"0",
"]",
".",
"CenterOfMass",
"com2",
"=",
"same",
"[",
"1",
"]",
".",
"CenterOfMass",
"p1",
"=",
"FreeCAD",
".",
"Vector",
"(",
"com1",
".",
"x",
",",
"com1",
".",
"y",
",",
"0.0",
")",
"p2",
"=",
"FreeCAD",
".",
"Vector",
"(",
"com2",
".",
"x",
",",
"com2",
".",
"y",
",",
"0.0",
")",
"return",
"(",
"p1",
",",
"p2",
")"
] |
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Path/PathScripts/PathSlot.py#L1326-L1333
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/pandas/py3/pandas/core/indexes/range.py
|
python
|
RangeIndex.equals
|
(self, other: object)
|
return super().equals(other)
|
Determines if two Index objects contain the same elements.
|
Determines if two Index objects contain the same elements.
|
[
"Determines",
"if",
"two",
"Index",
"objects",
"contain",
"the",
"same",
"elements",
"."
] |
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
|
[
"def",
"equals",
"(",
"self",
",",
"other",
":",
"object",
")",
"->",
"bool",
":",
"if",
"isinstance",
"(",
"other",
",",
"RangeIndex",
")",
":",
"return",
"self",
".",
"_range",
"==",
"other",
".",
"_range",
"return",
"super",
"(",
")",
".",
"equals",
"(",
"other",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/indexes/range.py#L543-L549
|
|
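A sketch of both branches of the method: the `RangeIndex` fast path compares the underlying ranges, while equality against a materialized integer index still falls through to element-wise comparison.

```python
# Sketch: equality is element-wise, so a plain Index with the same values
# matches a RangeIndex even though the types differ.
import pandas as pd

a = pd.RangeIndex(start=0, stop=5)
b = pd.RangeIndex(start=0, stop=5)
c = pd.Index([0, 1, 2, 3, 4])

print(a.equals(b))  # True (RangeIndex vs RangeIndex: compares _range)
print(a.equals(c))  # True (falls back to super().equals(other))
```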
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pkg_resources.py
|
python
|
WorkingSet.add
|
(self, dist, entry=None, insert=True)
|
Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set. If it's added, any
callbacks registered with the ``subscribe()`` method will be called.
|
Add `dist` to working set, associated with `entry`
|
[
"Add",
"dist",
"to",
"working",
"set",
"associated",
"with",
"entry"
] |
def add(self, dist, entry=None, insert=True):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set. If it's added, any
callbacks registered with the ``subscribe()`` method will be called.
"""
if insert:
dist.insert_on(self.entries, entry)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry,[])
keys2 = self.entry_keys.setdefault(dist.location,[])
if dist.key in self.by_key:
return # ignore hidden distros
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
|
[
"def",
"add",
"(",
"self",
",",
"dist",
",",
"entry",
"=",
"None",
",",
"insert",
"=",
"True",
")",
":",
"if",
"insert",
":",
"dist",
".",
"insert_on",
"(",
"self",
".",
"entries",
",",
"entry",
")",
"if",
"entry",
"is",
"None",
":",
"entry",
"=",
"dist",
".",
"location",
"keys",
"=",
"self",
".",
"entry_keys",
".",
"setdefault",
"(",
"entry",
",",
"[",
"]",
")",
"keys2",
"=",
"self",
".",
"entry_keys",
".",
"setdefault",
"(",
"dist",
".",
"location",
",",
"[",
"]",
")",
"if",
"dist",
".",
"key",
"in",
"self",
".",
"by_key",
":",
"return",
"# ignore hidden distros",
"self",
".",
"by_key",
"[",
"dist",
".",
"key",
"]",
"=",
"dist",
"if",
"dist",
".",
"key",
"not",
"in",
"keys",
":",
"keys",
".",
"append",
"(",
"dist",
".",
"key",
")",
"if",
"dist",
".",
"key",
"not",
"in",
"keys2",
":",
"keys2",
".",
"append",
"(",
"dist",
".",
"key",
")",
"self",
".",
"_added_new",
"(",
"dist",
")"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pkg_resources.py#L511-L537
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_controls.py
|
python
|
DragImage.Show
|
(*args, **kwargs)
|
return _controls_.DragImage_Show(*args, **kwargs)
|
Show(self) -> bool
|
Show(self) -> bool
|
[
"Show",
"(",
"self",
")",
"-",
">",
"bool"
] |
def Show(*args, **kwargs):
"""Show(self) -> bool"""
return _controls_.DragImage_Show(*args, **kwargs)
|
[
"def",
"Show",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"DragImage_Show",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L6372-L6374
|
|
stan-dev/math
|
5fd79f89933269a4ca4d8dd1fde2a36d53d4768c
|
lib/boost_1.75.0/tools/build/src/build/project.py
|
python
|
ProjectRegistry.load_module
|
(self, name, extra_path=None)
|
Load a Python module that should be usable from Jamfiles.
There are generally two types of modules Jamfiles might want to
use:
- Core Boost.Build. Those are imported using plain names, e.g.
'toolset', so this function checks if we have module named
b2.package.module already.
- Python modules in the same directory as Jamfile. We don't
want to even temporarily add Jamfile's directory to sys.path,
since then we might get naming conflicts between standard
Python modules and those.
|
Load a Python module that should be usable from Jamfiles.
|
[
"Load",
"a",
"Python",
"module",
"that",
"should",
"be",
"usable",
"from",
"Jamfiles",
"."
] |
def load_module(self, name, extra_path=None):
"""Load a Python module that should be usable from Jamfiles.
There are generally two types of modules Jamfiles might want to
use:
- Core Boost.Build. Those are imported using plain names, e.g.
'toolset', so this function checks if we have module named
b2.package.module already.
- Python modules in the same directory as Jamfile. We don't
want to even temporarily add Jamfile's directory to sys.path,
since then we might get naming conflicts between standard
Python modules and those.
"""
assert isinstance(name, basestring)
assert is_iterable_typed(extra_path, basestring) or extra_path is None
# See if we loaded module of this name already
existing = self.loaded_tool_modules_.get(name)
if existing:
return existing
# check the extra path as well as any paths outside
# of the b2 package and import the module if it exists
b2_path = os.path.normpath(b2.__path__[0])
# normalize the pathing in the BOOST_BUILD_PATH.
# this allows for using startswith() to determine
# if a path is a subdirectory of the b2 root_path
paths = [os.path.normpath(p) for p in self.manager.boost_build_path()]
# remove all paths that start with b2's root_path
paths = [p for p in paths if not p.startswith(b2_path)]
# add any extra paths
paths.extend(extra_path)
try:
# find_module is used so that the pyc's can be used.
# an ImportError is raised if not found
f, location, description = imp.find_module(name, paths)
except ImportError:
# if the module is not found in the b2 package,
# this error will be handled later
pass
else:
# we've found the module, now let's try loading it.
# it's possible that the module itself contains an ImportError
# which is why we're loading it in this else clause so that the
# proper error message is shown to the end user.
# TODO: does this module name really need to be mangled like this?
mname = name + "__for_jamfile"
self.loaded_tool_module_path_[mname] = location
module = imp.load_module(mname, f, location, description)
self.loaded_tool_modules_[name] = module
return module
# the cache is created here due to possibly importing packages
# that end up calling get_manager() which might fail
if not self.__python_module_cache:
self.__build_python_module_cache()
underscore_name = name.replace('-', '_')
# check to see if the module is within the b2 package
# and already loaded
mname = self.__python_module_cache.get(underscore_name)
if mname in sys.modules:
return sys.modules[mname]
# otherwise, if the module name is within the cache,
# the module exists within the BOOST_BUILD_PATH,
# load it.
elif mname:
# in some cases, self.loaded_tool_module_path_ needs to
# have the path to the file during the import
# (project.initialize() for example),
# so the path needs to be set *before* importing the module.
path = os.path.join(b2.__path__[0], *mname.split('.')[1:])
self.loaded_tool_module_path_[mname] = path
# mname is guaranteed to be importable since it was
# found within the cache
__import__(mname)
module = sys.modules[mname]
self.loaded_tool_modules_[name] = module
return module
self.manager.errors()("Cannot find module '%s'" % name)
|
[
"def",
"load_module",
"(",
"self",
",",
"name",
",",
"extra_path",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"assert",
"is_iterable_typed",
"(",
"extra_path",
",",
"basestring",
")",
"or",
"extra_path",
"is",
"None",
"# See if we loaded module of this name already",
"existing",
"=",
"self",
".",
"loaded_tool_modules_",
".",
"get",
"(",
"name",
")",
"if",
"existing",
":",
"return",
"existing",
"# check the extra path as well as any paths outside",
"# of the b2 package and import the module if it exists",
"b2_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"b2",
".",
"__path__",
"[",
"0",
"]",
")",
"# normalize the pathing in the BOOST_BUILD_PATH.",
"# this allows for using startswith() to determine",
"# if a path is a subdirectory of the b2 root_path",
"paths",
"=",
"[",
"os",
".",
"path",
".",
"normpath",
"(",
"p",
")",
"for",
"p",
"in",
"self",
".",
"manager",
".",
"boost_build_path",
"(",
")",
"]",
"# remove all paths that start with b2's root_path",
"paths",
"=",
"[",
"p",
"for",
"p",
"in",
"paths",
"if",
"not",
"p",
".",
"startswith",
"(",
"b2_path",
")",
"]",
"# add any extra paths",
"paths",
".",
"extend",
"(",
"extra_path",
")",
"try",
":",
"# find_module is used so that the pyc's can be used.",
"# an ImportError is raised if not found",
"f",
",",
"location",
",",
"description",
"=",
"imp",
".",
"find_module",
"(",
"name",
",",
"paths",
")",
"except",
"ImportError",
":",
"# if the module is not found in the b2 package,",
"# this error will be handled later",
"pass",
"else",
":",
"# we've found the module, now let's try loading it.",
"# it's possible that the module itself contains an ImportError",
"# which is why we're loading it in this else clause so that the",
"# proper error message is shown to the end user.",
"# TODO: does this module name really need to be mangled like this?",
"mname",
"=",
"name",
"+",
"\"__for_jamfile\"",
"self",
".",
"loaded_tool_module_path_",
"[",
"mname",
"]",
"=",
"location",
"module",
"=",
"imp",
".",
"load_module",
"(",
"mname",
",",
"f",
",",
"location",
",",
"description",
")",
"self",
".",
"loaded_tool_modules_",
"[",
"name",
"]",
"=",
"module",
"return",
"module",
"# the cache is created here due to possibly importing packages",
"# that end up calling get_manager() which might fail",
"if",
"not",
"self",
".",
"__python_module_cache",
":",
"self",
".",
"__build_python_module_cache",
"(",
")",
"underscore_name",
"=",
"name",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"# check to see if the module is within the b2 package",
"# and already loaded",
"mname",
"=",
"self",
".",
"__python_module_cache",
".",
"get",
"(",
"underscore_name",
")",
"if",
"mname",
"in",
"sys",
".",
"modules",
":",
"return",
"sys",
".",
"modules",
"[",
"mname",
"]",
"# otherwise, if the module name is within the cache,",
"# the module exists within the BOOST_BUILD_PATH,",
"# load it.",
"elif",
"mname",
":",
"# in some cases, self.loaded_tool_module_path_ needs to",
"# have the path to the file during the import",
"# (project.initialize() for example),",
"# so the path needs to be set *before* importing the module.",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"b2",
".",
"__path__",
"[",
"0",
"]",
",",
"*",
"mname",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
":",
"]",
")",
"self",
".",
"loaded_tool_module_path_",
"[",
"mname",
"]",
"=",
"path",
"# mname is guaranteed to be importable since it was",
"# found within the cache",
"__import__",
"(",
"mname",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"mname",
"]",
"self",
".",
"loaded_tool_modules_",
"[",
"name",
"]",
"=",
"module",
"return",
"module",
"self",
".",
"manager",
".",
"errors",
"(",
")",
"(",
"\"Cannot find module '%s'\"",
"%",
"name",
")"
] |
https://github.com/stan-dev/math/blob/5fd79f89933269a4ca4d8dd1fde2a36d53d4768c/lib/boost_1.75.0/tools/build/src/build/project.py#L726-L806
|
||
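The record above relies on the Python 2-only `imp` module. As a sketch, the core of the `try` branch (load a module from an explicit file path under a mangled name) translates to `importlib` like this; the path and module name here are hypothetical.

```python
# Sketch of the try-branch's core in modern terms; `imp` was removed in
# Python 3.12. Location and name below are hypothetical.
import importlib.util

location = "/some/jamfile/dir/mytool.py"  # hypothetical path
spec = importlib.util.spec_from_file_location("mytool__for_jamfile", location)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)  # may itself raise, as the comments note
```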
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/gsutil/gslib/util.py
|
python
|
HaveFileUrls
|
(args_to_check)
|
return False
|
Checks whether args_to_check contain any file URLs.
Args:
args_to_check: Command-line argument subset to check.
Returns:
True if args_to_check contains any file URLs.
|
Checks whether args_to_check contain any file URLs.
|
[
"Checks",
"whether",
"args_to_check",
"contain",
"any",
"file",
"URLs",
"."
] |
def HaveFileUrls(args_to_check):
"""Checks whether args_to_check contain any file URLs.
Args:
args_to_check: Command-line argument subset to check.
Returns:
True if args_to_check contains any file URLs.
"""
for url_str in args_to_check:
storage_url = StorageUrlFromString(url_str)
if storage_url.IsFileUrl():
return True
return False
|
[
"def",
"HaveFileUrls",
"(",
"args_to_check",
")",
":",
"for",
"url_str",
"in",
"args_to_check",
":",
"storage_url",
"=",
"StorageUrlFromString",
"(",
"url_str",
")",
"if",
"storage_url",
".",
"IsFileUrl",
"(",
")",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/gslib/util.py#L926-L939
|
|
CRYTEK/CRYENGINE
|
232227c59a220cbbd311576f0fbeba7bb53b2a8c
|
Editor/Python/windows/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py
|
python
|
Timeout.get_connect_duration
|
(self)
|
return current_time() - self._start_connect
|
Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
|
Gets the time elapsed since the call to :meth:`start_connect`.
|
[
"Gets",
"the",
"time",
"elapsed",
"since",
"the",
"call",
"to",
":",
"meth",
":",
"start_connect",
"."
] |
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
|
[
"def",
"get_connect_duration",
"(",
"self",
")",
":",
"if",
"self",
".",
"_start_connect",
"is",
"None",
":",
"raise",
"TimeoutStateError",
"(",
"\"Can't get connect duration for timer \"",
"\"that has not started.\"",
")",
"return",
"current_time",
"(",
")",
"-",
"self",
".",
"_start_connect"
] |
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py#L180-L191
|
|
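A minimal sketch of the intended call order: `start_connect()` must run first, otherwise `get_connect_duration()` raises the `TimeoutStateError` described in the docstring.

```python
# Sketch: time a connect phase with urllib3's Timeout object.
from urllib3.util.timeout import Timeout

timeout = Timeout(connect=2.0, read=7.0)
timeout.start_connect()
# ... socket connect would happen here ...
print(timeout.get_connect_duration())  # float seconds since start_connect()
```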
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py
|
python
|
AbstractFileSystem.du
|
(self, path, total=True, maxdepth=None, **kwargs)
|
Space used by files within a path
Parameters
----------
path: str
total: bool
whether to sum all the file sizes
maxdepth: int or None
maximum number of directory levels to descend, None for unlimited.
kwargs: passed to ``ls``
Returns
-------
Dict of {fn: size} if total=False, or int otherwise, where numbers
refer to bytes used.
|
Space used by files within a path
|
[
"Space",
"used",
"by",
"files",
"within",
"a",
"path"
] |
def du(self, path, total=True, maxdepth=None, **kwargs):
"""Space used by files within a path
Parameters
----------
path: str
total: bool
whether to sum all the file sizes
maxdepth: int or None
maximum number of directory levels to descend, None for unlimited.
kwargs: passed to ``ls``
Returns
-------
Dict of {fn: size} if total=False, or int otherwise, where numbers
refer to bytes used.
"""
sizes = {}
for f in self.find(path, maxdepth=maxdepth, **kwargs):
info = self.info(f)
sizes[info["name"]] = info["size"]
if total:
return sum(sizes.values())
else:
return sizes
|
[
"def",
"du",
"(",
"self",
",",
"path",
",",
"total",
"=",
"True",
",",
"maxdepth",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"sizes",
"=",
"{",
"}",
"for",
"f",
"in",
"self",
".",
"find",
"(",
"path",
",",
"maxdepth",
"=",
"maxdepth",
",",
"*",
"*",
"kwargs",
")",
":",
"info",
"=",
"self",
".",
"info",
"(",
"f",
")",
"sizes",
"[",
"info",
"[",
"\"name\"",
"]",
"]",
"=",
"info",
"[",
"\"size\"",
"]",
"if",
"total",
":",
"return",
"sum",
"(",
"sizes",
".",
"values",
"(",
")",
")",
"else",
":",
"return",
"sizes"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py#L408-L432
|
||
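A usage sketch against fsspec's local-filesystem implementation; any readable path works in place of `/tmp`.

```python
# Sketch: both return shapes described in the docstring.
import fsspec

fs = fsspec.filesystem("file")
print(fs.du("/tmp"))               # int: total bytes (total=True is default)
print(fs.du("/tmp", total=False))  # dict: {filename: size in bytes}
```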
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/cookielib.py
|
python
|
CookiePolicy.return_ok
|
(self, cookie, request)
|
Return true if (and only if) cookie should be returned to server.
|
Return true if (and only if) cookie should be returned to server.
|
[
"Return",
"true",
"if",
"(",
"and",
"only",
"if",
")",
"cookie",
"should",
"be",
"returned",
"to",
"server",
"."
] |
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server."""
raise NotImplementedError()
|
[
"def",
"return_ok",
"(",
"self",
",",
"cookie",
",",
"request",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/cookielib.py#L823-L825
|
||
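`return_ok` is the abstract hook a policy subclass overrides. A sketch (under Python 3, where `cookielib` lives on as `http.cookiejar`):

```python
# Sketch: a policy that only ever returns cookies over secure connections,
# deferring to the stock checks otherwise.
from http.cookiejar import DefaultCookiePolicy

class SecureOnlyPolicy(DefaultCookiePolicy):
    def return_ok(self, cookie, request):
        return cookie.secure and super().return_ok(cookie, request)
```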
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/AWSPythonSDK/1.5.8/botocore/signers.py
|
python
|
RequestSigner.get_auth_instance
|
(self, signing_name, region_name,
signature_version=None, **kwargs)
|
return auth
|
Get an auth instance which can be used to sign a request
using the given signature version.
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:rtype: :py:class:`~botocore.auth.BaseSigner`
:return: Auth instance to sign a request.
|
Get an auth instance which can be used to sign a request
using the given signature version.
|
[
"Get",
"an",
"auth",
"instance",
"which",
"can",
"be",
"used",
"to",
"sign",
"a",
"request",
"using",
"the",
"given",
"signature",
"version",
"."
] |
def get_auth_instance(self, signing_name, region_name,
signature_version=None, **kwargs):
"""
Get an auth instance which can be used to sign a request
using the given signature version.
:type signing_name: string
:param signing_name: Service signing name. This is usually the
same as the service name, but can differ. E.g.
``emr`` vs. ``elasticmapreduce``.
:type region_name: string
:param region_name: Name of the service region, e.g. ``us-east-1``
:type signature_version: string
:param signature_version: Signature name like ``v4``.
:rtype: :py:class:`~botocore.auth.BaseSigner`
:return: Auth instance to sign a request.
"""
if signature_version is None:
signature_version = self._signature_version
cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
if cls is None:
raise UnknownSignatureVersionError(
signature_version=signature_version)
# If there's no credentials provided (i.e credentials is None),
# then we'll pass a value of "None" over to the auth classes,
# which already handle the cases where no credentials have
# been provided.
frozen_credentials = None
if self._credentials is not None:
frozen_credentials = self._credentials.get_frozen_credentials()
kwargs['credentials'] = frozen_credentials
if cls.REQUIRES_REGION:
if self._region_name is None:
raise botocore.exceptions.NoRegionError()
kwargs['region_name'] = region_name
kwargs['service_name'] = signing_name
auth = cls(**kwargs)
return auth
|
[
"def",
"get_auth_instance",
"(",
"self",
",",
"signing_name",
",",
"region_name",
",",
"signature_version",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"signature_version",
"is",
"None",
":",
"signature_version",
"=",
"self",
".",
"_signature_version",
"cls",
"=",
"botocore",
".",
"auth",
".",
"AUTH_TYPE_MAPS",
".",
"get",
"(",
"signature_version",
")",
"if",
"cls",
"is",
"None",
":",
"raise",
"UnknownSignatureVersionError",
"(",
"signature_version",
"=",
"signature_version",
")",
"# If there's no credentials provided (i.e credentials is None),",
"# then we'll pass a value of \"None\" over to the auth classes,",
"# which already handle the cases where no credentials have",
"# been provided.",
"frozen_credentials",
"=",
"None",
"if",
"self",
".",
"_credentials",
"is",
"not",
"None",
":",
"frozen_credentials",
"=",
"self",
".",
"_credentials",
".",
"get_frozen_credentials",
"(",
")",
"kwargs",
"[",
"'credentials'",
"]",
"=",
"frozen_credentials",
"if",
"cls",
".",
"REQUIRES_REGION",
":",
"if",
"self",
".",
"_region_name",
"is",
"None",
":",
"raise",
"botocore",
".",
"exceptions",
".",
"NoRegionError",
"(",
")",
"kwargs",
"[",
"'region_name'",
"]",
"=",
"region_name",
"kwargs",
"[",
"'service_name'",
"]",
"=",
"signing_name",
"auth",
"=",
"cls",
"(",
"*",
"*",
"kwargs",
")",
"return",
"auth"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/signers.py#L192-L233
|
|
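The lookup table the method consults, `botocore.auth.AUTH_TYPE_MAPS`, is importable, so the available signature versions can be inspected directly. The exact keys vary by botocore release (`v4`, `s3v4`, etc. are typical); treat the output below as an assumption.

```python
# Sketch: list the signature versions this botocore install can map to
# auth classes.
import botocore.auth

print(sorted(botocore.auth.AUTH_TYPE_MAPS))
```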
crankyoldgit/IRremoteESP8266
|
6bc095af80e5aec47d66f8c6263f3a943ea3b4d5
|
tools/auto_analyse_raw_data.py
|
python
|
RawIRMessage.is_bit_mark
|
(self, usec)
|
return self._usec_compare(usec, self.bit_mark)
|
Is usec the bit mark?
|
Is usec the bit mark?
|
[
"Is",
"usec",
"the",
"bit",
"mark?"
] |
def is_bit_mark(self, usec):
"""Is usec the bit mark?"""
return self._usec_compare(usec, self.bit_mark)
|
[
"def",
"is_bit_mark",
"(",
"self",
",",
"usec",
")",
":",
"return",
"self",
".",
"_usec_compare",
"(",
"usec",
",",
"self",
".",
"bit_mark",
")"
] |
https://github.com/crankyoldgit/IRremoteESP8266/blob/6bc095af80e5aec47d66f8c6263f3a943ea3b4d5/tools/auto_analyse_raw_data.py#L263-L265
|
|
nci/drishti
|
89cd8b740239c5b2c8222dffd4e27432fde170a1
|
bin/assets/scripts/unet++/unet_collection/losses.py
|
python
|
iou_seg
|
(y_true, y_pred, dtype=tf.float32)
|
return 1-tf.math.divide_no_nan(area_intersect, area_union)
|
Intersection over Union (IoU) loss for segmentation maps.
iou_seg(y_true, y_pred, dtype=tf.float32)
----------
Rahman, M.A. and Wang, Y., 2016, December. Optimizing intersection-over-union in deep neural networks for
image segmentation. In International symposium on visual computing (pp. 234-244). Springer, Cham.
----------
Input
y_true: segmentation targets, c.f. `keras.losses.categorical_crossentropy`
y_pred: segmentation predictions.
dtype: the data type of input tensors.
Default is tf.float32.
|
Intersection over Union (IoU) loss for segmentation maps.
iou_seg(y_true, y_pred, dtype=tf.float32)
----------
Rahman, M.A. and Wang, Y., 2016, December. Optimizing intersection-over-union in deep neural networks for
image segmentation. In International symposium on visual computing (pp. 234-244). Springer, Cham.
----------
Input
y_true: segmentation targets, c.f. `keras.losses.categorical_crossentropy`
y_pred: segmentation predictions.
dtype: the data type of input tensors.
Default is tf.float32.
|
[
"Inersection",
"over",
"Union",
"(",
"IoU",
")",
"loss",
"for",
"segmentation",
"maps",
".",
"iou_seg",
"(",
"y_true",
"y_pred",
"dtype",
"=",
"tf",
".",
"float32",
")",
"----------",
"Rahman",
"M",
".",
"A",
".",
"and",
"Wang",
"Y",
".",
"2016",
"December",
".",
"Optimizing",
"intersection",
"-",
"over",
"-",
"union",
"in",
"deep",
"neural",
"networks",
"for",
"image",
"segmentation",
".",
"In",
"International",
"symposium",
"on",
"visual",
"computing",
"(",
"pp",
".",
"234",
"-",
"244",
")",
".",
"Springer",
"Cham",
".",
"----------",
"Input",
"y_true",
":",
"segmentation",
"targets",
"c",
".",
"f",
".",
"keras",
".",
"losses",
".",
"categorical_crossentropy",
"y_pred",
":",
"segmentation",
"predictions",
".",
"dtype",
":",
"the",
"data",
"type",
"of",
"input",
"tensors",
".",
"Default",
"is",
"tf",
".",
"float32",
"."
] |
def iou_seg(y_true, y_pred, dtype=tf.float32):
"""
Intersection over Union (IoU) loss for segmentation maps.
iou_seg(y_true, y_pred, dtype=tf.float32)
----------
Rahman, M.A. and Wang, Y., 2016, December. Optimizing intersection-over-union in deep neural networks for
image segmentation. In International symposium on visual computing (pp. 234-244). Springer, Cham.
----------
Input
y_true: segmentation targets, c.f. `keras.losses.categorical_crossentropy`
y_pred: segmentation predictions.
dtype: the data type of input tensors.
Default is tf.float32.
"""
# tf tensor casting
y_pred = tf.convert_to_tensor(y_pred)
y_pred = tf.cast(y_pred, dtype)
y_true = tf.cast(y_true, y_pred.dtype)
y_pred = tf.squeeze(y_pred)
y_true = tf.squeeze(y_true)
y_true_pos = tf.reshape(y_true, [-1])
y_pred_pos = tf.reshape(y_pred, [-1])
area_intersect = tf.reduce_sum(tf.multiply(y_true_pos, y_pred_pos))
area_true = tf.reduce_sum(y_true_pos)
area_pred = tf.reduce_sum(y_pred_pos)
area_union = area_true + area_pred - area_intersect
return 1-tf.math.divide_no_nan(area_intersect, area_union)
|
[
"def",
"iou_seg",
"(",
"y_true",
",",
"y_pred",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"# tf tensor casting",
"y_pred",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"y_pred",
")",
"y_pred",
"=",
"tf",
".",
"cast",
"(",
"y_pred",
",",
"dtype",
")",
"y_true",
"=",
"tf",
".",
"cast",
"(",
"y_true",
",",
"y_pred",
".",
"dtype",
")",
"y_pred",
"=",
"tf",
".",
"squeeze",
"(",
"y_pred",
")",
"y_true",
"=",
"tf",
".",
"squeeze",
"(",
"y_true",
")",
"y_true_pos",
"=",
"tf",
".",
"reshape",
"(",
"y_true",
",",
"[",
"-",
"1",
"]",
")",
"y_pred_pos",
"=",
"tf",
".",
"reshape",
"(",
"y_pred",
",",
"[",
"-",
"1",
"]",
")",
"area_intersect",
"=",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"multiply",
"(",
"y_true_pos",
",",
"y_pred_pos",
")",
")",
"area_true",
"=",
"tf",
".",
"reduce_sum",
"(",
"y_true_pos",
")",
"area_pred",
"=",
"tf",
".",
"reduce_sum",
"(",
"y_pred_pos",
")",
"area_union",
"=",
"area_true",
"+",
"area_pred",
"-",
"area_intersect",
"return",
"1",
"-",
"tf",
".",
"math",
".",
"divide_no_nan",
"(",
"area_intersect",
",",
"area_union",
")"
] |
https://github.com/nci/drishti/blob/89cd8b740239c5b2c8222dffd4e27432fde170a1/bin/assets/scripts/unet++/unet_collection/losses.py#L388-L425
|
|
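A small worked example, assuming the `iou_seg` definition above is in scope and TensorFlow is running eagerly:

```python
# Worked example: intersect = 0.8 + 0.6 = 1.4;
# union = 2.0 + 1.6 - 1.4 = 2.2; loss = 1 - 1.4/2.2.
import tensorflow as tf

y_true = tf.constant([1., 1., 0., 0.])
y_pred = tf.constant([0.8, 0.6, 0.2, 0.0])
print(iou_seg(y_true, y_pred).numpy())  # ≈ 0.3636
```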
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_controls.py
|
python
|
ListCtrl.InsertStringItem
|
(*args, **kwargs)
|
return _controls_.ListCtrl_InsertStringItem(*args, **kwargs)
|
InsertStringItem(self, long index, String label, int imageIndex=-1) -> long
|
InsertStringItem(self, long index, String label, int imageIndex=-1) -> long
|
[
"InsertStringItem",
"(",
"self",
"long",
"index",
"String",
"label",
"int",
"imageIndex",
"=",
"-",
"1",
")",
"-",
">",
"long"
] |
def InsertStringItem(*args, **kwargs):
"""InsertStringItem(self, long index, String label, int imageIndex=-1) -> long"""
return _controls_.ListCtrl_InsertStringItem(*args, **kwargs)
|
[
"def",
"InsertStringItem",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ListCtrl_InsertStringItem",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L4711-L4713
|
|
tkn-tub/ns3-gym
|
19bfe0a583e641142609939a090a09dfc63a095f
|
utils.py
|
python
|
get_list_from_file
|
(file_path, list_name)
|
return list
|
Looks for a Python list called list_name in the file specified
by file_path and returns it.
If the file or the list name isn't found, this function will return
an empty list.
|
Looks for a Python list called list_name in the file specified
by file_path and returns it.
|
[
"Looks",
"for",
"a",
"Python",
"list",
"called",
"list_name",
"in",
"the",
"file",
"specified",
"by",
"file_path",
"and",
"returns",
"it",
"."
] |
def get_list_from_file(file_path, list_name):
'''Looks for a Python list called list_name in the file specified
by file_path and returns it.
    If the file or the list name isn't found, this function will return
    an empty list.
'''
list = []
# Read in the file if it exists.
if os.path.exists(file_path):
file_in = open(file_path, "r")
# Look for the list.
list_string = ""
parsing_multiline_list = False
for line in file_in:
# Remove any comments.
if '#' in line:
(line, comment) = line.split('#', 1)
# Parse the line.
if list_name in line or parsing_multiline_list:
list_string += line
# Handle multiline lists.
if ']' not in list_string:
parsing_multiline_list = True
else:
# Evaluate the list once its end is reached.
# Make the split function only split it once.
list = eval(list_string.split('=', 1)[1].strip())
break
# Close the file
file_in.close()
return list
|
[
"def",
"get_list_from_file",
"(",
"file_path",
",",
"list_name",
")",
":",
"list",
"=",
"[",
"]",
"# Read in the file if it exists.",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"file_in",
"=",
"open",
"(",
"file_path",
",",
"\"r\"",
")",
"# Look for the list.",
"list_string",
"=",
"\"\"",
"parsing_multiline_list",
"=",
"False",
"for",
"line",
"in",
"file_in",
":",
"# Remove any comments.",
"if",
"'#'",
"in",
"line",
":",
"(",
"line",
",",
"comment",
")",
"=",
"line",
".",
"split",
"(",
"'#'",
",",
"1",
")",
"# Parse the line.",
"if",
"list_name",
"in",
"line",
"or",
"parsing_multiline_list",
":",
"list_string",
"+=",
"line",
"# Handle multiline lists.",
"if",
"']'",
"not",
"in",
"list_string",
":",
"parsing_multiline_list",
"=",
"True",
"else",
":",
"# Evaluate the list once its end is reached.",
"# Make the split function only split it once.",
"list",
"=",
"eval",
"(",
"list_string",
".",
"split",
"(",
"'='",
",",
"1",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"break",
"# Close the file",
"file_in",
".",
"close",
"(",
")",
"return",
"list"
] |
https://github.com/tkn-tub/ns3-gym/blob/19bfe0a583e641142609939a090a09dfc63a095f/utils.py#L10-L50
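A self-contained sketch of how `get_list_from_file` is typically exercised; the file and list names below are invented for illustration:

import os

# Write a throwaway file containing a (multiline) Python list literal.
with open("example_conf.py", "w") as f:
    f.write("modules = ['core',\n           'network']  # trailing comment\n")

print(get_list_from_file("example_conf.py", "modules"))  # ['core', 'network']
print(get_list_from_file("missing.py", "modules"))       # [] (no such file)
os.remove("example_conf.py")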
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/urllib3/util/retry.py
|
python
|
Retry.from_int
|
(cls, retries, redirect=True, default=None)
|
return new_retries
|
Backwards-compatibility for the old retries format.
|
Backwards-compatibility for the old retries format.
|
[
"Backwards",
"-",
"compatibility",
"for",
"the",
"old",
"retries",
"format",
"."
] |
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
    # True maps to None (defer to `total` for the redirect limit);
    # False stays False (disable redirects and raise instead).
    redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
|
[
"def",
"from_int",
"(",
"cls",
",",
"retries",
",",
"redirect",
"=",
"True",
",",
"default",
"=",
"None",
")",
":",
"if",
"retries",
"is",
"None",
":",
"retries",
"=",
"default",
"if",
"default",
"is",
"not",
"None",
"else",
"cls",
".",
"DEFAULT",
"if",
"isinstance",
"(",
"retries",
",",
"Retry",
")",
":",
"return",
"retries",
"redirect",
"=",
"bool",
"(",
"redirect",
")",
"and",
"None",
"new_retries",
"=",
"cls",
"(",
"retries",
",",
"redirect",
"=",
"redirect",
")",
"log",
".",
"debug",
"(",
"\"Converted retries value: %r -> %r\"",
",",
"retries",
",",
"new_retries",
")",
"return",
"new_retries"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/urllib3/util/retry.py#L219-L230
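A quick sketch of the three conversion paths `from_int` handles, assuming a recent urllib3 is installed:

from urllib3.util.retry import Retry

print(Retry.from_int(5))        # a new Retry built from a bare int
print(Retry.from_int(None))     # falls back to Retry.DEFAULT
r = Retry(total=3)
print(Retry.from_int(r) is r)   # True: an existing Retry passes through unchanged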
|
|
hughperkins/tf-coriander
|
970d3df6c11400ad68405f22b0c42a52374e94ca
|
tensorflow/contrib/metrics/python/ops/metric_ops.py
|
python
|
streaming_percentage_less
|
(values, threshold, ignore_mask=None, weights=None,
metrics_collections=None,
updates_collections=None,
name=None)
|
return streaming_mean(is_below_threshold, _mask_weights(ignore_mask, weights),
metrics_collections,
updates_collections,
name or 'percentage_below_threshold')
|
Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Alternatively, if `ignore_mask` is not `None`, then mask values where
`ignore_mask` is `True`.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
ignore_mask: An optional, `bool` `Tensor` whose shape matches `values`.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A tensor representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `ignore_mask` is not `None` and its shape doesn't match
`values`, or if `weights` is not `None` and its shape doesn't match
`values`, or if either `metrics_collections` or `updates_collections` are
not a list or tuple.
|
Computes the percentage of values less than the given threshold.
|
[
"Computes",
"the",
"percentage",
"of",
"values",
"less",
"than",
"the",
"given",
"threshold",
"."
] |
def streaming_percentage_less(values, threshold, ignore_mask=None, weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Alternatively, if `ignore_mask` is not `None`, then mask values where
`ignore_mask` is `True`.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
ignore_mask: An optional, `bool` `Tensor` whose shape matches `values`.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A tensor representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `ignore_mask` is not `None` and its shape doesn't match
`values`, or if `weights` is not `None` and its shape doesn't match
`values`, or if either `metrics_collections` or `updates_collections` are
not a list or tuple.
"""
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
return streaming_mean(is_below_threshold, _mask_weights(ignore_mask, weights),
metrics_collections,
updates_collections,
name or 'percentage_below_threshold')
|
[
"def",
"streaming_percentage_less",
"(",
"values",
",",
"threshold",
",",
"ignore_mask",
"=",
"None",
",",
"weights",
"=",
"None",
",",
"metrics_collections",
"=",
"None",
",",
"updates_collections",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"is_below_threshold",
"=",
"math_ops",
".",
"to_float",
"(",
"math_ops",
".",
"less",
"(",
"values",
",",
"threshold",
")",
")",
"return",
"streaming_mean",
"(",
"is_below_threshold",
",",
"_mask_weights",
"(",
"ignore_mask",
",",
"weights",
")",
",",
"metrics_collections",
",",
"updates_collections",
",",
"name",
"or",
"'percentage_below_threshold'",
")"
] |
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/metrics/python/ops/metric_ops.py#L2584-L2631
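Stripped of the streaming machinery, the metric above is a weighted mean of the indicator `values < threshold`; a plain NumPy sketch of the same arithmetic, for intuition rather than as a substitute for the contrib API:

import numpy as np

values = np.array([1.0, 4.0, 2.0, 9.0])
threshold = 3.0
weights = np.ones_like(values)

is_below = (values < threshold).astype(np.float64)
percentage = np.sum(is_below * weights) / np.sum(weights)
print(percentage)  # 0.5 -- two of the four values fall below the threshold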
|
|
apache/impala
|
8ddac48f3428c86f2cbd037ced89cfb903298b12
|
shell/ext-py/prettytable-0.7.2/prettytable.py
|
python
|
PrettyTable._get_sort_key
|
(self)
|
return self._sort_key
|
Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted
|
Sorting key function, applied to data points before sorting
|
[
"Sorting",
"key",
"function",
"applied",
"to",
"data",
"points",
"before",
"sorting"
] |
def _get_sort_key(self):
"""Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted"""
return self._sort_key
|
[
"def",
"_get_sort_key",
"(",
"self",
")",
":",
"return",
"self",
".",
"_sort_key"
] |
https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/shell/ext-py/prettytable-0.7.2/prettytable.py#L521-L527
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
samples/pydocview/FindService.py
|
python
|
FindService.GetLineNumber
|
(self, parent)
|
return line
|
Display Goto Line Number dialog box
|
Display Goto Line Number dialog box
|
[
"Display",
"Goto",
"Line",
"Number",
"dialog",
"box"
] |
def GetLineNumber(self, parent):
""" Display Goto Line Number dialog box """
line = -1
dialog = wx.TextEntryDialog(parent, _("Enter line number to go to:"), _("Go to Line"))
dialog.CenterOnParent()
if dialog.ShowModal() == wx.ID_OK:
try:
line = int(dialog.GetValue())
if line > 65535:
line = 65535
except:
pass
dialog.Destroy()
# This one is ugly: wx.GetNumberFromUser("", _("Enter line number to go to:"), _("Go to Line"), 1, min = 1, max = 65535, parent = parent)
return line
|
[
"def",
"GetLineNumber",
"(",
"self",
",",
"parent",
")",
":",
"line",
"=",
"-",
"1",
"dialog",
"=",
"wx",
".",
"TextEntryDialog",
"(",
"parent",
",",
"_",
"(",
"\"Enter line number to go to:\"",
")",
",",
"_",
"(",
"\"Go to Line\"",
")",
")",
"dialog",
".",
"CenterOnParent",
"(",
")",
"if",
"dialog",
".",
"ShowModal",
"(",
")",
"==",
"wx",
".",
"ID_OK",
":",
"try",
":",
"line",
"=",
"int",
"(",
"dialog",
".",
"GetValue",
"(",
")",
")",
"if",
"line",
">",
"65535",
":",
"line",
"=",
"65535",
"except",
":",
"pass",
"dialog",
".",
"Destroy",
"(",
")",
"# This one is ugly: wx.GetNumberFromUser(\"\", _(\"Enter line number to go to:\"), _(\"Go to Line\"), 1, min = 1, max = 65535, parent = parent)",
"return",
"line"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/pydocview/FindService.py#L153-L167
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/gsutil/gslib/gcs_json_api.py
|
python
|
GcsJsonApi.UploadObject
|
(self, upload_stream, object_metadata, canned_acl=None,
size=None, preconditions=None, progress_callback=None,
provider=None, fields=None)
|
return self._UploadObject(
upload_stream, object_metadata, canned_acl=canned_acl,
size=size, preconditions=preconditions,
progress_callback=progress_callback, fields=fields,
apitools_strategy=apitools_transfer.SIMPLE_UPLOAD)
|
See CloudApi class for function doc strings.
|
See CloudApi class for function doc strings.
|
[
"See",
"CloudApi",
"class",
"for",
"function",
"doc",
"strings",
"."
] |
def UploadObject(self, upload_stream, object_metadata, canned_acl=None,
size=None, preconditions=None, progress_callback=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
return self._UploadObject(
upload_stream, object_metadata, canned_acl=canned_acl,
size=size, preconditions=preconditions,
progress_callback=progress_callback, fields=fields,
apitools_strategy=apitools_transfer.SIMPLE_UPLOAD)
|
[
"def",
"UploadObject",
"(",
"self",
",",
"upload_stream",
",",
"object_metadata",
",",
"canned_acl",
"=",
"None",
",",
"size",
"=",
"None",
",",
"preconditions",
"=",
"None",
",",
"progress_callback",
"=",
"None",
",",
"provider",
"=",
"None",
",",
"fields",
"=",
"None",
")",
":",
"return",
"self",
".",
"_UploadObject",
"(",
"upload_stream",
",",
"object_metadata",
",",
"canned_acl",
"=",
"canned_acl",
",",
"size",
"=",
"size",
",",
"preconditions",
"=",
"preconditions",
",",
"progress_callback",
"=",
"progress_callback",
",",
"fields",
"=",
"fields",
",",
"apitools_strategy",
"=",
"apitools_transfer",
".",
"SIMPLE_UPLOAD",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/gslib/gcs_json_api.py#L1028-L1036
|
|
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/RNN/rnn_quantizer/pytorch_binding/pytorch_nndct/nn/modules/prim_ops.py
|
python
|
deephi_ChannelScale.forward
|
(self, input:torch.Tensor, channel_scale:Union[torch.Tensor, Sequence[Any], float])
|
return output
|
if self.node.in_quant_part:
channel_scale = quant_channel_scale_params(self.node, channel_scale)
|
if self.node.in_quant_part:
channel_scale = quant_channel_scale_params(self.node, channel_scale)
|
[
"if",
"self",
".",
"node",
".",
"in_quant_part",
":",
"channel_scale",
"=",
"quant_channel_scale_params",
"(",
"self",
".",
"node",
"channel_scale",
")"
] |
def forward(self, input:torch.Tensor, channel_scale:Union[torch.Tensor, Sequence[Any], float]):
[input], _ = process_inputs_and_params(
self.node,
self.quantizer,
inputs=[input],
)
if isinstance(channel_scale, (list, tuple)):
channel_scale = torch.Tensor(channel_scale).to(input.device)
elif isinstance(channel_scale, float):
channel_scale = torch.Tensor([channel_scale]).to(input.device)
'''
if self.node.in_quant_part:
channel_scale = quant_channel_scale_params(self.node, channel_scale)
'''
output = input * channel_scale
if self.node.in_quant_part:
[output] = post_quant_process(self.node, [output])
return output
|
[
"def",
"forward",
"(",
"self",
",",
"input",
":",
"torch",
".",
"Tensor",
",",
"channel_scale",
":",
"Union",
"[",
"torch",
".",
"Tensor",
",",
"Sequence",
"[",
"Any",
"]",
",",
"float",
"]",
")",
":",
"[",
"input",
"]",
",",
"_",
"=",
"process_inputs_and_params",
"(",
"self",
".",
"node",
",",
"self",
".",
"quantizer",
",",
"inputs",
"=",
"[",
"input",
"]",
",",
")",
"if",
"isinstance",
"(",
"channel_scale",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"channel_scale",
"=",
"torch",
".",
"Tensor",
"(",
"channel_scale",
")",
".",
"to",
"(",
"input",
".",
"device",
")",
"elif",
"isinstance",
"(",
"channel_scale",
",",
"float",
")",
":",
"channel_scale",
"=",
"torch",
".",
"Tensor",
"(",
"[",
"channel_scale",
"]",
")",
".",
"to",
"(",
"input",
".",
"device",
")",
"output",
"=",
"input",
"*",
"channel_scale",
"if",
"self",
".",
"node",
".",
"in_quant_part",
":",
"[",
"output",
"]",
"=",
"post_quant_process",
"(",
"self",
".",
"node",
",",
"[",
"output",
"]",
")",
"return",
"output"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/RNN/rnn_quantizer/pytorch_binding/pytorch_nndct/nn/modules/prim_ops.py#L262-L282
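Ignoring the quantizer plumbing, the op reduces to `input * channel_scale` with broadcasting; a plain PyTorch sketch of per-channel scaling (the explicit reshape is an illustrative choice -- the module above relies on the caller passing a broadcast-compatible scale):

import torch

x = torch.ones(1, 3, 2, 2)             # NCHW input
scale = torch.tensor([0.5, 1.0, 2.0])  # one factor per channel
out = x * scale.view(1, -1, 1, 1)      # broadcast over H and W
print(out[0, :, 0, 0])                 # tensor([0.5000, 1.0000, 2.0000])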
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/core/fromnumeric.py
|
python
|
swapaxes
|
(a, axis1, axis2)
|
return _wrapfunc(a, 'swapaxes', axis1, axis2)
|
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
|
Interchange two axes of an array.
|
[
"Interchange",
"two",
"axes",
"of",
"an",
"array",
"."
] |
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
|
[
"def",
"swapaxes",
"(",
"a",
",",
"axis1",
",",
"axis2",
")",
":",
"return",
"_wrapfunc",
"(",
"a",
",",
"'swapaxes'",
",",
"axis1",
",",
"axis2",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/core/fromnumeric.py#L554-L597
|
|
cms-sw/cmssw
|
fd9de012d503d3405420bcbeec0ec879baa57cf2
|
DPGAnalysis/HcalTools/scripts/cmt/das_client.py
|
python
|
unique_filter
|
(rows)
|
Unique filter: drops duplicate rows.
|
Unique filter: drops duplicate rows.
|
[
"Unique",
"filter",
"drop",
"duplicate",
"rows",
"."
] |
def unique_filter(rows):
"""
    Unique filter: drops duplicate rows.
"""
old_row = {}
row = None
for row in rows:
row_data = dict(row)
try:
del row_data['_id']
del row_data['das']
del row_data['das_id']
del row_data['cache_id']
except:
pass
old_data = dict(old_row)
try:
del old_data['_id']
del old_data['das']
del old_data['das_id']
del old_data['cache_id']
except:
pass
if row_data == old_data:
continue
if old_row:
yield old_row
old_row = row
    # Guard against empty input, where `row` would still be None.
    if row is not None:
        yield row
|
[
"def",
"unique_filter",
"(",
"rows",
")",
":",
"old_row",
"=",
"{",
"}",
"row",
"=",
"None",
"for",
"row",
"in",
"rows",
":",
"row_data",
"=",
"dict",
"(",
"row",
")",
"try",
":",
"del",
"row_data",
"[",
"'_id'",
"]",
"del",
"row_data",
"[",
"'das'",
"]",
"del",
"row_data",
"[",
"'das_id'",
"]",
"del",
"row_data",
"[",
"'cache_id'",
"]",
"except",
":",
"pass",
"old_data",
"=",
"dict",
"(",
"old_row",
")",
"try",
":",
"del",
"old_data",
"[",
"'_id'",
"]",
"del",
"old_data",
"[",
"'das'",
"]",
"del",
"old_data",
"[",
"'das_id'",
"]",
"del",
"old_data",
"[",
"'cache_id'",
"]",
"except",
":",
"pass",
"if",
"row_data",
"==",
"old_data",
":",
"continue",
"if",
"old_row",
":",
"yield",
"old_row",
"old_row",
"=",
"row",
"yield",
"row"
] |
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/DPGAnalysis/HcalTools/scripts/cmt/das_client.py#L203-L231
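A short usage sketch for `unique_filter`; because the bookkeeping keys (`_id`, `das`, `das_id`, `cache_id`) are stripped before comparison, consecutive rows differing only in those fields collapse:

rows = [
    {'_id': 1, 'name': 'a'},
    {'_id': 2, 'name': 'a'},  # duplicate once '_id' is ignored
    {'_id': 3, 'name': 'b'},
]
print(list(unique_filter(rows)))
# [{'_id': 1, 'name': 'a'}, {'_id': 3, 'name': 'b'}]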
|
||
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/python/ops/sparse_ops.py
|
python
|
_take_many_sparse_from_tensors_map
|
(
sparse_map_op, sparse_handles, rank=None, name=None)
|
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
|
Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
|
Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
|
[
"Read",
"SparseTensors",
"from",
"a",
"SparseTensorsMap",
"and",
"concatenate",
"them",
"."
] |
def _take_many_sparse_from_tensors_map(
sparse_map_op, sparse_handles, rank=None, name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError("sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." %
sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops._take_many_sparse_from_tensors_map(
sparse_handles, dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
|
[
"def",
"_take_many_sparse_from_tensors_map",
"(",
"sparse_map_op",
",",
"sparse_handles",
",",
"rank",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"sparse_map_op",
",",
"ops",
".",
"Operation",
")",
":",
"raise",
"TypeError",
"(",
"\"sparse_map_op be an Operation\"",
")",
"if",
"sparse_map_op",
".",
"type",
"not",
"in",
"(",
"\"AddSparseToTensorsMap\"",
",",
"\"AddManySparseToTensorsMap\"",
")",
":",
"raise",
"TypeError",
"(",
"\"sparse_map_op must be one of AddSparseToTensorsMap or \"",
"\"AddSparseToTensorsMap. Instead, found `%s`.\"",
"%",
"sparse_map_op",
".",
"type",
")",
"with",
"ops",
".",
"colocate_with",
"(",
"sparse_map_op",
")",
":",
"shared_name",
"=",
"sparse_map_op",
".",
"get_attr",
"(",
"\"shared_name\"",
")",
"or",
"sparse_map_op",
".",
"name",
"output_indices",
",",
"output_values",
",",
"output_shape",
"=",
"(",
"gen_sparse_ops",
".",
"_take_many_sparse_from_tensors_map",
"(",
"sparse_handles",
",",
"dtype",
"=",
"sparse_map_op",
".",
"get_attr",
"(",
"\"T\"",
")",
",",
"container",
"=",
"sparse_map_op",
".",
"get_attr",
"(",
"\"container\"",
")",
",",
"shared_name",
"=",
"shared_name",
",",
"name",
"=",
"name",
")",
")",
"# Feed rank data back in, if available",
"output_indices",
".",
"set_shape",
"(",
"[",
"None",
",",
"rank",
"]",
")",
"output_shape",
".",
"set_shape",
"(",
"[",
"rank",
"]",
")",
"return",
"sparse_tensor",
".",
"SparseTensor",
"(",
"output_indices",
",",
"output_values",
",",
"output_shape",
")"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/sparse_ops.py#L1955-L2034
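The deserialization semantics described in the docstring can be reproduced with plain NumPy; this sketch mirrors the worked example above (two rank-1 sparse tensors concatenated along a new leading minibatch axis) without touching the internal TensorFlow op:

import numpy as np

# Two rank-1 sparse tensors as (indices, values, dense_shape) triples.
a = (np.array([[0], [10], [20]]), np.array([1, 2, 3]), np.array([50]))
b = (np.array([[2], [10]]), np.array([4, 5]), np.array([30]))

indices = np.concatenate([
    np.hstack([np.full((len(t[0]), 1), i), t[0]]) for i, t in enumerate([a, b])
])
values = np.concatenate([a[1], b[1]])
dense_shape = np.concatenate([[2], np.maximum(a[2], b[2])])
print(indices.tolist())      # [[0, 0], [0, 10], [0, 20], [1, 2], [1, 10]]
print(values.tolist())       # [1, 2, 3, 4, 5]
print(dense_shape.tolist())  # [2, 50]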
|
|
etodd/lasercrabs
|
91484d9ac3a47ac38b8f40ec3ff35194714dad8e
|
assets/script/etodd_blender_fbx/export_fbx_bin.py
|
python
|
fbx_data_camera_elements
|
(root, cam_obj, scene_data)
|
Write the Camera data blocks.
|
Write the Camera data blocks.
|
[
"Write",
"the",
"Camera",
"data",
"blocks",
"."
] |
def fbx_data_camera_elements(root, cam_obj, scene_data):
"""
Write the Camera data blocks.
"""
gscale = scene_data.settings.global_scale
cam = cam_obj.bdata
cam_data = cam.data
cam_key = scene_data.data_cameras[cam_obj]
# Real data now, good old camera!
# Object transform info.
loc, rot, scale, matrix, matrix_rot = cam_obj.fbx_object_tx(scene_data)
up = matrix_rot * Vector((0.0, 1.0, 0.0))
to = matrix_rot * Vector((0.0, 0.0, -1.0))
# Render settings.
# TODO We could export much more...
render = scene_data.scene.render
width = render.resolution_x
height = render.resolution_y
aspect = width / height
# Film width & height from mm to inches
filmwidth = convert_mm_to_inch(cam_data.sensor_width)
filmheight = convert_mm_to_inch(cam_data.sensor_height)
filmaspect = filmwidth / filmheight
# Film offset
offsetx = filmwidth * cam_data.shift_x
offsety = filmaspect * filmheight * cam_data.shift_y
cam = elem_data_single_int64(root, b"NodeAttribute", get_fbx_uuid_from_key(cam_key))
cam.add_string(fbx_name_class(cam_data.name.encode(), b"NodeAttribute"))
cam.add_string(b"Camera")
tmpl = elem_props_template_init(scene_data.templates, b"Camera")
props = elem_properties(cam)
elem_props_template_set(tmpl, props, "p_vector", b"Position", loc)
elem_props_template_set(tmpl, props, "p_vector", b"UpVector", up)
elem_props_template_set(tmpl, props, "p_vector", b"InterestPosition", loc + to) # Point, not vector!
# Should we use world value?
elem_props_template_set(tmpl, props, "p_color", b"BackgroundColor", (0.0, 0.0, 0.0))
elem_props_template_set(tmpl, props, "p_bool", b"DisplayTurnTableIcon", True)
elem_props_template_set(tmpl, props, "p_enum", b"AspectRatioMode", 2) # FixedResolution
elem_props_template_set(tmpl, props, "p_double", b"AspectWidth", float(render.resolution_x))
elem_props_template_set(tmpl, props, "p_double", b"AspectHeight", float(render.resolution_y))
elem_props_template_set(tmpl, props, "p_double", b"PixelAspectRatio",
float(render.pixel_aspect_x / render.pixel_aspect_y))
elem_props_template_set(tmpl, props, "p_double", b"FilmWidth", filmwidth)
elem_props_template_set(tmpl, props, "p_double", b"FilmHeight", filmheight)
elem_props_template_set(tmpl, props, "p_double", b"FilmAspectRatio", filmaspect)
elem_props_template_set(tmpl, props, "p_double", b"FilmOffsetX", offsetx)
elem_props_template_set(tmpl, props, "p_double", b"FilmOffsetY", offsety)
elem_props_template_set(tmpl, props, "p_enum", b"ApertureMode", 3) # FocalLength.
elem_props_template_set(tmpl, props, "p_enum", b"GateFit", 2) # FitHorizontal.
elem_props_template_set(tmpl, props, "p_fov", b"FieldOfView", math.degrees(cam_data.angle_x))
elem_props_template_set(tmpl, props, "p_fov_x", b"FieldOfViewX", math.degrees(cam_data.angle_x))
elem_props_template_set(tmpl, props, "p_fov_y", b"FieldOfViewY", math.degrees(cam_data.angle_y))
# No need to convert to inches here...
elem_props_template_set(tmpl, props, "p_double", b"FocalLength", cam_data.lens)
elem_props_template_set(tmpl, props, "p_double", b"SafeAreaAspectRatio", aspect)
# Default to perspective camera.
elem_props_template_set(tmpl, props, "p_enum", b"CameraProjectionType", 1 if cam_data.type == 'ORTHO' else 0)
elem_props_template_set(tmpl, props, "p_double", b"OrthoZoom", cam_data.ortho_scale)
elem_props_template_set(tmpl, props, "p_double", b"NearPlane", cam_data.clip_start * gscale)
elem_props_template_set(tmpl, props, "p_double", b"FarPlane", cam_data.clip_end * gscale)
elem_props_template_set(tmpl, props, "p_enum", b"BackPlaneDistanceMode", 1) # RelativeToCamera.
elem_props_template_set(tmpl, props, "p_double", b"BackPlaneDistance", cam_data.clip_end * gscale)
elem_props_template_finalize(tmpl, props)
# Custom properties.
if scene_data.settings.use_custom_props:
fbx_data_element_custom_properties(props, cam_data)
elem_data_single_string(cam, b"TypeFlags", b"Camera")
elem_data_single_int32(cam, b"GeometryVersion", 124) # Sic...
elem_data_vec_float64(cam, b"Position", loc)
elem_data_vec_float64(cam, b"Up", up)
elem_data_vec_float64(cam, b"LookAt", to)
elem_data_single_int32(cam, b"ShowInfoOnMoving", 1)
elem_data_single_int32(cam, b"ShowAudio", 0)
elem_data_vec_float64(cam, b"AudioColor", (0.0, 1.0, 0.0))
elem_data_single_float64(cam, b"CameraOrthoZoom", 1.0)
|
[
"def",
"fbx_data_camera_elements",
"(",
"root",
",",
"cam_obj",
",",
"scene_data",
")",
":",
"gscale",
"=",
"scene_data",
".",
"settings",
".",
"global_scale",
"cam",
"=",
"cam_obj",
".",
"bdata",
"cam_data",
"=",
"cam",
".",
"data",
"cam_key",
"=",
"scene_data",
".",
"data_cameras",
"[",
"cam_obj",
"]",
"# Real data now, good old camera!",
"# Object transform info.",
"loc",
",",
"rot",
",",
"scale",
",",
"matrix",
",",
"matrix_rot",
"=",
"cam_obj",
".",
"fbx_object_tx",
"(",
"scene_data",
")",
"up",
"=",
"matrix_rot",
"*",
"Vector",
"(",
"(",
"0.0",
",",
"1.0",
",",
"0.0",
")",
")",
"to",
"=",
"matrix_rot",
"*",
"Vector",
"(",
"(",
"0.0",
",",
"0.0",
",",
"-",
"1.0",
")",
")",
"# Render settings.",
"# TODO We could export much more...",
"render",
"=",
"scene_data",
".",
"scene",
".",
"render",
"width",
"=",
"render",
".",
"resolution_x",
"height",
"=",
"render",
".",
"resolution_y",
"aspect",
"=",
"width",
"/",
"height",
"# Film width & height from mm to inches",
"filmwidth",
"=",
"convert_mm_to_inch",
"(",
"cam_data",
".",
"sensor_width",
")",
"filmheight",
"=",
"convert_mm_to_inch",
"(",
"cam_data",
".",
"sensor_height",
")",
"filmaspect",
"=",
"filmwidth",
"/",
"filmheight",
"# Film offset",
"offsetx",
"=",
"filmwidth",
"*",
"cam_data",
".",
"shift_x",
"offsety",
"=",
"filmaspect",
"*",
"filmheight",
"*",
"cam_data",
".",
"shift_y",
"cam",
"=",
"elem_data_single_int64",
"(",
"root",
",",
"b\"NodeAttribute\"",
",",
"get_fbx_uuid_from_key",
"(",
"cam_key",
")",
")",
"cam",
".",
"add_string",
"(",
"fbx_name_class",
"(",
"cam_data",
".",
"name",
".",
"encode",
"(",
")",
",",
"b\"NodeAttribute\"",
")",
")",
"cam",
".",
"add_string",
"(",
"b\"Camera\"",
")",
"tmpl",
"=",
"elem_props_template_init",
"(",
"scene_data",
".",
"templates",
",",
"b\"Camera\"",
")",
"props",
"=",
"elem_properties",
"(",
"cam",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_vector\"",
",",
"b\"Position\"",
",",
"loc",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_vector\"",
",",
"b\"UpVector\"",
",",
"up",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_vector\"",
",",
"b\"InterestPosition\"",
",",
"loc",
"+",
"to",
")",
"# Point, not vector!",
"# Should we use world value?",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_color\"",
",",
"b\"BackgroundColor\"",
",",
"(",
"0.0",
",",
"0.0",
",",
"0.0",
")",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_bool\"",
",",
"b\"DisplayTurnTableIcon\"",
",",
"True",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_enum\"",
",",
"b\"AspectRatioMode\"",
",",
"2",
")",
"# FixedResolution",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"AspectWidth\"",
",",
"float",
"(",
"render",
".",
"resolution_x",
")",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"AspectHeight\"",
",",
"float",
"(",
"render",
".",
"resolution_y",
")",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"PixelAspectRatio\"",
",",
"float",
"(",
"render",
".",
"pixel_aspect_x",
"/",
"render",
".",
"pixel_aspect_y",
")",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FilmWidth\"",
",",
"filmwidth",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FilmHeight\"",
",",
"filmheight",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FilmAspectRatio\"",
",",
"filmaspect",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FilmOffsetX\"",
",",
"offsetx",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FilmOffsetY\"",
",",
"offsety",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_enum\"",
",",
"b\"ApertureMode\"",
",",
"3",
")",
"# FocalLength.",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_enum\"",
",",
"b\"GateFit\"",
",",
"2",
")",
"# FitHorizontal.",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_fov\"",
",",
"b\"FieldOfView\"",
",",
"math",
".",
"degrees",
"(",
"cam_data",
".",
"angle_x",
")",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_fov_x\"",
",",
"b\"FieldOfViewX\"",
",",
"math",
".",
"degrees",
"(",
"cam_data",
".",
"angle_x",
")",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_fov_y\"",
",",
"b\"FieldOfViewY\"",
",",
"math",
".",
"degrees",
"(",
"cam_data",
".",
"angle_y",
")",
")",
"# No need to convert to inches here...",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FocalLength\"",
",",
"cam_data",
".",
"lens",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"SafeAreaAspectRatio\"",
",",
"aspect",
")",
"# Default to perspective camera.",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_enum\"",
",",
"b\"CameraProjectionType\"",
",",
"1",
"if",
"cam_data",
".",
"type",
"==",
"'ORTHO'",
"else",
"0",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"OrthoZoom\"",
",",
"cam_data",
".",
"ortho_scale",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"NearPlane\"",
",",
"cam_data",
".",
"clip_start",
"*",
"gscale",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"FarPlane\"",
",",
"cam_data",
".",
"clip_end",
"*",
"gscale",
")",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_enum\"",
",",
"b\"BackPlaneDistanceMode\"",
",",
"1",
")",
"# RelativeToCamera.",
"elem_props_template_set",
"(",
"tmpl",
",",
"props",
",",
"\"p_double\"",
",",
"b\"BackPlaneDistance\"",
",",
"cam_data",
".",
"clip_end",
"*",
"gscale",
")",
"elem_props_template_finalize",
"(",
"tmpl",
",",
"props",
")",
"# Custom properties.",
"if",
"scene_data",
".",
"settings",
".",
"use_custom_props",
":",
"fbx_data_element_custom_properties",
"(",
"props",
",",
"cam_data",
")",
"elem_data_single_string",
"(",
"cam",
",",
"b\"TypeFlags\"",
",",
"b\"Camera\"",
")",
"elem_data_single_int32",
"(",
"cam",
",",
"b\"GeometryVersion\"",
",",
"124",
")",
"# Sic...",
"elem_data_vec_float64",
"(",
"cam",
",",
"b\"Position\"",
",",
"loc",
")",
"elem_data_vec_float64",
"(",
"cam",
",",
"b\"Up\"",
",",
"up",
")",
"elem_data_vec_float64",
"(",
"cam",
",",
"b\"LookAt\"",
",",
"to",
")",
"elem_data_single_int32",
"(",
"cam",
",",
"b\"ShowInfoOnMoving\"",
",",
"1",
")",
"elem_data_single_int32",
"(",
"cam",
",",
"b\"ShowAudio\"",
",",
"0",
")",
"elem_data_vec_float64",
"(",
"cam",
",",
"b\"AudioColor\"",
",",
"(",
"0.0",
",",
"1.0",
",",
"0.0",
")",
")",
"elem_data_single_float64",
"(",
"cam",
",",
"b\"CameraOrthoZoom\"",
",",
"1.0",
")"
] |
https://github.com/etodd/lasercrabs/blob/91484d9ac3a47ac38b8f40ec3ff35194714dad8e/assets/script/etodd_blender_fbx/export_fbx_bin.py#L619-L705
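The film-back numbers above are plain unit conversions; a tiny sketch with a hypothetical `convert_mm_to_inch` matching the usual definition (1 inch = 25.4 mm):

def convert_mm_to_inch(mm):
    return mm / 25.4

sensor_width, sensor_height = 36.0, 24.0        # full-frame sensor, in mm
filmwidth = convert_mm_to_inch(sensor_width)    # ~1.417 in
filmheight = convert_mm_to_inch(sensor_height)  # ~0.945 in
print(filmwidth / filmheight)                   # 1.5, the film aspect ratio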
|
||
apache/incubator-mxnet
|
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
|
python/mxnet/symbol/numpy/_symbol.py
|
python
|
tile
|
(A, reps)
|
return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
|
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : _Symbol or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
        The number of repetitions of `A` along each axis.
Returns
-------
c : _Symbol
The tiled output array.
|
r"""
Construct an array by repeating A the number of times given by reps.
|
[
"r",
"Construct",
"an",
"array",
"by",
"repeating",
"A",
"the",
"number",
"of",
"times",
"given",
"by",
"reps",
"."
] |
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : _Symbol or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
        The number of repetitions of `A` along each axis.
Returns
-------
c : _Symbol
The tiled output array.
"""
return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
|
[
"def",
"tile",
"(",
"A",
",",
"reps",
")",
":",
"return",
"_unary_func_helper",
"(",
"A",
",",
"_npi",
".",
"tile",
",",
"_np",
".",
"tile",
",",
"reps",
"=",
"reps",
")"
] |
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/symbol/numpy/_symbol.py#L3823-L3852
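The promotion rules spelled out in the docstring are easiest to see with the NumPy function this symbol mirrors:

import numpy as np

a = np.array([1, 2, 3])          # shape (3,)
print(np.tile(a, 2))             # [1 2 3 1 2 3]
print(np.tile(a, (2, 2)).shape)  # (2, 6): A promoted to shape (1, 3) first
b = np.arange(4).reshape(2, 2)
print(np.tile(b, 2).shape)       # (2, 4): reps promoted to (1, 2)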
|
|
casadi/casadi
|
8d0f80a4d0fe2054384bfb9748f7a0f6bae540ff
|
misc/cpplint.py
|
python
|
_CppLintState.SetCountingStyle
|
(self, counting_style)
|
Sets the module's counting options.
|
Sets the module's counting options.
|
[
"Sets",
"the",
"module",
"s",
"counting",
"options",
"."
] |
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
|
[
"def",
"SetCountingStyle",
"(",
"self",
",",
"counting_style",
")",
":",
"self",
".",
"counting",
"=",
"counting_style"
] |
https://github.com/casadi/casadi/blob/8d0f80a4d0fe2054384bfb9748f7a0f6bae540ff/misc/cpplint.py#L705-L707
|
||
zerotier/libzt
|
41eb9aebc80a5f1c816fa26a06cefde9de906676
|
src/bindings/python/sockets.py
|
python
|
errno
|
()
|
return libzt.cvar.zts_errno
|
Return errno value of low-level socket layer
|
Return errno value of low-level socket layer
|
[
"Return",
"errno",
"value",
"of",
"low",
"-",
"level",
"socket",
"layer"
] |
def errno():
"""Return errno value of low-level socket layer"""
return libzt.cvar.zts_errno
|
[
"def",
"errno",
"(",
")",
":",
"return",
"libzt",
".",
"cvar",
".",
"zts_errno"
] |
https://github.com/zerotier/libzt/blob/41eb9aebc80a5f1c816fa26a06cefde9de906676/src/bindings/python/sockets.py#L39-L41
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/email/iterators.py
|
python
|
walk
|
(self)
|
Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
|
Walk over the message tree, yielding each subpart.
|
[
"Walk",
"over",
"the",
"message",
"tree",
"yielding",
"each",
"subpart",
"."
] |
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
|
[
"def",
"walk",
"(",
"self",
")",
":",
"yield",
"self",
"if",
"self",
".",
"is_multipart",
"(",
")",
":",
"for",
"subpart",
"in",
"self",
".",
"get_payload",
"(",
")",
":",
"for",
"subsubpart",
"in",
"subpart",
".",
"walk",
"(",
")",
":",
"yield",
"subsubpart"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/email/iterators.py#L20-L30
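A short sketch of `walk` in action on a two-part message, using only the standard library:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg.attach(MIMEText("plain body", "plain"))
msg.attach(MIMEText("<b>html body</b>", "html"))

for part in msg.walk():  # depth-first: the container, then each subpart
    print(part.get_content_type())
# multipart/mixed
# text/plain
# text/html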
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_controls.py
|
python
|
TextCtrl.__init__
|
(self, *args, **kwargs)
|
__init__(self, Window parent, int id=-1, String value=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, Validator validator=DefaultValidator,
String name=TextCtrlNameStr) -> TextCtrl
|
__init__(self, Window parent, int id=-1, String value=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, Validator validator=DefaultValidator,
String name=TextCtrlNameStr) -> TextCtrl
|
[
"__init__",
"(",
"self",
"Window",
"parent",
"int",
"id",
"=",
"-",
"1",
"String",
"value",
"=",
"EmptyString",
"Point",
"pos",
"=",
"DefaultPosition",
"Size",
"size",
"=",
"DefaultSize",
"long",
"style",
"=",
"0",
"Validator",
"validator",
"=",
"DefaultValidator",
"String",
"name",
"=",
"TextCtrlNameStr",
")",
"-",
">",
"TextCtrl"
] |
def __init__(self, *args, **kwargs):
"""
__init__(self, Window parent, int id=-1, String value=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=0, Validator validator=DefaultValidator,
String name=TextCtrlNameStr) -> TextCtrl
"""
_controls_.TextCtrl_swiginit(self,_controls_.new_TextCtrl(*args, **kwargs))
self._setOORInfo(self)
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_controls_",
".",
"TextCtrl_swiginit",
"(",
"self",
",",
"_controls_",
".",
"new_TextCtrl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"self",
".",
"_setOORInfo",
"(",
"self",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L2012-L2020
|
||
eclipse/sumo
|
7132a9b8b6eea734bdec38479026b4d8c4336d03
|
tools/traci/_edge.py
|
python
|
EdgeDomain.getStreetName
|
(self, edgeID)
|
return self._getUniversal(tc.VAR_NAME, edgeID)
|
getStreetName(string) -> string
Returns the street name of this edge
|
getStreetName(string) -> string
|
[
"getStreetName",
"(",
"string",
")",
"-",
">",
"string"
] |
def getStreetName(self, edgeID):
"""getStreetName(string) -> string
Returns the street name of this edge
"""
return self._getUniversal(tc.VAR_NAME, edgeID)
|
[
"def",
"getStreetName",
"(",
"self",
",",
"edgeID",
")",
":",
"return",
"self",
".",
"_getUniversal",
"(",
"tc",
".",
"VAR_NAME",
",",
"edgeID",
")"
] |
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_edge.py#L140-L145
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/mailbox.py
|
python
|
MaildirMessage.get_date
|
(self)
|
return self._date
|
Return delivery date of message, in seconds since the epoch.
|
Return delivery date of message, in seconds since the epoch.
|
[
"Return",
"delivery",
"date",
"of",
"message",
"in",
"seconds",
"since",
"the",
"epoch",
"."
] |
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
|
[
"def",
"get_date",
"(",
"self",
")",
":",
"return",
"self",
".",
"_date"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/mailbox.py#L1566-L1568
|
|
whai362/PSENet
|
4d95395658662f2223805c36dcd573d9e190ce26
|
eval/ic15/script.py
|
python
|
evaluation_imports
|
()
|
return {
'Polygon':'plg',
'numpy':'np'
}
|
evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
|
evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
|
[
"evaluation_imports",
":",
"Dictionary",
"(",
"key",
"=",
"module",
"name",
"value",
"=",
"alias",
")",
"with",
"python",
"modules",
"used",
"in",
"the",
"evaluation",
"."
] |
def evaluation_imports():
"""
evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
"""
return {
'Polygon':'plg',
'numpy':'np'
}
|
[
"def",
"evaluation_imports",
"(",
")",
":",
"return",
"{",
"'Polygon'",
":",
"'plg'",
",",
"'numpy'",
":",
"'np'",
"}"
] |
https://github.com/whai362/PSENet/blob/4d95395658662f2223805c36dcd573d9e190ce26/eval/ic15/script.py#L7-L14
|
|
qgis/QGIS
|
15a77662d4bb712184f6aa60d0bd663010a76a75
|
python/plugins/db_manager/db_plugins/oracle/connector.py
|
python
|
OracleDBConnector.hasCreateSpatialViewSupport
|
(self)
|
return True
|
We can create Spatial Views.
|
We can create Spatial Views.
|
[
"We",
"can",
"create",
"Spatial",
"Views",
"."
] |
def hasCreateSpatialViewSupport(self):
"""We can create Spatial Views."""
return True
|
[
"def",
"hasCreateSpatialViewSupport",
"(",
"self",
")",
":",
"return",
"True"
] |
https://github.com/qgis/QGIS/blob/15a77662d4bb712184f6aa60d0bd663010a76a75/python/plugins/db_manager/db_plugins/oracle/connector.py#L209-L211
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/ed_editv.py
|
python
|
EdEditorView.ModifySave
|
(self)
|
return result
|
Called when document has been modified prompting
a message dialog asking if the user would like to save
the document before closing.
@return: Result value of whether the file was saved or not
|
Called when document has been modified prompting
a message dialog asking if the user would like to save
the document before closing.
@return: Result value of whether the file was saved or not
|
[
"Called",
"when",
"document",
"has",
"been",
"modified",
"prompting",
"a",
"message",
"dialog",
"asking",
"if",
"the",
"user",
"would",
"like",
"to",
"save",
"the",
"document",
"before",
"closing",
".",
"@return",
":",
"Result",
"value",
"of",
"whether",
"the",
"file",
"was",
"saved",
"or",
"not"
] |
def ModifySave(self):
"""Called when document has been modified prompting
a message dialog asking if the user would like to save
the document before closing.
@return: Result value of whether the file was saved or not
"""
name = self.GetFileName()
if name == u"":
name = self.GetTabLabel()
dlg = wx.MessageDialog(self,
_("The file: \"%s\" has been modified since "
"the last save point.\n\nWould you like to "
"save the changes?") % name,
_("Save Changes?"),
wx.YES_NO | wx.YES_DEFAULT | wx.CANCEL | \
wx.ICON_INFORMATION)
result = dlg.ShowModal()
dlg.Destroy()
# HACK
if result == wx.ID_YES:
evt = wx.MenuEvent(wx.wxEVT_COMMAND_MENU_SELECTED, ed_glob.ID_SAVE)
tlw = self.GetTopLevelParent()
if hasattr(tlw, 'OnSave'):
tlw.OnSave(evt)
return result
|
[
"def",
"ModifySave",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"GetFileName",
"(",
")",
"if",
"name",
"==",
"u\"\"",
":",
"name",
"=",
"self",
".",
"GetTabLabel",
"(",
")",
"dlg",
"=",
"wx",
".",
"MessageDialog",
"(",
"self",
",",
"_",
"(",
"\"The file: \\\"%s\\\" has been modified since \"",
"\"the last save point.\\n\\nWould you like to \"",
"\"save the changes?\"",
")",
"%",
"name",
",",
"_",
"(",
"\"Save Changes?\"",
")",
",",
"wx",
".",
"YES_NO",
"|",
"wx",
".",
"YES_DEFAULT",
"|",
"wx",
".",
"CANCEL",
"|",
"wx",
".",
"ICON_INFORMATION",
")",
"result",
"=",
"dlg",
".",
"ShowModal",
"(",
")",
"dlg",
".",
"Destroy",
"(",
")",
"# HACK",
"if",
"result",
"==",
"wx",
".",
"ID_YES",
":",
"evt",
"=",
"wx",
".",
"MenuEvent",
"(",
"wx",
".",
"wxEVT_COMMAND_MENU_SELECTED",
",",
"ed_glob",
".",
"ID_SAVE",
")",
"tlw",
"=",
"self",
".",
"GetTopLevelParent",
"(",
")",
"if",
"hasattr",
"(",
"tlw",
",",
"'OnSave'",
")",
":",
"tlw",
".",
"OnSave",
"(",
"evt",
")",
"return",
"result"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_editv.py#L613-L641
|
|
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py
|
python
|
MakeDescriptor
|
(desc_proto, package='', build_file_if_cpp=True,
syntax=None)
|
return Descriptor(desc_proto.name, desc_name, None, None, fields,
list(nested_types.values()), list(enum_types.values()), [],
options=_OptionsOrNone(desc_proto))
|
Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
Returns:
A Descriptor for protobuf messages.
|
Make a protobuf Descriptor given a DescriptorProto protobuf.
|
[
"Make",
"a",
"protobuf",
"Descriptor",
"given",
"a",
"DescriptorProto",
"protobuf",
"."
] |
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True,
syntax=None):
"""Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
Returns:
A Descriptor for protobuf messages.
"""
if api_implementation.Type() == 'cpp' and build_file_if_cpp:
# The C++ implementation requires all descriptors to be backed by the same
# definition in the C++ descriptor pool. To do this, we build a
# FileDescriptorProto with the same definition as this descriptor and build
# it into the pool.
from google.protobuf import descriptor_pb2
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
# Generate a random name for this proto file to prevent conflicts with any
# imported ones. We need to specify a file name so the descriptor pool
# accepts our FileDescriptorProto, but it is not important what that file
# name is actually set to.
proto_name = str(uuid.uuid4())
if package:
file_descriptor_proto.name = os.path.join(package.replace('.', '/'),
proto_name + '.proto')
file_descriptor_proto.package = package
else:
file_descriptor_proto.name = proto_name + '.proto'
_message.default_pool.Add(file_descriptor_proto)
result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
if _USE_C_DESCRIPTORS:
return result.message_types_by_name[desc_proto.name]
full_message_name = [desc_proto.name]
if package: full_message_name.insert(0, package)
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join(full_message_name + [enum_proto.name])
enum_desc = EnumDescriptor(
enum_proto.name, full_name, None, [
EnumValueDescriptor(enum_val.name, ii, enum_val.number)
for ii, enum_val in enumerate(enum_proto.value)])
enum_types[full_name] = enum_desc
# Create Descriptors for nested types
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join(full_message_name + [nested_proto.name])
# Nested types are just those defined inside of the message, not all types
# used by fields in the message, so no loops are possible here.
nested_desc = MakeDescriptor(nested_proto,
package='.'.join(full_message_name),
build_file_if_cpp=False,
syntax=syntax)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join(full_message_name + [field_proto.name])
enum_desc = None
nested_desc = None
if field_proto.json_name:
json_name = field_proto.json_name
else:
json_name = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join(full_message_name +
[type_name[type_name.rfind('.')+1:]])
if full_type_name in nested_types:
nested_desc = nested_types[full_type_name]
elif full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
# Else type_name references a non-local type, which isn't implemented
field = FieldDescriptor(
field_proto.name, full_name, field_proto.number - 1,
field_proto.number, field_proto.type,
FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label, None, nested_desc, enum_desc, None, False, None,
options=_OptionsOrNone(field_proto), has_default_value=False,
json_name=json_name)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields,
list(nested_types.values()), list(enum_types.values()), [],
options=_OptionsOrNone(desc_proto))
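A minimal sketch of driving `MakeDescriptor` with a hand-built `DescriptorProto`, assuming the `protobuf` package is installed; the type and label constants come from descriptor_pb2's enums:

from google.protobuf import descriptor_pb2
from google.protobuf.descriptor import MakeDescriptor

proto = descriptor_pb2.DescriptorProto()
proto.name = 'Point'
for i, fname in enumerate(('x', 'y'), start=1):
    f = proto.field.add()
    f.name, f.number = fname, i
    f.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT32
    f.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

desc = MakeDescriptor(proto, package='demo')
print(desc.full_name)                 # demo.Point
print([f.name for f in desc.fields])  # ['x', 'y']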
|
[
"def",
"MakeDescriptor",
"(",
"desc_proto",
",",
"package",
"=",
"''",
",",
"build_file_if_cpp",
"=",
"True",
",",
"syntax",
"=",
"None",
")",
":",
"if",
"api_implementation",
".",
"Type",
"(",
")",
"==",
"'cpp'",
"and",
"build_file_if_cpp",
":",
"# The C++ implementation requires all descriptors to be backed by the same",
"# definition in the C++ descriptor pool. To do this, we build a",
"# FileDescriptorProto with the same definition as this descriptor and build",
"# it into the pool.",
"from",
"google",
".",
"protobuf",
"import",
"descriptor_pb2",
"file_descriptor_proto",
"=",
"descriptor_pb2",
".",
"FileDescriptorProto",
"(",
")",
"file_descriptor_proto",
".",
"message_type",
".",
"add",
"(",
")",
".",
"MergeFrom",
"(",
"desc_proto",
")",
"# Generate a random name for this proto file to prevent conflicts with any",
"# imported ones. We need to specify a file name so the descriptor pool",
"# accepts our FileDescriptorProto, but it is not important what that file",
"# name is actually set to.",
"proto_name",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"if",
"package",
":",
"file_descriptor_proto",
".",
"name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"package",
".",
"replace",
"(",
"'.'",
",",
"'/'",
")",
",",
"proto_name",
"+",
"'.proto'",
")",
"file_descriptor_proto",
".",
"package",
"=",
"package",
"else",
":",
"file_descriptor_proto",
".",
"name",
"=",
"proto_name",
"+",
"'.proto'",
"_message",
".",
"default_pool",
".",
"Add",
"(",
"file_descriptor_proto",
")",
"result",
"=",
"_message",
".",
"default_pool",
".",
"FindFileByName",
"(",
"file_descriptor_proto",
".",
"name",
")",
"if",
"_USE_C_DESCRIPTORS",
":",
"return",
"result",
".",
"message_types_by_name",
"[",
"desc_proto",
".",
"name",
"]",
"full_message_name",
"=",
"[",
"desc_proto",
".",
"name",
"]",
"if",
"package",
":",
"full_message_name",
".",
"insert",
"(",
"0",
",",
"package",
")",
"# Create Descriptors for enum types",
"enum_types",
"=",
"{",
"}",
"for",
"enum_proto",
"in",
"desc_proto",
".",
"enum_type",
":",
"full_name",
"=",
"'.'",
".",
"join",
"(",
"full_message_name",
"+",
"[",
"enum_proto",
".",
"name",
"]",
")",
"enum_desc",
"=",
"EnumDescriptor",
"(",
"enum_proto",
".",
"name",
",",
"full_name",
",",
"None",
",",
"[",
"EnumValueDescriptor",
"(",
"enum_val",
".",
"name",
",",
"ii",
",",
"enum_val",
".",
"number",
")",
"for",
"ii",
",",
"enum_val",
"in",
"enumerate",
"(",
"enum_proto",
".",
"value",
")",
"]",
")",
"enum_types",
"[",
"full_name",
"]",
"=",
"enum_desc",
"# Create Descriptors for nested types",
"nested_types",
"=",
"{",
"}",
"for",
"nested_proto",
"in",
"desc_proto",
".",
"nested_type",
":",
"full_name",
"=",
"'.'",
".",
"join",
"(",
"full_message_name",
"+",
"[",
"nested_proto",
".",
"name",
"]",
")",
"# Nested types are just those defined inside of the message, not all types",
"# used by fields in the message, so no loops are possible here.",
"nested_desc",
"=",
"MakeDescriptor",
"(",
"nested_proto",
",",
"package",
"=",
"'.'",
".",
"join",
"(",
"full_message_name",
")",
",",
"build_file_if_cpp",
"=",
"False",
",",
"syntax",
"=",
"syntax",
")",
"nested_types",
"[",
"full_name",
"]",
"=",
"nested_desc",
"fields",
"=",
"[",
"]",
"for",
"field_proto",
"in",
"desc_proto",
".",
"field",
":",
"full_name",
"=",
"'.'",
".",
"join",
"(",
"full_message_name",
"+",
"[",
"field_proto",
".",
"name",
"]",
")",
"enum_desc",
"=",
"None",
"nested_desc",
"=",
"None",
"if",
"field_proto",
".",
"json_name",
":",
"json_name",
"=",
"field_proto",
".",
"json_name",
"else",
":",
"json_name",
"=",
"None",
"if",
"field_proto",
".",
"HasField",
"(",
"'type_name'",
")",
":",
"type_name",
"=",
"field_proto",
".",
"type_name",
"full_type_name",
"=",
"'.'",
".",
"join",
"(",
"full_message_name",
"+",
"[",
"type_name",
"[",
"type_name",
".",
"rfind",
"(",
"'.'",
")",
"+",
"1",
":",
"]",
"]",
")",
"if",
"full_type_name",
"in",
"nested_types",
":",
"nested_desc",
"=",
"nested_types",
"[",
"full_type_name",
"]",
"elif",
"full_type_name",
"in",
"enum_types",
":",
"enum_desc",
"=",
"enum_types",
"[",
"full_type_name",
"]",
"# Else type_name references a non-local type, which isn't implemented",
"field",
"=",
"FieldDescriptor",
"(",
"field_proto",
".",
"name",
",",
"full_name",
",",
"field_proto",
".",
"number",
"-",
"1",
",",
"field_proto",
".",
"number",
",",
"field_proto",
".",
"type",
",",
"FieldDescriptor",
".",
"ProtoTypeToCppProtoType",
"(",
"field_proto",
".",
"type",
")",
",",
"field_proto",
".",
"label",
",",
"None",
",",
"nested_desc",
",",
"enum_desc",
",",
"None",
",",
"False",
",",
"None",
",",
"options",
"=",
"_OptionsOrNone",
"(",
"field_proto",
")",
",",
"has_default_value",
"=",
"False",
",",
"json_name",
"=",
"json_name",
")",
"fields",
".",
"append",
"(",
"field",
")",
"desc_name",
"=",
"'.'",
".",
"join",
"(",
"full_message_name",
")",
"return",
"Descriptor",
"(",
"desc_proto",
".",
"name",
",",
"desc_name",
",",
"None",
",",
"None",
",",
"fields",
",",
"list",
"(",
"nested_types",
".",
"values",
"(",
")",
")",
",",
"list",
"(",
"enum_types",
".",
"values",
"(",
")",
")",
",",
"[",
"]",
",",
"options",
"=",
"_OptionsOrNone",
"(",
"desc_proto",
")",
")"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/descriptor.py#L919-L1020
|
|
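The MakeDescriptor record above is easiest to follow with a small end-to-end call. A minimal sketch, assuming only the public google.protobuf API that the function itself uses; the message and field names ('Point', 'x', 'y', 'demo') are invented for illustration:

from google.protobuf import descriptor_pb2
from google.protobuf.descriptor import MakeDescriptor

# Build a DescriptorProto for a two-field message by hand.
proto = descriptor_pb2.DescriptorProto()
proto.name = 'Point'
for number, fname in enumerate(('x', 'y'), start=1):
    field = proto.field.add()
    field.name = fname
    field.number = number
    field.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT32
    field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

# MakeDescriptor registers the proto (via a uniquely named synthetic
# .proto file when the C++ implementation is active) and returns a
# live Descriptor.
desc = MakeDescriptor(proto, package='demo')
print(desc.full_name)                 # demo.Point
print([f.name for f in desc.fields])  # ['x', 'y']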
xbmc/xbmc
|
091211a754589fc40a2a1f239b0ce9f4ee138268
|
addons/service.xbmc.versioncheck/resources/lib/version_check/distro/distro.py
|
python
|
LinuxDistribution.lsb_release_info
|
(self)
|
return self._lsb_release_info
|
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the OS
distribution.
For details, see :func:`distro.lsb_release_info`.
|
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the OS
distribution.
|
[
"Return",
"a",
"dictionary",
"containing",
"key",
"-",
"value",
"pairs",
"for",
"the",
"information",
"items",
"from",
"the",
"lsb_release",
"command",
"data",
"source",
"of",
"the",
"OS",
"distribution",
"."
] |
def lsb_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the OS
distribution.
For details, see :func:`distro.lsb_release_info`.
"""
return self._lsb_release_info
|
[
"def",
"lsb_release_info",
"(",
"self",
")",
":",
"return",
"self",
".",
"_lsb_release_info"
] |
https://github.com/xbmc/xbmc/blob/091211a754589fc40a2a1f239b0ce9f4ee138268/addons/service.xbmc.versioncheck/resources/lib/version_check/distro/distro.py#L854-L862
|
|
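A short usage sketch for the lsb_release_info record above, assuming the lsb_release command is available on the host (otherwise the returned dictionary is simply empty) and that keys follow distro's usual normalization:

from distro import LinuxDistribution

info = LinuxDistribution().lsb_release_info()
print(info.get('distributor_id'), info.get('release'))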
bareos/bareos
|
56a10bb368b0a81e977bb51304033fe49d59efb0
|
core/src/plugins/filed/python/vmware/BareosFdPluginVMware.py
|
python
|
BareosVADPWrapper.remove_vm_snapshot
|
(self)
|
return True
|
Removes the snapshot taken before
|
Removes the snapshot taken before
|
[
"Removes",
"the",
"snapshot",
"taken",
"before"
] |
def remove_vm_snapshot(self):
"""
Removes the snapshot taken before
"""
if not self.create_snap_result:
bareosfd.JobMessage(
bareosfd.M_WARNING,
"No snapshot was taken, skipping snapshot removal\n",
)
return False
try:
rmsnap_task = self.create_snap_result.RemoveSnapshot_Task(
removeChildren=True
)
except vmodl.MethodFault as e:
bareosfd.JobMessage(
bareosfd.M_WARNING,
"Failed to remove snapshot %s\n" % (e.msg),
)
return False
self.vmomi_WaitForTasks([rmsnap_task])
return True
|
[
"def",
"remove_vm_snapshot",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"create_snap_result",
":",
"bareosfd",
".",
"JobMessage",
"(",
"bareosfd",
".",
"M_WARNING",
",",
"\"No snapshot was taken, skipping snapshot removal\\n\"",
",",
")",
"return",
"False",
"try",
":",
"rmsnap_task",
"=",
"self",
".",
"create_snap_result",
".",
"RemoveSnapshot_Task",
"(",
"removeChildren",
"=",
"True",
")",
"except",
"vmodl",
".",
"MethodFault",
"as",
"e",
":",
"bareosfd",
".",
"JobMessage",
"(",
"bareosfd",
".",
"M_WARNING",
",",
"\"Failed to remove snapshot %s\\n\"",
"%",
"(",
"e",
".",
"msg",
")",
",",
")",
"return",
"False",
"self",
".",
"vmomi_WaitForTasks",
"(",
"[",
"rmsnap_task",
"]",
")",
"return",
"True"
] |
https://github.com/bareos/bareos/blob/56a10bb368b0a81e977bb51304033fe49d59efb0/core/src/plugins/filed/python/vmware/BareosFdPluginVMware.py#L1133-L1157
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/build/waf-1.7.13/waflib/Tools/glib2.py
|
python
|
add_enums
|
(self, source='', target='',
file_head='', file_prod='', file_tail='', enum_prod='',
value_head='', value_prod='', value_tail='', comments='')
|
Add a file to the list of enum files to process. Store them in the attribute *enums_list*.
:param source: enum file to process
:type source: string
:param target: target file
:type target: string
:param file_head: unused
:param file_prod: unused
:param file_tail: unused
:param enum_prod: unused
:param value_head: unused
:param value_prod: unused
:param value_tail: unused
:param comments: comments
:type comments: string
|
Add a file to the list of enum files to process. Store them in the attribute *enums_list*.
|
[
"Add",
"a",
"file",
"to",
"the",
"list",
"of",
"enum",
"files",
"to",
"process",
".",
"Store",
"them",
"in",
"the",
"attribute",
"*",
"enums_list",
"*",
"."
] |
def add_enums(self, source='', target='',
file_head='', file_prod='', file_tail='', enum_prod='',
value_head='', value_prod='', value_tail='', comments=''):
"""
Add a file to the list of enum files to process. Store them in the attribute *enums_list*.
:param source: enum file to process
:type source: string
:param target: target file
:type target: string
:param file_head: unused
:param file_prod: unused
:param file_tail: unused
:param enum_prod: unused
:param value_head: unused
:param value_prod: unused
:param value_tail: unused
:param comments: comments
:type comments: string
"""
if not hasattr(self, 'enums_list'):
self.enums_list = []
self.meths.append('process_enums')
self.enums_list.append({'source': source,
'template': '',
'target': target,
'file-head': file_head,
'file-prod': file_prod,
'file-tail': file_tail,
'enum-prod': enum_prod,
'value-head': value_head,
'value-prod': value_prod,
'value-tail': value_tail,
'comments': comments})
|
[
"def",
"add_enums",
"(",
"self",
",",
"source",
"=",
"''",
",",
"target",
"=",
"''",
",",
"file_head",
"=",
"''",
",",
"file_prod",
"=",
"''",
",",
"file_tail",
"=",
"''",
",",
"enum_prod",
"=",
"''",
",",
"value_head",
"=",
"''",
",",
"value_prod",
"=",
"''",
",",
"value_tail",
"=",
"''",
",",
"comments",
"=",
"''",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'enums_list'",
")",
":",
"self",
".",
"enums_list",
"=",
"[",
"]",
"self",
".",
"meths",
".",
"append",
"(",
"'process_enums'",
")",
"self",
".",
"enums_list",
".",
"append",
"(",
"{",
"'source'",
":",
"source",
",",
"'template'",
":",
"''",
",",
"'target'",
":",
"target",
",",
"'file-head'",
":",
"file_head",
",",
"'file-prod'",
":",
"file_prod",
",",
"'file-tail'",
":",
"file_tail",
",",
"'enum-prod'",
":",
"enum_prod",
",",
"'value-head'",
":",
"value_head",
",",
"'value-prod'",
":",
"value_prod",
",",
"'value-tail'",
":",
"value_tail",
",",
"'comments'",
":",
"comments",
"}",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/waflib/Tools/glib2.py#L119-L152
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/lib-tk/Tkinter.py
|
python
|
Menu.invoke
|
(self, index)
|
return self.tk.call(self._w, 'invoke', index)
|
Invoke a menu item identified by INDEX and execute
the associated command.
|
Invoke a menu item identified by INDEX and execute
the associated command.
|
[
"Invoke",
"a",
"menu",
"item",
"identified",
"by",
"INDEX",
"and",
"execute",
"the",
"associated",
"command",
"."
] |
def invoke(self, index):
"""Invoke a menu item identified by INDEX and execute
the associated command."""
return self.tk.call(self._w, 'invoke', index)
|
[
"def",
"invoke",
"(",
"self",
",",
"index",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'invoke'",
",",
"index",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/Tkinter.py#L2802-L2805
|
|
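A minimal sketch of driving the Menu.invoke record above programmatically (Python 2 Tkinter, matching the record's source path); with tearoff=0 the first real entry sits at index 0:

import Tkinter as tk

def say_hi():
    print("hi")

root = tk.Tk()
menu = tk.Menu(root, tearoff=0)
menu.add_command(label="Say hi", command=say_hi)
menu.invoke(0)   # runs say_hi and returns the command's result
root.destroy()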
hpi-xnor/BMXNet-v2
|
af2b1859eafc5c721b1397cef02f946aaf2ce20d
|
python/mxnet/ndarray/ndarray.py
|
python
|
NDArray._set_nd_basic_indexing
|
(self, key, value)
|
This function is called by __setitem__ when key is a basic index, i.e.
an integer, or a slice, or a tuple of integers and slices. No restrictions
on the values of slices' steps.
|
This function is called by __setitem__ when key is a basic index, i.e.
an integer, or a slice, or a tuple of integers and slices. No restrictions
on the values of slices' steps.
|
[
"This",
"function",
"is",
"called",
"by",
"__setitem__",
"when",
"key",
"is",
"a",
"basic",
"index",
"i",
".",
"e",
".",
"an",
"integer",
"or",
"a",
"slice",
"or",
"a",
"tuple",
"of",
"integers",
"and",
"slices",
".",
"No",
"restrictions",
"on",
"the",
"values",
"of",
"slices",
"steps",
"."
] |
def _set_nd_basic_indexing(self, key, value):
"""This function is called by __setitem__ when key is a basic index, i.e.
an integer, or a slice, or a tuple of integers and slices. No restrictions
on the values of slices' steps."""
shape = self.shape
if isinstance(key, integer_types):
if key < 0:
key += shape[0]
if key < 0 or key >= shape[0]:
if key < 0:
key -= shape[0]
raise IndexError('index %d is out of bounds for axis 0 with size %d'
% (key, shape[0]))
key = py_slice(key, key+1) # key must be >= 0 here
if isinstance(key, py_slice):
assign_to_self = key.step is None or key.step == 1
assign_to_self &= key.start is None or key.start == 0
assign_to_self &= key.stop is None or key.stop == shape[0]
if assign_to_self: # trivial case, assign value to self
if isinstance(value, NDArray):
if value.handle is not self.handle:
if value.shape != shape:
value = value.broadcast_to(shape)
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._full(shape=shape, ctx=self.context,
dtype=self.dtype, value=float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
if isinstance(value, np.generic) or value.shape != shape:
value = np.broadcast_to(value, shape)
self._sync_copyfrom(value)
else: # value might be a list or a tuple
value_nd = self._prepare_value_nd(value, shape)
value_nd.copyto(self)
return
else: # non-trivial case, use _slice_assign or _slice_assign_scalar
key = (key,)
assert isinstance(key, tuple), "key=%s must be a tuple of slices and integers" % str(key)
assert len(key) <= len(shape), "Indexing dimensions exceed array dimensions, %d vs %d"\
% (len(key), len(shape))
begin = []
end = []
steps = []
oshape = [] # output shape of slice using key
vshape = [] # value shape of data[key]
for i, slice_i in enumerate(key):
dim_size = 1
if isinstance(slice_i, py_slice):
begin.append(slice_i.start)
end.append(slice_i.stop)
steps.append(slice_i.step)
start, stop, step = _get_index_range(slice_i.start, slice_i.stop,
shape[i], slice_i.step)
dim_size = _get_dim_size(start, stop, step)
vshape.append(dim_size)
elif isinstance(slice_i, integer_types):
begin.append(slice_i)
end.append(slice_i+1 if slice_i != -1 else self.shape[i])
steps.append(1)
else:
raise ValueError("basic indexing does not support index=%s of type=%s"
% (str(slice_i), str(type(slice_i))))
oshape.append(dim_size)
oshape.extend(shape[len(key):])
vshape.extend(shape[len(key):])
# if key contains all integers, vshape should be (1,)
if len(vshape) == 0:
vshape.append(1)
oshape = tuple(oshape)
vshape = tuple(vshape)
if isinstance(value, numeric_types):
_internal._slice_assign_scalar(self, out=self, begin=begin, end=end,
step=steps, scalar=float(value))
else:
value_nd = self._prepare_value_nd(value, vshape)
if vshape != oshape:
value_nd = value_nd.reshape(oshape)
_internal._slice_assign(self, value_nd, begin, end, steps, out=self)
|
[
"def",
"_set_nd_basic_indexing",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"shape",
"=",
"self",
".",
"shape",
"if",
"isinstance",
"(",
"key",
",",
"integer_types",
")",
":",
"if",
"key",
"<",
"0",
":",
"key",
"+=",
"shape",
"[",
"0",
"]",
"if",
"key",
"<",
"0",
"or",
"key",
">=",
"shape",
"[",
"0",
"]",
":",
"if",
"key",
"<",
"0",
":",
"key",
"-=",
"shape",
"[",
"0",
"]",
"raise",
"IndexError",
"(",
"'index %d is out of bounds for axis 0 with size %d'",
"%",
"(",
"key",
",",
"shape",
"[",
"0",
"]",
")",
")",
"key",
"=",
"py_slice",
"(",
"key",
",",
"key",
"+",
"1",
")",
"# key must be >= 0 here",
"if",
"isinstance",
"(",
"key",
",",
"py_slice",
")",
":",
"assign_to_self",
"=",
"key",
".",
"step",
"is",
"None",
"or",
"key",
".",
"step",
"==",
"1",
"assign_to_self",
"&=",
"key",
".",
"start",
"is",
"None",
"or",
"key",
".",
"start",
"==",
"0",
"assign_to_self",
"&=",
"key",
".",
"stop",
"is",
"None",
"or",
"key",
".",
"stop",
"==",
"shape",
"[",
"0",
"]",
"if",
"assign_to_self",
":",
"# trivial case, assign value to self",
"if",
"isinstance",
"(",
"value",
",",
"NDArray",
")",
":",
"if",
"value",
".",
"handle",
"is",
"not",
"self",
".",
"handle",
":",
"if",
"value",
".",
"shape",
"!=",
"shape",
":",
"value",
"=",
"value",
".",
"broadcast_to",
"(",
"shape",
")",
"value",
".",
"copyto",
"(",
"self",
")",
"elif",
"isinstance",
"(",
"value",
",",
"numeric_types",
")",
":",
"_internal",
".",
"_full",
"(",
"shape",
"=",
"shape",
",",
"ctx",
"=",
"self",
".",
"context",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"value",
"=",
"float",
"(",
"value",
")",
",",
"out",
"=",
"self",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"np",
".",
"ndarray",
",",
"np",
".",
"generic",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"np",
".",
"generic",
")",
"or",
"value",
".",
"shape",
"!=",
"shape",
":",
"value",
"=",
"np",
".",
"broadcast_to",
"(",
"value",
",",
"shape",
")",
"self",
".",
"_sync_copyfrom",
"(",
"value",
")",
"else",
":",
"# value might be a list or a tuple",
"value_nd",
"=",
"self",
".",
"_prepare_value_nd",
"(",
"value",
",",
"shape",
")",
"value_nd",
".",
"copyto",
"(",
"self",
")",
"return",
"else",
":",
"# non-trivial case, use _slice_assign or _slice_assign_scalar",
"key",
"=",
"(",
"key",
",",
")",
"assert",
"isinstance",
"(",
"key",
",",
"tuple",
")",
",",
"\"key=%s must be a tuple of slices and integers\"",
"%",
"str",
"(",
"key",
")",
"assert",
"len",
"(",
"key",
")",
"<=",
"len",
"(",
"shape",
")",
",",
"\"Indexing dimensions exceed array dimensions, %d vs %d\"",
"%",
"(",
"len",
"(",
"key",
")",
",",
"len",
"(",
"shape",
")",
")",
"begin",
"=",
"[",
"]",
"end",
"=",
"[",
"]",
"steps",
"=",
"[",
"]",
"oshape",
"=",
"[",
"]",
"# output shape of slice using key",
"vshape",
"=",
"[",
"]",
"# value shape of data[key]",
"for",
"i",
",",
"slice_i",
"in",
"enumerate",
"(",
"key",
")",
":",
"dim_size",
"=",
"1",
"if",
"isinstance",
"(",
"slice_i",
",",
"py_slice",
")",
":",
"begin",
".",
"append",
"(",
"slice_i",
".",
"start",
")",
"end",
".",
"append",
"(",
"slice_i",
".",
"stop",
")",
"steps",
".",
"append",
"(",
"slice_i",
".",
"step",
")",
"start",
",",
"stop",
",",
"step",
"=",
"_get_index_range",
"(",
"slice_i",
".",
"start",
",",
"slice_i",
".",
"stop",
",",
"shape",
"[",
"i",
"]",
",",
"slice_i",
".",
"step",
")",
"dim_size",
"=",
"_get_dim_size",
"(",
"start",
",",
"stop",
",",
"step",
")",
"vshape",
".",
"append",
"(",
"dim_size",
")",
"elif",
"isinstance",
"(",
"slice_i",
",",
"integer_types",
")",
":",
"begin",
".",
"append",
"(",
"slice_i",
")",
"end",
".",
"append",
"(",
"slice_i",
"+",
"1",
"if",
"slice_i",
"!=",
"-",
"1",
"else",
"self",
".",
"shape",
"[",
"i",
"]",
")",
"steps",
".",
"append",
"(",
"1",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"basic indexing does not support index=%s of type=%s\"",
"%",
"(",
"str",
"(",
"slice_i",
")",
",",
"str",
"(",
"type",
"(",
"slice_i",
")",
")",
")",
")",
"oshape",
".",
"append",
"(",
"dim_size",
")",
"oshape",
".",
"extend",
"(",
"shape",
"[",
"len",
"(",
"key",
")",
":",
"]",
")",
"vshape",
".",
"extend",
"(",
"shape",
"[",
"len",
"(",
"key",
")",
":",
"]",
")",
"# if key contains all integers, vshape should be (1,)",
"if",
"len",
"(",
"vshape",
")",
"==",
"0",
":",
"vshape",
".",
"append",
"(",
"1",
")",
"oshape",
"=",
"tuple",
"(",
"oshape",
")",
"vshape",
"=",
"tuple",
"(",
"vshape",
")",
"if",
"isinstance",
"(",
"value",
",",
"numeric_types",
")",
":",
"_internal",
".",
"_slice_assign_scalar",
"(",
"self",
",",
"out",
"=",
"self",
",",
"begin",
"=",
"begin",
",",
"end",
"=",
"end",
",",
"step",
"=",
"steps",
",",
"scalar",
"=",
"float",
"(",
"value",
")",
")",
"else",
":",
"value_nd",
"=",
"self",
".",
"_prepare_value_nd",
"(",
"value",
",",
"vshape",
")",
"if",
"vshape",
"!=",
"oshape",
":",
"value_nd",
"=",
"value_nd",
".",
"reshape",
"(",
"oshape",
")",
"_internal",
".",
"_slice_assign",
"(",
"self",
",",
"value_nd",
",",
"begin",
",",
"end",
",",
"steps",
",",
"out",
"=",
"self",
")"
] |
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/ndarray/ndarray.py#L684-L766
|
||
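The branches of _set_nd_basic_indexing above map directly onto ordinary __setitem__ calls. A short sketch, assuming a standard mxnet install:

import mxnet as mx

a = mx.nd.zeros((3, 4))
a[1] = 1.0                      # integer key, wrapped into a slice internally
a[:, 1:3] = 7.0                 # tuple of slices, scalar -> _slice_assign_scalar
a[0, :2] = mx.nd.array([5, 6])  # NDArray value -> _slice_assign after reshape
print(a.asnumpy())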
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/archive_util.py
|
python
|
unpack_zipfile
|
(filename, extract_dir, progress_filter=default_filter)
|
Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
|
Unpack zip `filename` to `extract_dir`
|
[
"Unpack",
"zip",
"filename",
"to",
"extract_dir"
] |
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
if not zipfile.is_zipfile(filename):
raise UnrecognizedFormat("%s is not a zip file" % (filename,))
with zipfile.ZipFile(filename) as z:
for info in z.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name.split('/'):
continue
target = os.path.join(extract_dir, *name.split('/'))
target = progress_filter(name, target)
if not target:
continue
if name.endswith('/'):
# directory
ensure_directory(target)
else:
# file
ensure_directory(target)
data = z.read(info.filename)
with open(target, 'wb') as f:
f.write(data)
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
|
[
"def",
"unpack_zipfile",
"(",
"filename",
",",
"extract_dir",
",",
"progress_filter",
"=",
"default_filter",
")",
":",
"if",
"not",
"zipfile",
".",
"is_zipfile",
"(",
"filename",
")",
":",
"raise",
"UnrecognizedFormat",
"(",
"\"%s is not a zip file\"",
"%",
"(",
"filename",
",",
")",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"filename",
")",
"as",
"z",
":",
"for",
"info",
"in",
"z",
".",
"infolist",
"(",
")",
":",
"name",
"=",
"info",
".",
"filename",
"# don't extract absolute paths or ones with .. in them",
"if",
"name",
".",
"startswith",
"(",
"'/'",
")",
"or",
"'..'",
"in",
"name",
".",
"split",
"(",
"'/'",
")",
":",
"continue",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"extract_dir",
",",
"*",
"name",
".",
"split",
"(",
"'/'",
")",
")",
"target",
"=",
"progress_filter",
"(",
"name",
",",
"target",
")",
"if",
"not",
"target",
":",
"continue",
"if",
"name",
".",
"endswith",
"(",
"'/'",
")",
":",
"# directory",
"ensure_directory",
"(",
"target",
")",
"else",
":",
"# file",
"ensure_directory",
"(",
"target",
")",
"data",
"=",
"z",
".",
"read",
"(",
"info",
".",
"filename",
")",
"with",
"open",
"(",
"target",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"data",
")",
"unix_attributes",
"=",
"info",
".",
"external_attr",
">>",
"16",
"if",
"unix_attributes",
":",
"os",
".",
"chmod",
"(",
"target",
",",
"unix_attributes",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/archive_util.py#L91-L125
|
||
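The progress_filter hook in the unpack_zipfile record above both reports progress and vetoes entries: returning a falsy target skips the member. A hedged sketch with an invented archive path:

from setuptools.archive_util import unpack_zipfile

def skip_docs(src, dst):
    if src.startswith('docs/'):
        return None          # falsy -> entry is not extracted
    print('extracting', src)
    return dst

unpack_zipfile('dist/package.zip', 'build/unpacked', progress_filter=skip_docs)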
OSGeo/gdal
|
3748fc4ba4fba727492774b2b908a2130c864a83
|
swig/python/gdal-utils/osgeo_utils/gdal2tiles.py
|
python
|
setup_output_srs
|
(input_srs: Optional[osr.SpatialReference], options: Options)
|
return output_srs
|
Setup the desired SRS (based on options)
|
Setup the desired SRS (based on options)
|
[
"Setup",
"the",
"desired",
"SRS",
"(",
"based",
"on",
"options",
")"
] |
def setup_output_srs(input_srs: Optional[osr.SpatialReference], options: Options) -> Optional[osr.SpatialReference]:
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
elif options.profile == 'raster':
output_srs = input_srs
else:
output_srs = tmsMap[options.profile].srs.Clone()
if output_srs:
output_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return output_srs
|
[
"def",
"setup_output_srs",
"(",
"input_srs",
":",
"Optional",
"[",
"osr",
".",
"SpatialReference",
"]",
",",
"options",
":",
"Options",
")",
"->",
"Optional",
"[",
"osr",
".",
"SpatialReference",
"]",
":",
"output_srs",
"=",
"osr",
".",
"SpatialReference",
"(",
")",
"if",
"options",
".",
"profile",
"==",
"'mercator'",
":",
"output_srs",
".",
"ImportFromEPSG",
"(",
"3857",
")",
"elif",
"options",
".",
"profile",
"==",
"'geodetic'",
":",
"output_srs",
".",
"ImportFromEPSG",
"(",
"4326",
")",
"elif",
"options",
".",
"profile",
"==",
"'raster'",
":",
"output_srs",
"=",
"input_srs",
"else",
":",
"output_srs",
"=",
"tmsMap",
"[",
"options",
".",
"profile",
"]",
".",
"srs",
".",
"Clone",
"(",
")",
"if",
"output_srs",
":",
"output_srs",
".",
"SetAxisMappingStrategy",
"(",
"osr",
".",
"OAMS_TRADITIONAL_GIS_ORDER",
")",
"return",
"output_srs"
] |
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/gdal-utils/osgeo_utils/gdal2tiles.py#L879-L897
|
|
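The 'mercator' branch of setup_output_srs above reduces to three osr calls; a standalone sketch (GDAL 3+, where the axis-mapping strategy API exists):

from osgeo import osr

srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)                                    # Web Mercator
srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)  # x=lon, y=lat order
print(srs.GetAttrValue('PROJCS'))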
lballabio/quantlib-old
|
136336947ed4fea9ecc1da6edad188700e821739
|
gensrc/gensrc/addins/calc.py
|
python
|
CalcAddin.generateFunctions
|
(self)
|
Generate source for function implementations.
|
Generate source for function implementations.
|
[
"Generate",
"source",
"for",
"function",
"implementations",
"."
] |
def generateFunctions(self):
"""Generate source for function implementations."""
for cat in self.categoryList_.categories(self.name_, self.coreCategories_, self.addinCategories_):
buf = ''
for func in cat.functions(self.name_):
buf += self.generateFunction(func)
categoryIncludes = cat.includeList(LOOP_INCLUDES)
# replaced
# buf2 = self.bufferIncludes_.text() % {
# 'categoryIncludes' : categoryIncludes,
# 'prefix' : environment.config().prefix(),
# 'libRoot' : environment.config().libRootDirectory(),
# 'buffer' : buf }
# fileName = self.rootPath_ + cat.name() + '.cpp'
# outputfile.OutputFile(self, fileName, cat.copyright(), buf2, True)
# by
self.bufferIncludes_.set({
'categoryIncludes' : categoryIncludes,
'prefix' : environment.config().prefix(),
'libRoot' : environment.config().libRootDirectory(),
'buffer' : buf })
fileName = self.rootPath_ + cat.name() + '.cpp'
outputfile.OutputFile(self, fileName, cat.copyright(), self.bufferIncludes_, True)
|
[
"def",
"generateFunctions",
"(",
"self",
")",
":",
"for",
"cat",
"in",
"self",
".",
"categoryList_",
".",
"categories",
"(",
"self",
".",
"name_",
",",
"self",
".",
"coreCategories_",
",",
"self",
".",
"addinCategories_",
")",
":",
"buf",
"=",
"''",
"for",
"func",
"in",
"cat",
".",
"functions",
"(",
"self",
".",
"name_",
")",
":",
"buf",
"+=",
"self",
".",
"generateFunction",
"(",
"func",
")",
"categoryIncludes",
"=",
"cat",
".",
"includeList",
"(",
"LOOP_INCLUDES",
")",
"# replaced",
"# buf2 = self.bufferIncludes_.text() % {",
"# 'categoryIncludes' : categoryIncludes,",
"# 'prefix' : environment.config().prefix(),",
"# 'libRoot' : environment.config().libRootDirectory(),",
"# 'buffer' : buf }",
"# fileName = self.rootPath_ + cat.name() + '.cpp'",
"# outputfile.OutputFile(self, fileName, cat.copyright(), buf2, True)",
"# by",
"self",
".",
"bufferIncludes_",
".",
"set",
"(",
"{",
"'categoryIncludes'",
":",
"categoryIncludes",
",",
"'prefix'",
":",
"environment",
".",
"config",
"(",
")",
".",
"prefix",
"(",
")",
",",
"'libRoot'",
":",
"environment",
".",
"config",
"(",
")",
".",
"libRootDirectory",
"(",
")",
",",
"'buffer'",
":",
"buf",
"}",
")",
"fileName",
"=",
"self",
".",
"rootPath_",
"+",
"cat",
".",
"name",
"(",
")",
"+",
"'.cpp'",
"outputfile",
".",
"OutputFile",
"(",
"self",
",",
"fileName",
",",
"cat",
".",
"copyright",
"(",
")",
",",
"self",
".",
"bufferIncludes_",
",",
"True",
")"
] |
https://github.com/lballabio/quantlib-old/blob/136336947ed4fea9ecc1da6edad188700e821739/gensrc/gensrc/addins/calc.py#L189-L211
|
||
krishauser/Klampt
|
972cc83ea5befac3f653c1ba20f80155768ad519
|
Python/python2_version/klampt/plan/motionplanning.py
|
python
|
CSpaceInterface.visibilityFailures
|
(self, a, b)
|
return _motionplanning.CSpaceInterface_visibilityFailures(self, a, b)
|
Returns a list of all failed visibility constraints.
Args:
a (:obj:`object`)
b (:obj:`object`)
Returns:
(:obj:`object`):
|
Returns a list of all failed visibility constraints.
|
[
"Returns",
"a",
"list",
"of",
"all",
"failed",
"visibility",
"constraints",
"."
] |
def visibilityFailures(self, a, b):
"""
Returns a list of all failed visibility constraints.
Args:
a (:obj:`object`)
b (:obj:`object`)
Returns:
(:obj:`object`):
"""
return _motionplanning.CSpaceInterface_visibilityFailures(self, a, b)
|
[
"def",
"visibilityFailures",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"return",
"_motionplanning",
".",
"CSpaceInterface_visibilityFailures",
"(",
"self",
",",
"a",
",",
"b",
")"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/plan/motionplanning.py#L484-L494
|
|
eventql/eventql
|
7ca0dbb2e683b525620ea30dc40540a22d5eb227
|
deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/xcodeproj_file.py
|
python
|
XCConfigurationList.SetBuildSetting
|
(self, key, value)
|
Sets the build setting for key to value in all child
XCBuildConfiguration objects.
|
Sets the build setting for key to value in all child
XCBuildConfiguration objects.
|
[
"Sets",
"the",
"build",
"setting",
"for",
"key",
"to",
"value",
"in",
"all",
"child",
"XCBuildConfiguration",
"objects",
"."
] |
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
|
[
"def",
"SetBuildSetting",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"for",
"configuration",
"in",
"self",
".",
"_properties",
"[",
"'buildConfigurations'",
"]",
":",
"configuration",
".",
"SetBuildSetting",
"(",
"key",
",",
"value",
")"
] |
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/media/webrtc/trunk/tools/gyp/pylib/gyp/xcodeproj_file.py#L1656-L1662
|
||
google/sling
|
f408a148a06bc2d62e853a292a8ba7266c642839
|
python/task/workflow.py
|
python
|
stop_monitor
|
()
|
Stop task monitor.
|
Stop task monitor.
|
[
"Stop",
"task",
"monitor",
"."
] |
def stop_monitor():
"""Stop task monitor."""
global active
if active:
log.info("sending final status to monitor")
api.finalize_dashboard()
|
[
"def",
"stop_monitor",
"(",
")",
":",
"global",
"active",
"if",
"active",
":",
"log",
".",
"info",
"(",
"\"sending final status to monitor\"",
")",
"api",
".",
"finalize_dashboard",
"(",
")"
] |
https://github.com/google/sling/blob/f408a148a06bc2d62e853a292a8ba7266c642839/python/task/workflow.py#L733-L738
|
||
SpenceKonde/megaTinyCore
|
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
|
megaavr/tools/libs/pyedbglib/protocols/jtagice3protocol.py
|
python
|
Jtagice3Protocol.peel_response
|
(self, response, expected=None)
|
return return_list
|
Process the response, extracting error codes and data
:param response: raw response bytes
:param expected: expected response
:return: status, data
|
Process the response, extracting error codes and data
|
[
"Process",
"the",
"response",
"extracting",
"error",
"codes",
"and",
"data"
] |
def peel_response(self, response, expected=None):
"""
Process the response, extracting error codes and data
:param response: raw response bytes
:param expected: expected response
:return: status, data
"""
return_list = False, [0xFF]
# Special handling
if expected is not None and response[0] == expected:
return_list = True, response[2:]
else:
if response[0] == self.PROTOCOL_OK:
return_list = True, []
elif response[0] == self.PROTOCOL_LIST:
return_list = True, response[2:]
elif response[0] == self.PROTOCOL_DATA:
# Trailing status is not included on some handlers
if self.supports_trailing_status and response[-1] == self.FAILURE_OK:
return_list = True, response[2:-1]
else:
return_list = False, [response[-1]]
elif response[0] == self.PROTOCOL_FAILED:
return_list = False, [response[2]]
return return_list
|
[
"def",
"peel_response",
"(",
"self",
",",
"response",
",",
"expected",
"=",
"None",
")",
":",
"return_list",
"=",
"False",
",",
"[",
"0xFF",
"]",
"# Special handling",
"if",
"expected",
"is",
"not",
"None",
"and",
"response",
"[",
"0",
"]",
"==",
"expected",
":",
"return_list",
"=",
"True",
",",
"response",
"[",
"2",
":",
"]",
"else",
":",
"if",
"response",
"[",
"0",
"]",
"==",
"self",
".",
"PROTOCOL_OK",
":",
"return_list",
"=",
"True",
",",
"[",
"]",
"elif",
"response",
"[",
"0",
"]",
"==",
"self",
".",
"PROTOCOL_LIST",
":",
"return_list",
"=",
"True",
",",
"response",
"[",
"2",
":",
"]",
"elif",
"response",
"[",
"0",
"]",
"==",
"self",
".",
"PROTOCOL_DATA",
":",
"# Trailing status is not included on some handlers",
"if",
"self",
".",
"supports_trailing_status",
"and",
"response",
"[",
"-",
"1",
"]",
"==",
"self",
".",
"FAILURE_OK",
":",
"return_list",
"=",
"True",
",",
"response",
"[",
"2",
":",
"-",
"1",
"]",
"else",
":",
"return_list",
"=",
"False",
",",
"[",
"response",
"[",
"-",
"1",
"]",
"]",
"elif",
"response",
"[",
"0",
"]",
"==",
"self",
".",
"PROTOCOL_FAILED",
":",
"return_list",
"=",
"False",
",",
"[",
"response",
"[",
"2",
"]",
"]",
"return",
"return_list"
] |
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pyedbglib/protocols/jtagice3protocol.py#L197-L223
|
|
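A hedged sketch of consuming peel_response from the record above; proto stands in for an already-constructed Jtagice3Protocol instance whose handler reports trailing status, and the byte values are invented:

raw = bytearray([proto.PROTOCOL_DATA, 0x00, 0xCA, 0xFE, proto.FAILURE_OK])
ok, data = proto.peel_response(raw)
if ok:
    print("payload bytes:", list(data))     # [0xCA, 0xFE]
else:
    print("handler failure code:", data[0])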
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
scripts/Inelastic/vesuvio/fitting.py
|
python
|
parse_fit_options
|
(mass_values, profile_strs, background_str="", constraints_str="")
|
return FittingOptions(mass_profiles, background, constraints)
|
Parse the function string into a more usable format
|
Parse the function string into a more usable format
|
[
"Parse",
"the",
"function",
"string",
"into",
"a",
"more",
"usable",
"format"
] |
def parse_fit_options(mass_values, profile_strs, background_str="", constraints_str=""):
"""Parse the function string into a more usable format"""
# Individual functions are separated by semi-colon separators
mass_functions = profile_strs.rstrip(";").split(";")
if len(mass_functions) != len(mass_values):
raise ValueError("Expected the number of 'function=' definitions to equal the number of masses. "
"Found {0} masses but {1} function definition".format(len(mass_values), len(mass_functions)))
mass_profiles = []
for mass_value, prop_str in zip(mass_values, mass_functions):
mass_profiles.append(profiles.create_from_str(prop_str, mass_value))
if background_str != "":
background = backgrounds.create_from_str(background_str)
else:
background = None
if constraints_str != "":
constraint_strings = constraints_str.split(";")
constraints = []
for constr_str in constraint_strings:
constraints.append(ast.literal_eval(constr_str))
else:
constraints = None
return FittingOptions(mass_profiles, background, constraints)
|
[
"def",
"parse_fit_options",
"(",
"mass_values",
",",
"profile_strs",
",",
"background_str",
"=",
"\"\"",
",",
"constraints_str",
"=",
"\"\"",
")",
":",
"# Individual functions are separated by semi-colon separators",
"mass_functions",
"=",
"profile_strs",
".",
"rstrip",
"(",
"\";\"",
")",
".",
"split",
"(",
"\";\"",
")",
"if",
"len",
"(",
"mass_functions",
")",
"!=",
"len",
"(",
"mass_values",
")",
":",
"raise",
"ValueError",
"(",
"\"Expected the number of 'function=' definitions to equal the number of masses. \"",
"\"Found {0} masses but {1} function definition\"",
".",
"format",
"(",
"len",
"(",
"mass_values",
")",
",",
"len",
"(",
"mass_functions",
")",
")",
")",
"mass_profiles",
"=",
"[",
"]",
"for",
"mass_value",
",",
"prop_str",
"in",
"zip",
"(",
"mass_values",
",",
"mass_functions",
")",
":",
"mass_profiles",
".",
"append",
"(",
"profiles",
".",
"create_from_str",
"(",
"prop_str",
",",
"mass_value",
")",
")",
"if",
"background_str",
"!=",
"\"\"",
":",
"background",
"=",
"backgrounds",
".",
"create_from_str",
"(",
"background_str",
")",
"else",
":",
"background",
"=",
"None",
"if",
"constraints_str",
"!=",
"\"\"",
":",
"constraint_strings",
"=",
"constraints_str",
".",
"split",
"(",
"\";\"",
")",
"constraints",
"=",
"[",
"]",
"for",
"constr_str",
"in",
"constraint_strings",
":",
"constraints",
".",
"append",
"(",
"ast",
".",
"literal_eval",
"(",
"constr_str",
")",
")",
"else",
":",
"constraints",
"=",
"None",
"return",
"FittingOptions",
"(",
"mass_profiles",
",",
"background",
",",
"constraints",
")"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/Inelastic/vesuvio/fitting.py#L22-L47
|
|
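A hedged sketch of the calling convention parse_fit_options above implies: one 'function=' definition per mass, semicolon-separated, with constraints parsed by ast.literal_eval. The profile string syntax belongs to vesuvio.profiles and is only assumed here:

options = parse_fit_options(
    mass_values=[1.0079, 16.0],
    profile_strs="function=Gaussian,width=5;function=Gaussian,width=10",
    constraints_str="[1, -2]",
)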
priyankchheda/algorithms
|
c361aa9071573fa9966d5b02d05e524815abcf2b
|
tree/library/tree.py
|
python
|
TreeNode.get_level
|
(self)
|
return level
|
get level of node by calculating how many ancestors it has
|
get level of node by calculating how many ancestors it has
|
[
"get",
"level",
"of",
"node",
"by",
"calculating",
"how",
"many",
"ancestor",
"it",
"has"
] |
def get_level(self):
""" get level of node by calculating how many ancestor it has """
level = 0
parent = self.parent
while parent:
level += 1
parent = parent.parent
return level
|
[
"def",
"get_level",
"(",
"self",
")",
":",
"level",
"=",
"0",
"parent",
"=",
"self",
".",
"parent",
"while",
"parent",
":",
"level",
"+=",
"1",
"parent",
"=",
"parent",
".",
"parent",
"return",
"level"
] |
https://github.com/priyankchheda/algorithms/blob/c361aa9071573fa9966d5b02d05e524815abcf2b/tree/library/tree.py#L16-L24
|
|
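A tiny sketch for the get_level record above, assuming the library's TreeNode takes a name and exposes an add_child helper that sets .parent (an assumption; only the .parent chain matters to get_level):

root, child, leaf = TreeNode("root"), TreeNode("child"), TreeNode("leaf")
root.add_child(child)
child.add_child(leaf)
print(leaf.get_level())   # 2 -- leaf has two ancestors: child and root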
google/certificate-transparency
|
2588562fd306a447958471b6f06c1069619c1641
|
python/ct/serialization/tls_message.py
|
python
|
TLSReader._read_repeated
|
(self, message, field, opts)
|
Read a repeated field.
|
Read a repeated field.
|
[
"Read",
"a",
"repeated",
"field",
"."
] |
def _read_repeated(self, message, field, opts):
"""Read a repeated field."""
if not opts.max_total_length:
raise TypeError("Repeated field %s has no length limit" %
field.name)
# Recursive, naive.
reader = TLSReader(self._read_var_bytes(opts.min_total_length,
opts.max_total_length))
target = getattr(message, field.name)
if field.type == field.TYPE_MESSAGE:
while not reader.finished():
new_message = target.add()
reader.read(new_message)
else:
if field.type == field.TYPE_ENUM:
opts = field.enum_type.GetOptions().Extensions[
options.tls_enum_opts]
# |reader| is another member of this class.
# pylint: disable=protected-access
read_method = reader._get_read_method(field)
while not reader.finished():
target.append(read_method(opts))
|
[
"def",
"_read_repeated",
"(",
"self",
",",
"message",
",",
"field",
",",
"opts",
")",
":",
"if",
"not",
"opts",
".",
"max_total_length",
":",
"raise",
"TypeError",
"(",
"\"Repeated field %s has no length limit\"",
"%",
"field",
".",
"name",
")",
"# Recursive, naive.",
"reader",
"=",
"TLSReader",
"(",
"self",
".",
"_read_var_bytes",
"(",
"opts",
".",
"min_total_length",
",",
"opts",
".",
"max_total_length",
")",
")",
"target",
"=",
"getattr",
"(",
"message",
",",
"field",
".",
"name",
")",
"if",
"field",
".",
"type",
"==",
"field",
".",
"TYPE_MESSAGE",
":",
"while",
"not",
"reader",
".",
"finished",
"(",
")",
":",
"new_message",
"=",
"target",
".",
"add",
"(",
")",
"reader",
".",
"read",
"(",
"new_message",
")",
"else",
":",
"if",
"field",
".",
"type",
"==",
"field",
".",
"TYPE_ENUM",
":",
"opts",
"=",
"field",
".",
"enum_type",
".",
"GetOptions",
"(",
")",
".",
"Extensions",
"[",
"options",
".",
"tls_enum_opts",
"]",
"# |reader| is another member of this class.",
"# pylint: disable=protected-access",
"read_method",
"=",
"reader",
".",
"_get_read_method",
"(",
"field",
")",
"while",
"not",
"reader",
".",
"finished",
"(",
")",
":",
"target",
".",
"append",
"(",
"read_method",
"(",
"opts",
")",
")"
] |
https://github.com/google/certificate-transparency/blob/2588562fd306a447958471b6f06c1069619c1641/python/ct/serialization/tls_message.py#L101-L124
|
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/ftplib.py
|
python
|
print_line
|
(line)
|
Default retrlines callback to print a line.
|
Default retrlines callback to print a line.
|
[
"Default",
"retrlines",
"callback",
"to",
"print",
"a",
"line",
"."
] |
def print_line(line):
'''Default retrlines callback to print a line.'''
print(line)
|
[
"def",
"print_line",
"(",
"line",
")",
":",
"print",
"(",
"line",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/ftplib.py#L904-L906
|
||
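print_line is also the default callback of FTP.retrlines, so both calls below behave the same; the host name is a placeholder:

from ftplib import FTP, print_line

ftp = FTP('ftp.example.com')
ftp.login()
ftp.retrlines('LIST')              # uses print_line implicitly
ftp.retrlines('NLST', print_line)  # or pass it explicitly
ftp.quit()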
microsoft/TSS.MSR
|
0f2516fca2cd9929c31d5450e39301c9bde43688
|
TSS.Py/src/TpmTypes.py
|
python
|
TPM2B_CONTEXT_DATA.__init__
|
(self, buffer = None)
|
This structure is used in a TPMS_CONTEXT.
Attributes:
buffer (TPMS_CONTEXT_DATA): TBD
|
This structure is used in a TPMS_CONTEXT.
|
[
"This",
"structure",
"is",
"used",
"in",
"a",
"TPMS_CONTEXT",
"."
] |
def __init__(self, buffer = None):
""" This structure is used in a TPMS_CONTEXT.
Attributes:
buffer (TPMS_CONTEXT_DATA): TBD
"""
self.buffer = buffer
|
[
"def",
"__init__",
"(",
"self",
",",
"buffer",
"=",
"None",
")",
":",
"self",
".",
"buffer",
"=",
"buffer"
] |
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L8795-L8801
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_core.py
|
python
|
Image.SetDataBuffer
|
(*args, **kwargs)
|
return _core_.Image_SetDataBuffer(*args, **kwargs)
|
SetDataBuffer(self, buffer data)
Sets the internal image data pointer to point at a Python buffer
object. This can save making an extra copy of the data but you must
ensure that the buffer object lives longer than the wx.Image does.
|
SetDataBuffer(self, buffer data)
|
[
"SetDataBuffer",
"(",
"self",
"buffer",
"data",
")"
] |
def SetDataBuffer(*args, **kwargs):
"""
SetDataBuffer(self, buffer data)
Sets the internal image data pointer to point at a Python buffer
object. This can save making an extra copy of the data but you must
ensure that the buffer object lives longer than the wx.Image does.
"""
return _core_.Image_SetDataBuffer(*args, **kwargs)
|
[
"def",
"SetDataBuffer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Image_SetDataBuffer",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L3385-L3393
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/packaging/_compat.py
|
python
|
with_metaclass
|
(meta, *bases)
|
return type.__new__(metaclass, 'temporary_class', (), {})
|
Create a base class with a metaclass.
|
Create a base class with a metaclass.
|
[
"Create",
"a",
"base",
"class",
"with",
"a",
"metaclass",
"."
] |
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
|
[
"def",
"with_metaclass",
"(",
"meta",
",",
"*",
"bases",
")",
":",
"# This requires a bit of explanation: the basic idea is to make a dummy",
"# metaclass for one level of class instantiation that replaces itself with",
"# the actual metaclass.",
"class",
"metaclass",
"(",
"meta",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"name",
",",
"this_bases",
",",
"d",
")",
":",
"return",
"meta",
"(",
"name",
",",
"bases",
",",
"d",
")",
"return",
"type",
".",
"__new__",
"(",
"metaclass",
",",
"'temporary_class'",
",",
"(",
")",
",",
"{",
"}",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/packaging/_compat.py#L20-L30
|
|
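The usual reason for the with_metaclass helper above is writing one class statement that works on both Python 2 and 3. A minimal sketch with an invented metaclass:

class Meta(type):
    def __new__(mcls, name, bases, ns):
        ns.setdefault('tag', name.lower())
        return super(Meta, mcls).__new__(mcls, name, bases, ns)

class Base(with_metaclass(Meta, object)):
    pass

print(Base.tag)    # 'base', injected by Meta at class-creation time
print(type(Base))  # Meta, not the throwaway 'temporary_class' metaclass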
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_windows.py
|
python
|
MDIParentFrame.Create
|
(*args, **kwargs)
|
return _windows_.MDIParentFrame_Create(*args, **kwargs)
|
Create(self, Window parent, int id=-1, String title=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=wxDEFAULT_FRAME_STYLE|wxVSCROLL|wxHSCROLL,
String name=FrameNameStr) -> bool
|
Create(self, Window parent, int id=-1, String title=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=wxDEFAULT_FRAME_STYLE|wxVSCROLL|wxHSCROLL,
String name=FrameNameStr) -> bool
|
[
"Create",
"(",
"self",
"Window",
"parent",
"int",
"id",
"=",
"-",
"1",
"String",
"title",
"=",
"EmptyString",
"Point",
"pos",
"=",
"DefaultPosition",
"Size",
"size",
"=",
"DefaultSize",
"long",
"style",
"=",
"wxDEFAULT_FRAME_STYLE|wxVSCROLL|wxHSCROLL",
"String",
"name",
"=",
"FrameNameStr",
")",
"-",
">",
"bool"
] |
def Create(*args, **kwargs):
"""
Create(self, Window parent, int id=-1, String title=EmptyString,
Point pos=DefaultPosition, Size size=DefaultSize,
long style=wxDEFAULT_FRAME_STYLE|wxVSCROLL|wxHSCROLL,
String name=FrameNameStr) -> bool
"""
return _windows_.MDIParentFrame_Create(*args, **kwargs)
|
[
"def",
"Create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"MDIParentFrame_Create",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L4025-L4032
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/numpy/py2/numpy/lib/mixins.py
|
python
|
_binary_method
|
(ufunc, name)
|
return func
|
Implement a forward binary method with a ufunc, e.g., __add__.
|
Implement a forward binary method with a ufunc, e.g., __add__.
|
[
"Implement",
"a",
"forward",
"binary",
"method",
"with",
"a",
"ufunc",
"e",
".",
"g",
".",
"__add__",
"."
] |
def _binary_method(ufunc, name):
"""Implement a forward binary method with a ufunc, e.g., __add__."""
def func(self, other):
if _disables_array_ufunc(other):
return NotImplemented
return ufunc(self, other)
func.__name__ = '__{}__'.format(name)
return func
|
[
"def",
"_binary_method",
"(",
"ufunc",
",",
"name",
")",
":",
"def",
"func",
"(",
"self",
",",
"other",
")",
":",
"if",
"_disables_array_ufunc",
"(",
"other",
")",
":",
"return",
"NotImplemented",
"return",
"ufunc",
"(",
"self",
",",
"other",
")",
"func",
".",
"__name__",
"=",
"'__{}__'",
".",
"format",
"(",
"name",
")",
"return",
"func"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/lib/mixins.py#L20-L27
|
|
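_binary_method above is the building block behind numpy's NDArrayOperatorsMixin: each generated __add__, __sub__, and so on simply calls the ufunc, which then dispatches to the operand's __array_ufunc__. A short sketch using the public mixin:

import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin

class Wrapped(NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        arrays = [x.value if isinstance(x, Wrapped) else x for x in inputs]
        return Wrapped(getattr(ufunc, method)(*arrays, **kwargs))

print((Wrapped([1, 2]) + 3).value)   # [4 5], routed through np.add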
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/html5lib/inputstream.py
|
python
|
codecName
|
(encoding)
|
Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding.
|
Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding.
|
[
"Return",
"the",
"python",
"codec",
"name",
"corresponding",
"to",
"an",
"encoding",
"or",
"None",
"if",
"the",
"string",
"doesn",
"t",
"correspond",
"to",
"a",
"valid",
"encoding",
"."
] |
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
|
[
"def",
"codecName",
"(",
"encoding",
")",
":",
"if",
"isinstance",
"(",
"encoding",
",",
"bytes",
")",
":",
"try",
":",
"encoding",
"=",
"encoding",
".",
"decode",
"(",
"\"ascii\"",
")",
"except",
"UnicodeDecodeError",
":",
"return",
"None",
"if",
"encoding",
":",
"canonicalName",
"=",
"ascii_punctuation_re",
".",
"sub",
"(",
"\"\"",
",",
"encoding",
")",
".",
"lower",
"(",
")",
"return",
"encodings",
".",
"get",
"(",
"canonicalName",
",",
"None",
")",
"else",
":",
"return",
"None"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/html5lib/inputstream.py#L869-L881
|
||
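A hedged sketch of codecName's behaviour from the record above (the concrete return values depend on the encodings table this module ships with, so they are assumptions):

print(codecName("UTF-8"))             # a codec name such as 'utf-8'
print(codecName(b"\xff\xfe"))         # bytes that do not decode as ASCII -> None
print(codecName("no-such-charset"))   # unknown label -> None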
nsnam/ns-3-dev-git
|
efdb2e21f45c0a87a60b47c547b68fa140a7b686
|
src/visualizer/visualizer/plugins/olsr.py
|
python
|
ShowOlsrRoutingTable.__init__
|
(self, visualizer, node_index)
|
!
Initializer
@param self this object
@param visualizer visualizer object
@param node_index the node index
|
!
Initializer
|
[
"!",
"Initializer"
] |
def __init__(self, visualizer, node_index):
"""!
Initializer
@param self this object
@param visualizer visualizer object
@param node_index the node index
"""
InformationWindow.__init__(self)
self.win = Gtk.Dialog(parent=visualizer.window,
flags=Gtk.DialogFlags.DESTROY_WITH_PARENT|Gtk.DialogFlags.NO_SEPARATOR,
buttons=(Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE))
self.win.set_default_size(Gdk.Screen.width()/2, Gdk.Screen.height()/2)
self.win.connect("response", self._response_cb)
self.win.set_title("OLSR routing table for node %i" % node_index)
self.visualizer = visualizer
self.node_index = node_index
self.table_model = Gtk.ListStore(str, str, str, int)
treeview = Gtk.TreeView(self.table_model)
treeview.show()
sw = Gtk.ScrolledWindow()
sw.set_properties(hscrollbar_policy=Gtk.PolicyType.AUTOMATIC,
vscrollbar_policy=Gtk.PolicyType.AUTOMATIC)
sw.show()
sw.add(treeview)
self.win.vbox.add(sw)
# Dest.
column = Gtk.TreeViewColumn('Destination', Gtk.CellRendererText(),
text=self.COLUMN_DESTINATION)
treeview.append_column(column)
# Next hop
column = Gtk.TreeViewColumn('Next hop', Gtk.CellRendererText(),
text=self.COLUMN_NEXT_HOP)
treeview.append_column(column)
# Interface
column = Gtk.TreeViewColumn('Interface', Gtk.CellRendererText(),
text=self.COLUMN_INTERFACE)
treeview.append_column(column)
# Num. Hops
column = Gtk.TreeViewColumn('Num. Hops', Gtk.CellRendererText(),
text=self.COLUMN_NUM_HOPS)
treeview.append_column(column)
self.visualizer.add_information_window(self)
self.win.show()
|
[
"def",
"__init__",
"(",
"self",
",",
"visualizer",
",",
"node_index",
")",
":",
"InformationWindow",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"win",
"=",
"Gtk",
".",
"Dialog",
"(",
"parent",
"=",
"visualizer",
".",
"window",
",",
"flags",
"=",
"Gtk",
".",
"DialogFlags",
".",
"DESTROY_WITH_PARENT",
"|",
"Gtk",
".",
"DialogFlags",
".",
"NO_SEPARATOR",
",",
"buttons",
"=",
"(",
"Gtk",
".",
"STOCK_CLOSE",
",",
"Gtk",
".",
"ResponseType",
".",
"CLOSE",
")",
")",
"self",
".",
"win",
".",
"set_default_size",
"(",
"Gdk",
".",
"Screen",
".",
"width",
"(",
")",
"/",
"2",
",",
"Gdk",
".",
"Screen",
".",
"height",
"(",
")",
"/",
"2",
")",
"self",
".",
"win",
".",
"connect",
"(",
"\"response\"",
",",
"self",
".",
"_response_cb",
")",
"self",
".",
"win",
".",
"set_title",
"(",
"\"OLSR routing table for node %i\"",
"%",
"node_index",
")",
"self",
".",
"visualizer",
"=",
"visualizer",
"self",
".",
"node_index",
"=",
"node_index",
"self",
".",
"table_model",
"=",
"Gtk",
".",
"ListStore",
"(",
"str",
",",
"str",
",",
"str",
",",
"int",
")",
"treeview",
"=",
"Gtk",
".",
"TreeView",
"(",
"self",
".",
"table_model",
")",
"treeview",
".",
"show",
"(",
")",
"sw",
"=",
"Gtk",
".",
"ScrolledWindow",
"(",
")",
"sw",
".",
"set_properties",
"(",
"hscrollbar_policy",
"=",
"Gtk",
".",
"PolicyType",
".",
"AUTOMATIC",
",",
"vscrollbar_policy",
"=",
"Gtk",
".",
"PolicyType",
".",
"AUTOMATIC",
")",
"sw",
".",
"show",
"(",
")",
"sw",
".",
"add",
"(",
"treeview",
")",
"self",
".",
"win",
".",
"vbox",
".",
"add",
"(",
"sw",
")",
"# Dest.",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Destination'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_DESTINATION",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Next hop",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Next hop'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_NEXT_HOP",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Interface",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Interface'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_INTERFACE",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"# Num. Hops",
"column",
"=",
"Gtk",
".",
"TreeViewColumn",
"(",
"'Num. Hops'",
",",
"Gtk",
".",
"CellRendererText",
"(",
")",
",",
"text",
"=",
"self",
".",
"COLUMN_NUM_HOPS",
")",
"treeview",
".",
"append_column",
"(",
"column",
")",
"self",
".",
"visualizer",
".",
"add_information_window",
"(",
"self",
")",
"self",
".",
"win",
".",
"show",
"(",
")"
] |
https://github.com/nsnam/ns-3-dev-git/blob/efdb2e21f45c0a87a60b47c547b68fa140a7b686/src/visualizer/visualizer/plugins/olsr.py#L29-L78
|
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/frame.py
|
python
|
DataFrame.assign
|
(self, **kwargs)
|
return data
|
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
|
r"""
Assign new columns to a DataFrame.
|
[
"r",
"Assign",
"new",
"columns",
"to",
"a",
"DataFrame",
"."
] |
def assign(self, **kwargs) -> "DataFrame":
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
|
[
"def",
"assign",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
"->",
"\"DataFrame\"",
":",
"data",
"=",
"self",
".",
"copy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"data",
"[",
"k",
"]",
"=",
"com",
".",
"apply_if_callable",
"(",
"v",
",",
"data",
")",
"return",
"data"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/frame.py#L3498-L3568
|
|
eclipse/sumo
|
7132a9b8b6eea734bdec38479026b4d8c4336d03
|
tools/traci/_edge.py
|
python
|
EdgeDomain.getPendingVehicles
|
(self, edgeID)
|
return self._getUniversal(tc.VAR_PENDING_VEHICLES, edgeID)
|
getPendingVehicles(string) -> list(string)
Returns a list of all vehicle ids waiting for insertion on this edge (with depart delay)
|
getPendingVehicles(string) -> list(string)
Returns a list of all vehicle ids waiting for insertion on this edge (with depart delay)
|
[
"getPendingVehicles",
"(",
"string",
")",
"-",
">",
"list",
"(",
"string",
")",
"Returns",
"a",
"list",
"of",
"all",
"vehicle",
"ids",
"waiting",
"for",
"insertion",
"on",
"this",
"edge",
"(",
"with",
"depart",
"delay",
")"
] |
def getPendingVehicles(self, edgeID):
"""getPendingVehicles(string) -> list(string)
Returns a list of all vehicle ids waiting for insertion on this edge (with depart delay)
"""
return self._getUniversal(tc.VAR_PENDING_VEHICLES, edgeID)
|
[
"def",
"getPendingVehicles",
"(",
"self",
",",
"edgeID",
")",
":",
"return",
"self",
".",
"_getUniversal",
"(",
"tc",
".",
"VAR_PENDING_VEHICLES",
",",
"edgeID",
")"
] |
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_edge.py#L183-L187
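A minimal usage sketch for the method above, assuming a local SUMO installation plus a scenario file and an edge id "E0" (both hypothetical):
```python
import traci  # SUMO's TraCI python bindings

# Launch SUMO with a (hypothetical) scenario and poll one edge each step.
traci.start(["sumo", "-c", "scenario.sumocfg"])
while traci.simulation.getMinExpectedNumber() > 0:
    traci.simulationStep()
    pending = traci.edge.getPendingVehicles("E0")
    if pending:
        print("waiting for insertion on E0:", pending)
traci.close()
```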
|
|
mysql/mysql-workbench
|
2f35f9034f015cbcd22139a60e1baa2e3e8e795c
|
modules/db.sqlite/db_sqlite_migration_grt.py
|
python
|
SQLiteMigration.migrateUpdateForChanges
|
(self, state, target_catalog)
|
return target_catalog
|
Create datatype cast expression for target column based on source datatype.
|
Create datatype cast expression for target column based on source datatype.
|
[
"Create",
"datatype",
"cast",
"expression",
"for",
"target",
"column",
"based",
"on",
"source",
"datatype",
"."
] |
def migrateUpdateForChanges(self, state, target_catalog):
"""
Create datatype cast expression for target column based on source datatype.
"""
return target_catalog
|
[
"def",
"migrateUpdateForChanges",
"(",
"self",
",",
"state",
",",
"target_catalog",
")",
":",
"return",
"target_catalog"
] |
https://github.com/mysql/mysql-workbench/blob/2f35f9034f015cbcd22139a60e1baa2e3e8e795c/modules/db.sqlite/db_sqlite_migration_grt.py#L206-L210
|
|
adobe/chromium
|
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
|
third_party/protobuf/python/google/protobuf/internal/wire_format.py
|
python
|
UnpackTag
|
(tag)
|
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
|
The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
|
The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
|
[
"The",
"inverse",
"of",
"PackTag",
"()",
".",
"Given",
"an",
"unsigned",
"32",
"-",
"bit",
"number",
"returns",
"a",
"(",
"field_number",
"wire_type",
")",
"tuple",
"."
] |
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
|
[
"def",
"UnpackTag",
"(",
"tag",
")",
":",
"return",
"(",
"tag",
">>",
"TAG_TYPE_BITS",
")",
",",
"(",
"tag",
"&",
"TAG_TYPE_MASK",
")"
] |
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/protobuf/python/google/protobuf/internal/wire_format.py#L93-L97
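The docstring above reflects the protobuf wire format, where the low bits of a tag carry the wire type and the remaining bits the field number. A self-contained sketch that re-creates the two constants instead of importing the internal module:
```python
TAG_TYPE_BITS = 3                        # low 3 bits of a tag hold the wire type
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1

def pack_tag(field_number, wire_type):
    """Combine a field number and a wire type into one tag integer."""
    return (field_number << TAG_TYPE_BITS) | wire_type

def unpack_tag(tag):
    """Inverse of pack_tag(): recover (field_number, wire_type)."""
    return tag >> TAG_TYPE_BITS, tag & TAG_TYPE_MASK

assert unpack_tag(pack_tag(1, 2)) == (1, 2)  # field 1, length-delimited
```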
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/propgrid.py
|
python
|
PGProperty.AddPrivateChild
|
(*args, **kwargs)
|
return _propgrid.PGProperty_AddPrivateChild(*args, **kwargs)
|
AddPrivateChild(self, PGProperty prop)
|
AddPrivateChild(self, PGProperty prop)
|
[
"AddPrivateChild",
"(",
"self",
"PGProperty",
"prop",
")"
] |
def AddPrivateChild(*args, **kwargs):
"""AddPrivateChild(self, PGProperty prop)"""
return _propgrid.PGProperty_AddPrivateChild(*args, **kwargs)
|
[
"def",
"AddPrivateChild",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PGProperty_AddPrivateChild",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/propgrid.py#L799-L801
|
|
PX4/PX4-Autopilot
|
0b9f60a0370be53d683352c63fd92db3d6586e18
|
platforms/nuttx/NuttX/tools/kconfiglib.py
|
python
|
Kconfig.write_min_config
|
(self, filename,
header="# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n")
|
Writes out a "minimal" configuration file, omitting symbols whose value
matches their default value. The format matches the one produced by
'make savedefconfig'.
The resulting configuration file is incomplete, but a complete
configuration can be derived from it by loading it. Minimal
configuration files can serve as a more manageable configuration format
compared to a "full" .config file, especially when configurations files
are merged or edited by hand.
filename:
Self-explanatory.
header (default: "# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
Text that will be inserted verbatim at the beginning of the file. You
would usually want each line to start with '#' to make it a comment,
and include a final terminating newline.
|
Writes out a "minimal" configuration file, omitting symbols whose value
matches their default value. The format matches the one produced by
'make savedefconfig'.
|
[
"Writes",
"out",
"a",
"minimal",
"configuration",
"file",
"omitting",
"symbols",
"whose",
"value",
"matches",
"their",
"default",
"value",
".",
"The",
"format",
"matches",
"the",
"one",
"produced",
"by",
"make",
"savedefconfig",
"."
] |
def write_min_config(self, filename,
header="# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
"""
Writes out a "minimal" configuration file, omitting symbols whose value
matches their default value. The format matches the one produced by
'make savedefconfig'.
The resulting configuration file is incomplete, but a complete
configuration can be derived from it by loading it. Minimal
configuration files can serve as a more manageable configuration format
compared to a "full" .config file, especially when configurations files
are merged or edited by hand.
filename:
Self-explanatory.
header (default: "# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\n"):
Text that will be inserted verbatim at the beginning of the file. You
would usually want each line to start with '#' to make it a comment,
and include a final terminating newline.
"""
with self._open(filename, "w") as f:
f.write(header)
for sym in self.unique_defined_syms:
# Skip symbols that cannot be changed. Only check
# non-choice symbols, as selects don't affect choice
# symbols.
if not sym.choice and \
sym.visibility <= expr_value(sym.rev_dep):
continue
# Skip symbols whose value matches their default
if sym.str_value == sym._str_default():
continue
# Skip symbols that would be selected by default in a
# choice, unless the choice is optional or the symbol type
# isn't bool (it might be possible to set the choice mode
# to n or the symbol to m in those cases).
if sym.choice and \
not sym.choice.is_optional and \
sym.choice._get_selection_from_defaults() is sym and \
sym.orig_type is BOOL and \
sym.tri_value == 2:
continue
f.write(sym.config_string)
|
[
"def",
"write_min_config",
"(",
"self",
",",
"filename",
",",
"header",
"=",
"\"# Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)\\n\"",
")",
":",
"with",
"self",
".",
"_open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
")",
"for",
"sym",
"in",
"self",
".",
"unique_defined_syms",
":",
"# Skip symbols that cannot be changed. Only check",
"# non-choice symbols, as selects don't affect choice",
"# symbols.",
"if",
"not",
"sym",
".",
"choice",
"and",
"sym",
".",
"visibility",
"<=",
"expr_value",
"(",
"sym",
".",
"rev_dep",
")",
":",
"continue",
"# Skip symbols whose value matches their default",
"if",
"sym",
".",
"str_value",
"==",
"sym",
".",
"_str_default",
"(",
")",
":",
"continue",
"# Skip symbols that would be selected by default in a",
"# choice, unless the choice is optional or the symbol type",
"# isn't bool (it might be possible to set the choice mode",
"# to n or the symbol to m in those cases).",
"if",
"sym",
".",
"choice",
"and",
"not",
"sym",
".",
"choice",
".",
"is_optional",
"and",
"sym",
".",
"choice",
".",
"_get_selection_from_defaults",
"(",
")",
"is",
"sym",
"and",
"sym",
".",
"orig_type",
"is",
"BOOL",
"and",
"sym",
".",
"tri_value",
"==",
"2",
":",
"continue",
"f",
".",
"write",
"(",
"sym",
".",
"config_string",
")"
] |
https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/platforms/nuttx/NuttX/tools/kconfiglib.py#L1396-L1443
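A usage sketch for the method above; the Kconfig and .config paths are hypothetical:
```python
from kconfiglib import Kconfig

kconf = Kconfig("Kconfig")            # parse the top-level Kconfig file
kconf.load_config(".config")          # load a full configuration
kconf.write_min_config("defconfig")   # write only non-default symbol values
```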
|
||
tensorflow/tensorflow
|
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
|
tensorflow/python/ops/math_grad.py
|
python
|
_AcosGrad
|
(op, grad)
|
Returns grad * -1/sqrt(1-x^2).
|
Returns grad * -1/sqrt(1-x^2).
|
[
"Returns",
"grad",
"*",
"-",
"1",
"/",
"sqrt",
"(",
"1",
"-",
"x^2",
")",
"."
] |
def _AcosGrad(op, grad):
"""Returns grad * -1/sqrt(1-x^2)."""
x = op.inputs[0]
with ops.control_dependencies([grad]):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return -grad * inv
|
[
"def",
"_AcosGrad",
"(",
"op",
",",
"grad",
")",
":",
"x",
"=",
"op",
".",
"inputs",
"[",
"0",
"]",
"with",
"ops",
".",
"control_dependencies",
"(",
"[",
"grad",
"]",
")",
":",
"x",
"=",
"math_ops",
".",
"conj",
"(",
"x",
")",
"x2",
"=",
"math_ops",
".",
"square",
"(",
"x",
")",
"one",
"=",
"constant_op",
".",
"constant",
"(",
"1",
",",
"dtype",
"=",
"grad",
".",
"dtype",
")",
"den",
"=",
"math_ops",
".",
"sqrt",
"(",
"math_ops",
".",
"subtract",
"(",
"one",
",",
"x2",
")",
")",
"inv",
"=",
"math_ops",
".",
"reciprocal",
"(",
"den",
")",
"return",
"-",
"grad",
"*",
"inv"
] |
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/math_grad.py#L1232-L1241
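The gradient in the record above is the chain rule applied to the arccosine derivative:
```latex
\frac{d}{dx}\arccos x = -\frac{1}{\sqrt{1 - x^2}}
\quad\Longrightarrow\quad
\frac{\partial L}{\partial x}
  = \frac{\partial L}{\partial y}\cdot\Bigl(-\frac{1}{\sqrt{1 - x^2}}\Bigr),
\qquad y = \arccos x
```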
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_misc.py
|
python
|
FileDropTarget.OnData
|
(*args, **kwargs)
|
return _misc_.FileDropTarget_OnData(*args, **kwargs)
|
OnData(self, int x, int y, int def) -> int
|
OnData(self, int x, int y, int def) -> int
|
[
"OnData",
"(",
"self",
"int",
"x",
"int",
"y",
"int",
"def",
")",
"-",
">",
"int"
] |
def OnData(*args, **kwargs):
"""OnData(self, int x, int y, int def) -> int"""
return _misc_.FileDropTarget_OnData(*args, **kwargs)
|
[
"def",
"OnData",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"FileDropTarget_OnData",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L5730-L5732
|
|
hughperkins/tf-coriander
|
970d3df6c11400ad68405f22b0c42a52374e94ca
|
tensorflow/contrib/quantization/tools/quantize_graph.py
|
python
|
GraphRewriter.round_nodes_recursively
|
(self, current_node)
|
The entry point for simple rounding quantization.
|
The entry point for simple rounding quantization.
|
[
"The",
"entry",
"point",
"for",
"simple",
"rounding",
"quantization",
"."
] |
def round_nodes_recursively(self, current_node):
"""The entry point for simple rounding quantization."""
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.round_nodes_recursively(input_node)
nodes_to_quantize = ["Conv2D", "BiasAdd", "MatMul"]
if any(current_node.op in s for s in nodes_to_quantize):
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
new_node.name = current_node.name + "_original"
self.add_output_graph_node(new_node)
levels = 1 << FLAGS.bitdepth
constant_name = current_node.name + "_round_depth"
constant_tensor = tf.constant(levels, dtype=tf.int32, name=constant_name)
constant_node = constant_tensor.op.node_def
self.add_output_graph_node(constant_node)
quantize_node = tf.NodeDef()
quantize_node.op = "RoundToSteps"
quantize_node.name = current_node.name
quantize_node.input.extend([current_node.name + "_original"])
quantize_node.input.extend([constant_node.name])
self.add_output_graph_node(quantize_node)
else:
new_node = tf.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
|
[
"def",
"round_nodes_recursively",
"(",
"self",
",",
"current_node",
")",
":",
"if",
"self",
".",
"already_visited",
"[",
"current_node",
".",
"name",
"]",
":",
"return",
"self",
".",
"already_visited",
"[",
"current_node",
".",
"name",
"]",
"=",
"True",
"for",
"input_node_name",
"in",
"current_node",
".",
"input",
":",
"input_node_name",
"=",
"node_name_from_input",
"(",
"input_node_name",
")",
"input_node",
"=",
"self",
".",
"nodes_map",
"[",
"input_node_name",
"]",
"self",
".",
"round_nodes_recursively",
"(",
"input_node",
")",
"nodes_to_quantize",
"=",
"[",
"\"Conv2D\"",
",",
"\"BiasAdd\"",
",",
"\"MatMul\"",
"]",
"if",
"any",
"(",
"current_node",
".",
"op",
"in",
"s",
"for",
"s",
"in",
"nodes_to_quantize",
")",
":",
"new_node",
"=",
"tf",
".",
"NodeDef",
"(",
")",
"new_node",
".",
"CopyFrom",
"(",
"current_node",
")",
"new_node",
".",
"name",
"=",
"current_node",
".",
"name",
"+",
"\"_original\"",
"self",
".",
"add_output_graph_node",
"(",
"new_node",
")",
"levels",
"=",
"1",
"<<",
"FLAGS",
".",
"bitdepth",
"constant_name",
"=",
"current_node",
".",
"name",
"+",
"\"_round_depth\"",
"constant_tensor",
"=",
"tf",
".",
"constant",
"(",
"levels",
",",
"dtype",
"=",
"tf",
".",
"int32",
",",
"name",
"=",
"constant_name",
")",
"constant_node",
"=",
"constant_tensor",
".",
"op",
".",
"node_def",
"self",
".",
"add_output_graph_node",
"(",
"constant_node",
")",
"quantize_node",
"=",
"tf",
".",
"NodeDef",
"(",
")",
"quantize_node",
".",
"op",
"=",
"\"RoundToSteps\"",
"quantize_node",
".",
"name",
"=",
"current_node",
".",
"name",
"quantize_node",
".",
"input",
".",
"extend",
"(",
"[",
"current_node",
".",
"name",
"+",
"\"_original\"",
"]",
")",
"quantize_node",
".",
"input",
".",
"extend",
"(",
"[",
"constant_node",
".",
"name",
"]",
")",
"self",
".",
"add_output_graph_node",
"(",
"quantize_node",
")",
"else",
":",
"new_node",
"=",
"tf",
".",
"NodeDef",
"(",
")",
"new_node",
".",
"CopyFrom",
"(",
"current_node",
")",
"self",
".",
"add_output_graph_node",
"(",
"new_node",
")"
] |
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/quantization/tools/quantize_graph.py#L353-L382
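Stripped of the TensorFlow specifics, the method above is a memoized depth-first walk that emits each node after its inputs, rewriting the ones that match. A generic sketch of that pattern (all names hypothetical):
```python
def rewrite_recursively(node, nodes_map, visited, should_rewrite, rewrite, emit):
    """Post-order DFS: visit inputs first, then emit the (maybe rewritten) node."""
    if node.name in visited:
        return                      # each node is processed exactly once
    visited.add(node.name)
    for input_name in node.inputs:
        rewrite_recursively(nodes_map[input_name], nodes_map,
                            visited, should_rewrite, rewrite, emit)
    emit(rewrite(node) if should_rewrite(node) else node)
```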
|
||
eclipse/sumo
|
7132a9b8b6eea734bdec38479026b4d8c4336d03
|
tools/contributed/sumopy/agilepy/lib_wx/toolbox.py
|
python
|
ToolPalett.refresh
|
(self)
|
Reorganizes the tool palette after adding/removing tools.
Attention: this is not called automatically.
|
Reorganizes the tool palette after adding/removing tools.
Attention: this is not called automatically.
|
[
"Reorganizes",
"toolpallet",
"after",
"adding",
"/",
"removing",
"tools",
".",
"Attention",
"is",
"not",
"automatically",
"called",
"."
] |
def refresh(self):
"""
Reorganizes the tool palette after adding/removing tools.
Attention: this is not called automatically.
"""
self.sizer.Layout()
|
[
"def",
"refresh",
"(",
"self",
")",
":",
"self",
".",
"sizer",
".",
"Layout",
"(",
")"
] |
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/agilepy/lib_wx/toolbox.py#L310-L315
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/ed_toolbar.py
|
python
|
EdToolBar.__init__
|
(self, parent)
|
Initializes the toolbar
@param parent: parent window of this toolbar
|
Initializes the toolbar
@param parent: parent window of this toolbar
|
[
"Initializes",
"the",
"toolbar",
"@param",
"parent",
":",
"parent",
"window",
"of",
"this",
"toolbar"
] |
def __init__(self, parent):
"""Initializes the toolbar
@param parent: parent window of this toolbar
"""
sstyle = wx.TB_HORIZONTAL | wx.NO_BORDER
if wx.Platform == '__WXGTK__':
sstyle = sstyle | wx.TB_DOCKABLE
super(EdToolBar, self).__init__(parent, style=sstyle)
# Attributes
self._theme = Profile_Get('ICONS')
self.SetToolBitmapSize(Profile_Get('ICON_SZ', 'size_tuple'))
self._PopulateTools()
# Event Handlers
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy, self)
# Message Handlers
ed_msg.Subscribe(self.OnThemeChange, ed_msg.EDMSG_THEME_CHANGED)
|
[
"def",
"__init__",
"(",
"self",
",",
"parent",
")",
":",
"sstyle",
"=",
"wx",
".",
"TB_HORIZONTAL",
"|",
"wx",
".",
"NO_BORDER",
"if",
"wx",
".",
"Platform",
"==",
"'__WXGTK__'",
":",
"sstyle",
"=",
"sstyle",
"|",
"wx",
".",
"TB_DOCKABLE",
"super",
"(",
"EdToolBar",
",",
"self",
")",
".",
"__init__",
"(",
"parent",
",",
"style",
"=",
"sstyle",
")",
"# Attributes",
"self",
".",
"_theme",
"=",
"Profile_Get",
"(",
"'ICONS'",
")",
"self",
".",
"SetToolBitmapSize",
"(",
"Profile_Get",
"(",
"'ICON_SZ'",
",",
"'size_tuple'",
")",
")",
"self",
".",
"_PopulateTools",
"(",
")",
"# Event Handlers",
"self",
".",
"Bind",
"(",
"wx",
".",
"EVT_WINDOW_DESTROY",
",",
"self",
".",
"OnDestroy",
",",
"self",
")",
"# Message Handlers",
"ed_msg",
".",
"Subscribe",
"(",
"self",
".",
"OnThemeChange",
",",
"ed_msg",
".",
"EDMSG_THEME_CHANGED",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_toolbar.py#L42-L61
|
||
fabianschenk/RESLAM
|
2e71a578b6d1a1ad1fb018641218e1f41dd9e330
|
thirdparty/Sophus/py/sophus/complex.py
|
python
|
Complex.Da_a_mul_b
|
(a, b)
|
return sympy.Matrix([[b.real, -b.imag],
[b.imag, b.real]])
|
derivative of complex multiplication wrt left multiplier a
|
derivative of complex multiplication wrt left multiplier a
|
[
"derivatice",
"of",
"complex",
"muliplication",
"wrt",
"left",
"multiplier",
"a"
] |
def Da_a_mul_b(a, b):
""" derivatice of complex muliplication wrt left multiplier a """
return sympy.Matrix([[b.real, -b.imag],
[b.imag, b.real]])
|
[
"def",
"Da_a_mul_b",
"(",
"a",
",",
"b",
")",
":",
"return",
"sympy",
".",
"Matrix",
"(",
"[",
"[",
"b",
".",
"real",
",",
"-",
"b",
".",
"imag",
"]",
",",
"[",
"b",
".",
"imag",
",",
"b",
".",
"real",
"]",
"]",
")"
] |
https://github.com/fabianschenk/RESLAM/blob/2e71a578b6d1a1ad1fb018641218e1f41dd9e330/thirdparty/Sophus/py/sophus/complex.py#L72-L75
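Viewing a complex number as the 2-vector (real, imag), the product ab is linear in a, and its Jacobian with respect to a is exactly the matrix returned above:
```latex
ab = (a_r b_r - a_i b_i) + i\,(a_r b_i + a_i b_r)
\quad\Longrightarrow\quad
\frac{\partial(ab)}{\partial a}
  = \begin{pmatrix} b_r & -b_i \\ b_i & \hphantom{-}b_r \end{pmatrix}
```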
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mimetypes.py
|
python
|
MimeTypes.guess_all_extensions
|
(self, type, strict=True)
|
return extensions
|
Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
|
Guess the extensions for a file based on its MIME type.
|
[
"Guess",
"the",
"extensions",
"for",
"a",
"file",
"based",
"on",
"its",
"MIME",
"type",
"."
] |
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
|
[
"def",
"guess_all_extensions",
"(",
"self",
",",
"type",
",",
"strict",
"=",
"True",
")",
":",
"type",
"=",
"type",
".",
"lower",
"(",
")",
"extensions",
"=",
"self",
".",
"types_map_inv",
"[",
"True",
"]",
".",
"get",
"(",
"type",
",",
"[",
"]",
")",
"if",
"not",
"strict",
":",
"for",
"ext",
"in",
"self",
".",
"types_map_inv",
"[",
"False",
"]",
".",
"get",
"(",
"type",
",",
"[",
"]",
")",
":",
"if",
"ext",
"not",
"in",
"extensions",
":",
"extensions",
".",
"append",
"(",
"ext",
")",
"return",
"extensions"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mimetypes.py#L157-L174
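The same method is exposed at module level in the standard library; a quick sketch (the exact extension list and its order depend on the platform's type maps):
```python
import mimetypes

mimetypes.init()
print(mimetypes.guess_all_extensions("image/jpeg"))
# e.g. ['.jpe', '.jpeg', '.jpg'] -- every extension mapped to the type
print(mimetypes.guess_all_extensions("image/jpeg", strict=False))
```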
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_gdi.py
|
python
|
DC.DrawEllipticArcPointSize
|
(*args, **kwargs)
|
return _gdi_.DC_DrawEllipticArcPointSize(*args, **kwargs)
|
DrawEllipticArcPointSize(self, Point pt, Size sz, double start, double end)
Draws an arc of an ellipse, with the given rectangle defining the
bounds of the ellipse. The current pen is used for drawing the arc and
the current brush is used for drawing the pie.
The *start* and *end* parameters specify the start and end of the arc
relative to the three-o'clock position from the center of the
rectangle. Angles are specified in degrees (360 is a complete
circle). Positive values mean counter-clockwise motion. If start is
equal to end, a complete ellipse will be drawn.
|
DrawEllipticArcPointSize(self, Point pt, Size sz, double start, double end)
|
[
"DrawEllipticArcPointSize",
"(",
"self",
"Point",
"pt",
"Size",
"sz",
"double",
"start",
"double",
"end",
")"
] |
def DrawEllipticArcPointSize(*args, **kwargs):
"""
DrawEllipticArcPointSize(self, Point pt, Size sz, double start, double end)
Draws an arc of an ellipse, with the given rectangle defining the
bounds of the ellipse. The current pen is used for drawing the arc and
the current brush is used for drawing the pie.
The *start* and *end* parameters specify the start and end of the arc
relative to the three-o'clock position from the center of the
rectangle. Angles are specified in degrees (360 is a complete
circle). Positive values mean counter-clockwise motion. If start is
equal to end, a complete ellipse will be drawn.
"""
return _gdi_.DC_DrawEllipticArcPointSize(*args, **kwargs)
|
[
"def",
"DrawEllipticArcPointSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"DC_DrawEllipticArcPointSize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_gdi.py#L3514-L3528
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py2/scipy/ndimage/morphology.py
|
python
|
binary_dilation
|
(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0,
brute_force=False)
|
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 1, brute_force)
|
Multi-dimensional binary dilation with the given structuring element.
Parameters
----------
input : array_like
Binary array_like to be dilated. Non-zero (True) elements form
the subset to be dilated.
structure : array_like, optional
Structuring element used for the dilation. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The dilation is repeated `iterations` times (one, by default).
If iterations is less than 1, the dilation is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
brute_force : boolean, optional
Memory condition: if False, only the pixels whose value was changed in
the last iteration are tracked as candidates to be updated (dilated)
in the current iteration; if True all pixels are considered as
candidates for dilation, regardless of what happened in the previous
iteration. False by default.
Returns
-------
binary_dilation : ndarray of bools
Dilation of the input by the structuring element.
See also
--------
grey_dilation, binary_erosion, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for expanding the shapes in an image. The binary
dilation of an image by a structuring element is the locus of the points
covered by the structuring element, when its center lies within the
non-zero points of the image.
References
----------
.. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a)
array([[False, False, False, False, False],
[False, False, True, False, False],
[False, True, True, True, False],
[False, False, True, False, False],
[False, False, False, False, False]], dtype=bool)
>>> ndimage.binary_dilation(a).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # 3x3 structuring element with connectivity 1, used by default
>>> struct1 = ndimage.generate_binary_structure(2, 1)
>>> struct1
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # 3x3 structuring element with connectivity 2
>>> struct2 = ndimage.generate_binary_structure(2, 2)
>>> struct2
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct1,\\
... iterations=2).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
|
Multi-dimensional binary dilation with the given structuring element.
|
[
"Multi",
"-",
"dimensional",
"binary",
"dilation",
"with",
"the",
"given",
"structuring",
"element",
"."
] |
def binary_dilation(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0,
brute_force=False):
"""
Multi-dimensional binary dilation with the given structuring element.
Parameters
----------
input : array_like
Binary array_like to be dilated. Non-zero (True) elements form
the subset to be dilated.
structure : array_like, optional
Structuring element used for the dilation. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The dilation is repeated `iterations` times (one, by default).
If iterations is less than 1, the dilation is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
brute_force : boolean, optional
Memory condition: if False, only the pixels whose value was changed in
the last iteration are tracked as candidates to be updated (dilated)
in the current iteration; if True all pixels are considered as
candidates for dilation, regardless of what happened in the previous
iteration. False by default.
Returns
-------
binary_dilation : ndarray of bools
Dilation of the input by the structuring element.
See also
--------
grey_dilation, binary_erosion, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for expanding the shapes in an image. The binary
dilation of an image by a structuring element is the locus of the points
covered by the structuring element, when its center lies within the
non-zero points of the image.
References
----------
.. [1] https://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] https://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a)
array([[False, False, False, False, False],
[False, False, True, False, False],
[False, True, True, True, False],
[False, False, True, False, False],
[False, False, False, False, False]], dtype=bool)
>>> ndimage.binary_dilation(a).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # 3x3 structuring element with connectivity 1, used by default
>>> struct1 = ndimage.generate_binary_structure(2, 1)
>>> struct1
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # 3x3 structuring element with connectivity 2
>>> struct2 = ndimage.generate_binary_structure(2, 2)
>>> struct2
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct1,\\
... iterations=2).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
"""
input = numpy.asarray(input)
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
origin = _ni_support._normalize_sequence(origin, input.ndim)
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 1, brute_force)
|
[
"def",
"binary_dilation",
"(",
"input",
",",
"structure",
"=",
"None",
",",
"iterations",
"=",
"1",
",",
"mask",
"=",
"None",
",",
"output",
"=",
"None",
",",
"border_value",
"=",
"0",
",",
"origin",
"=",
"0",
",",
"brute_force",
"=",
"False",
")",
":",
"input",
"=",
"numpy",
".",
"asarray",
"(",
"input",
")",
"if",
"structure",
"is",
"None",
":",
"structure",
"=",
"generate_binary_structure",
"(",
"input",
".",
"ndim",
",",
"1",
")",
"origin",
"=",
"_ni_support",
".",
"_normalize_sequence",
"(",
"origin",
",",
"input",
".",
"ndim",
")",
"structure",
"=",
"numpy",
".",
"asarray",
"(",
"structure",
")",
"structure",
"=",
"structure",
"[",
"tuple",
"(",
"[",
"slice",
"(",
"None",
",",
"None",
",",
"-",
"1",
")",
"]",
"*",
"structure",
".",
"ndim",
")",
"]",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"origin",
")",
")",
":",
"origin",
"[",
"ii",
"]",
"=",
"-",
"origin",
"[",
"ii",
"]",
"if",
"not",
"structure",
".",
"shape",
"[",
"ii",
"]",
"&",
"1",
":",
"origin",
"[",
"ii",
"]",
"-=",
"1",
"return",
"_binary_erosion",
"(",
"input",
",",
"structure",
",",
"iterations",
",",
"mask",
",",
"output",
",",
"border_value",
",",
"origin",
",",
"1",
",",
"brute_force",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/ndimage/morphology.py#L379-L507
|
|
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
deps/src/libxml2-2.9.1/python/libxml2.py
|
python
|
xmlNode.xpathCastNodeToNumber
|
(self)
|
return ret
|
Converts a node to its number value
|
Converts a node to its number value
|
[
"Converts",
"a",
"node",
"to",
"its",
"number",
"value"
] |
def xpathCastNodeToNumber(self):
"""Converts a node to its number value """
ret = libxml2mod.xmlXPathCastNodeToNumber(self._o)
return ret
|
[
"def",
"xpathCastNodeToNumber",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlXPathCastNodeToNumber",
"(",
"self",
".",
"_o",
")",
"return",
"ret"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L3704-L3707
|
|
openthread/openthread
|
9fcdbed9c526c70f1556d1ed84099c1535c7cd32
|
tools/harness-thci/OpenThread.py
|
python
|
OpenThreadTHCI.getVersionNumber
|
(self)
|
return self.__executeCommand('version')[0]
|
get OpenThread stack firmware version number
|
get OpenThread stack firmware version number
|
[
"get",
"OpenThread",
"stack",
"firmware",
"version",
"number"
] |
def getVersionNumber(self):
"""get OpenThread stack firmware version number"""
return self.__executeCommand('version')[0]
|
[
"def",
"getVersionNumber",
"(",
"self",
")",
":",
"return",
"self",
".",
"__executeCommand",
"(",
"'version'",
")",
"[",
"0",
"]"
] |
https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/harness-thci/OpenThread.py#L368-L370
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/gsutil/gslib/__main__.py
|
python
|
_HandleSigQuit
|
(signal_num, cur_stack_frame)
|
Called when user hits ^\\, so we can force a breakpoint in a running gsutil.
|
Called when user hits ^\\, so we can force a breakpoint in a running gsutil.
|
[
"Called",
"when",
"user",
"hits",
"^",
"\\\\",
"so",
"we",
"can",
"force",
"breakpoint",
"a",
"running",
"gsutil",
"."
] |
def _HandleSigQuit(signal_num, cur_stack_frame):
"""Called when user hits ^\\, so we can force breakpoint a running gsutil."""
import pdb # pylint: disable=g-import-not-at-top
pdb.set_trace()
|
[
"def",
"_HandleSigQuit",
"(",
"signal_num",
",",
"cur_stack_frame",
")",
":",
"import",
"pdb",
"# pylint: disable=g-import-not-at-top",
"pdb",
".",
"set_trace",
"(",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/gslib/__main__.py#L440-L443
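A handler like the one above is installed with the standard signal module; a minimal sketch (SIGQUIT is POSIX-only, so this does not work on Windows):
```python
import pdb
import signal

def handle_sigquit(signum, frame):
    """Drop into the debugger when the user hits ^\\ (SIGQUIT)."""
    pdb.set_trace()

signal.signal(signal.SIGQUIT, handle_sigquit)
```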
|
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/io/formats/style.py
|
python
|
Styler.highlight_max
|
(self, subset=None, color="yellow", axis=0)
|
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
|
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
A valid slice for ``data`` to limit the style application to.
color : str, default 'yellow'
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
Returns
-------
self : Styler
|
Highlight the maximum by shading the background.
|
[
"Highlight",
"the",
"maximum",
"by",
"shading",
"the",
"background",
"."
] |
def highlight_max(self, subset=None, color="yellow", axis=0):
"""
Highlight the maximum by shading the background.
Parameters
----------
subset : IndexSlice, default None
A valid slice for ``data`` to limit the style application to.
color : str, default 'yellow'
axis : {0 or 'index', 1 or 'columns', None}, default 0
Apply to each column (``axis=0`` or ``'index'``), to each row
(``axis=1`` or ``'columns'``), or to the entire DataFrame at once
with ``axis=None``.
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
|
[
"def",
"highlight_max",
"(",
"self",
",",
"subset",
"=",
"None",
",",
"color",
"=",
"\"yellow\"",
",",
"axis",
"=",
"0",
")",
":",
"return",
"self",
".",
"_highlight_handler",
"(",
"subset",
"=",
"subset",
",",
"color",
"=",
"color",
",",
"axis",
"=",
"axis",
",",
"max_",
"=",
"True",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/io/formats/style.py#L1296-L1314
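A usage sketch for the styler method above:
```python
import pandas as pd

df = pd.DataFrame({"a": [1, 9, 3], "b": [4, 2, 8]})
styled = df.style.highlight_max(axis=0)  # shade the max of each column yellow
html = styled.render()  # Styler.render() in this pandas era; newer versions use to_html()
```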
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/codegen.py
|
python
|
CodeLibrary.serialize_using_bitcode
|
(self)
|
return (self._name, 'bitcode', self._final_module.as_bitcode())
|
Serialize this library using its bitcode as the cached representation.
|
Serialize this library using its bitcode as the cached representation.
|
[
"Serialize",
"this",
"library",
"using",
"its",
"bitcode",
"as",
"the",
"cached",
"representation",
"."
] |
def serialize_using_bitcode(self):
"""
Serialize this library using its bitcode as the cached representation.
"""
self._ensure_finalized()
return (self._name, 'bitcode', self._final_module.as_bitcode())
|
[
"def",
"serialize_using_bitcode",
"(",
"self",
")",
":",
"self",
".",
"_ensure_finalized",
"(",
")",
"return",
"(",
"self",
".",
"_name",
",",
"'bitcode'",
",",
"self",
".",
"_final_module",
".",
"as_bitcode",
"(",
")",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/codegen.py#L407-L412
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/grid.py
|
python
|
Grid.SetColFormatCustom
|
(*args, **kwargs)
|
return _grid.Grid_SetColFormatCustom(*args, **kwargs)
|
SetColFormatCustom(self, int col, String typeName)
|
SetColFormatCustom(self, int col, String typeName)
|
[
"SetColFormatCustom",
"(",
"self",
"int",
"col",
"String",
"typeName",
")"
] |
def SetColFormatCustom(*args, **kwargs):
"""SetColFormatCustom(self, int col, String typeName)"""
return _grid.Grid_SetColFormatCustom(*args, **kwargs)
|
[
"def",
"SetColFormatCustom",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"Grid_SetColFormatCustom",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L1742-L1744
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_core.py
|
python
|
ImageHandler.GetAltExtensions
|
(*args, **kwargs)
|
return _core_.ImageHandler_GetAltExtensions(*args, **kwargs)
|
GetAltExtensions(self) -> wxArrayString
|
GetAltExtensions(self) -> wxArrayString
|
[
"GetAltExtensions",
"(",
"self",
")",
"-",
">",
"wxArrayString"
] |
def GetAltExtensions(*args, **kwargs):
"""GetAltExtensions(self) -> wxArrayString"""
return _core_.ImageHandler_GetAltExtensions(*args, **kwargs)
|
[
"def",
"GetAltExtensions",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"ImageHandler_GetAltExtensions",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L2628-L2630
|
|
swift/swift
|
12d031cf8177fdec0137f9aa7e2912fa23c4416b
|
3rdParty/SCons/scons-3.0.1/engine/SCons/Environment.py
|
python
|
MethodWrapper.clone
|
(self, new_object)
|
return self.__class__(new_object, self.method, self.name)
|
Returns an object that re-binds the underlying "method" to
the specified new object.
|
Returns an object that re-binds the underlying "method" to
the specified new object.
|
[
"Returns",
"an",
"object",
"that",
"re",
"-",
"binds",
"the",
"underlying",
"method",
"to",
"the",
"specified",
"new",
"object",
"."
] |
def clone(self, new_object):
"""
Returns an object that re-binds the underlying "method" to
the specified new object.
"""
return self.__class__(new_object, self.method, self.name)
|
[
"def",
"clone",
"(",
"self",
",",
"new_object",
")",
":",
"return",
"self",
".",
"__class__",
"(",
"new_object",
",",
"self",
".",
"method",
",",
"self",
".",
"name",
")"
] |
https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Environment.py#L226-L231
|
|
borglab/gtsam
|
a5bee157efce6a0563704bce6a5d188c29817f39
|
gtsam/3rdparty/GeographicLib/python/geographiclib/geodesic.py
|
python
|
Geodesic.__init__
|
(self, a, f)
|
Construct a Geodesic object
:param a: the equatorial radius of the ellipsoid in meters
:param f: the flattening of the ellipsoid
An exception is thrown if *a* or the polar semi-axis *b* = *a* (1 -
*f*) is not a finite positive quantity.
|
Construct a Geodesic object
|
[
"Construct",
"a",
"Geodesic",
"object"
] |
def __init__(self, a, f):
"""Construct a Geodesic object
:param a: the equatorial radius of the ellipsoid in meters
:param f: the flattening of the ellipsoid
An exception is thrown if *a* or the polar semi-axis *b* = *a* (1 -
*f*) is not a finite positive quantity.
"""
self.a = float(a)
"""The equatorial radius in meters (readonly)"""
self.f = float(f)
"""The flattening (readonly)"""
self._f1 = 1 - self.f
self._e2 = self.f * (2 - self.f)
self._ep2 = self._e2 / Math.sq(self._f1) # e2 / (1 - e2)
self._n = self.f / ( 2 - self.f)
self._b = self.a * self._f1
# authalic radius squared
self._c2 = (Math.sq(self.a) + Math.sq(self._b) *
(1 if self._e2 == 0 else
(Math.atanh(math.sqrt(self._e2)) if self._e2 > 0 else
math.atan(math.sqrt(-self._e2))) /
math.sqrt(abs(self._e2))))/2
# The sig12 threshold for "really short". Using the auxiliary sphere
# solution with dnm computed at (bet1 + bet2) / 2, the relative error in
# the azimuth consistency check is sig12^2 * abs(f) * min(1, 1-f/2) / 2.
# (Error measured for 1/100 < b/a < 100 and abs(f) >= 1/1000. For a given
# f and sig12, the max error occurs for lines near the pole. If the old
# rule for computing dnm = (dn1 + dn2)/2 is used, then the error increases
# by a factor of 2.) Setting this equal to epsilon gives sig12 = etol2.
# Here 0.1 is a safety factor (error decreased by 100) and max(0.001,
# abs(f)) stops etol2 getting too large in the nearly spherical case.
self._etol2 = 0.1 * Geodesic.tol2_ / math.sqrt( max(0.001, abs(self.f)) *
min(1.0, 1-self.f/2) / 2 )
if not(Math.isfinite(self.a) and self.a > 0):
raise ValueError("Equatorial radius is not positive")
if not(Math.isfinite(self._b) and self._b > 0):
raise ValueError("Polar semi-axis is not positive")
self._A3x = list(range(Geodesic.nA3x_))
self._C3x = list(range(Geodesic.nC3x_))
self._C4x = list(range(Geodesic.nC4x_))
self._A3coeff()
self._C3coeff()
self._C4coeff()
|
[
"def",
"__init__",
"(",
"self",
",",
"a",
",",
"f",
")",
":",
"self",
".",
"a",
"=",
"float",
"(",
"a",
")",
"\"\"\"The equatorial radius in meters (readonly)\"\"\"",
"self",
".",
"f",
"=",
"float",
"(",
"f",
")",
"\"\"\"The flattening (readonly)\"\"\"",
"self",
".",
"_f1",
"=",
"1",
"-",
"self",
".",
"f",
"self",
".",
"_e2",
"=",
"self",
".",
"f",
"*",
"(",
"2",
"-",
"self",
".",
"f",
")",
"self",
".",
"_ep2",
"=",
"self",
".",
"_e2",
"/",
"Math",
".",
"sq",
"(",
"self",
".",
"_f1",
")",
"# e2 / (1 - e2)",
"self",
".",
"_n",
"=",
"self",
".",
"f",
"/",
"(",
"2",
"-",
"self",
".",
"f",
")",
"self",
".",
"_b",
"=",
"self",
".",
"a",
"*",
"self",
".",
"_f1",
"# authalic radius squared",
"self",
".",
"_c2",
"=",
"(",
"Math",
".",
"sq",
"(",
"self",
".",
"a",
")",
"+",
"Math",
".",
"sq",
"(",
"self",
".",
"_b",
")",
"*",
"(",
"1",
"if",
"self",
".",
"_e2",
"==",
"0",
"else",
"(",
"Math",
".",
"atanh",
"(",
"math",
".",
"sqrt",
"(",
"self",
".",
"_e2",
")",
")",
"if",
"self",
".",
"_e2",
">",
"0",
"else",
"math",
".",
"atan",
"(",
"math",
".",
"sqrt",
"(",
"-",
"self",
".",
"_e2",
")",
")",
")",
"/",
"math",
".",
"sqrt",
"(",
"abs",
"(",
"self",
".",
"_e2",
")",
")",
")",
")",
"/",
"2",
"# The sig12 threshold for \"really short\". Using the auxiliary sphere",
"# solution with dnm computed at (bet1 + bet2) / 2, the relative error in",
"# the azimuth consistency check is sig12^2 * abs(f) * min(1, 1-f/2) / 2.",
"# (Error measured for 1/100 < b/a < 100 and abs(f) >= 1/1000. For a given",
"# f and sig12, the max error occurs for lines near the pole. If the old",
"# rule for computing dnm = (dn1 + dn2)/2 is used, then the error increases",
"# by a factor of 2.) Setting this equal to epsilon gives sig12 = etol2.",
"# Here 0.1 is a safety factor (error decreased by 100) and max(0.001,",
"# abs(f)) stops etol2 getting too large in the nearly spherical case.",
"self",
".",
"_etol2",
"=",
"0.1",
"*",
"Geodesic",
".",
"tol2_",
"/",
"math",
".",
"sqrt",
"(",
"max",
"(",
"0.001",
",",
"abs",
"(",
"self",
".",
"f",
")",
")",
"*",
"min",
"(",
"1.0",
",",
"1",
"-",
"self",
".",
"f",
"/",
"2",
")",
"/",
"2",
")",
"if",
"not",
"(",
"Math",
".",
"isfinite",
"(",
"self",
".",
"a",
")",
"and",
"self",
".",
"a",
">",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"Equatorial radius is not positive\"",
")",
"if",
"not",
"(",
"Math",
".",
"isfinite",
"(",
"self",
".",
"_b",
")",
"and",
"self",
".",
"_b",
">",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"Polar semi-axis is not positive\"",
")",
"self",
".",
"_A3x",
"=",
"list",
"(",
"range",
"(",
"Geodesic",
".",
"nA3x_",
")",
")",
"self",
".",
"_C3x",
"=",
"list",
"(",
"range",
"(",
"Geodesic",
".",
"nC3x_",
")",
")",
"self",
".",
"_C4x",
"=",
"list",
"(",
"range",
"(",
"Geodesic",
".",
"nC4x_",
")",
")",
"self",
".",
"_A3coeff",
"(",
")",
"self",
".",
"_C3coeff",
"(",
")",
"self",
".",
"_C4coeff",
"(",
")"
] |
https://github.com/borglab/gtsam/blob/a5bee157efce6a0563704bce6a5d188c29817f39/gtsam/3rdparty/GeographicLib/python/geographiclib/geodesic.py#L274-L320
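A usage sketch for the constructor above with WGS84 parameters (geographiclib also ships these pre-built as Geodesic.WGS84):
```python
from geographiclib.geodesic import Geodesic

wgs84 = Geodesic(6378137, 1 / 298.257223563)  # equatorial radius [m], flattening
# Inverse problem: geodesic distance Paris -> New York.
g = wgs84.Inverse(48.8566, 2.3522, 40.7128, -74.0060)
print(g["s12"] / 1000, "km")  # roughly 5837 km
```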
|
||
adobe/chromium
|
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
|
third_party/npapi/npspy/analyze_streams.py
|
python
|
ReadFile
|
(filename, flags='rb')
|
return result
|
Returns the contents of a file.
|
Returns the contents of a file.
|
[
"Returns",
"the",
"contents",
"of",
"a",
"file",
"."
] |
def ReadFile(filename, flags='rb'):
"""Returns the contents of a file."""
file = open(filename, flags)
result = file.read()
file.close()
return result
|
[
"def",
"ReadFile",
"(",
"filename",
",",
"flags",
"=",
"'rb'",
")",
":",
"file",
"=",
"open",
"(",
"filename",
",",
"flags",
")",
"result",
"=",
"file",
".",
"read",
"(",
")",
"file",
".",
"close",
"(",
")",
"return",
"result"
] |
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/npapi/npspy/analyze_streams.py#L6-L11
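The helper above predates context managers; the modern equivalent closes the file even when read() raises:
```python
def read_file(filename, flags="rb"):
    """Return the contents of a file, closing it deterministically."""
    with open(filename, flags) as f:
        return f.read()
```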
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/req/constructors.py
|
python
|
_looks_like_path
|
(name)
|
return False
|
Checks whether the string "looks like" a path on the filesystem.
This does not check whether the target actually exists; it only judges from
the appearance.
Returns true if any of the following conditions is true:
* a path separator is found (either os.path.sep or os.path.altsep);
* a dot is found (which represents the current directory).
|
Checks whether the string "looks like" a path on the filesystem.
|
[
"Checks",
"whether",
"the",
"string",
"looks",
"like",
"a",
"path",
"on",
"the",
"filesystem",
"."
] |
def _looks_like_path(name):
# type: (str) -> bool
"""Checks whether the string "looks like" a path on the filesystem.
This does not check whether the target actually exists; it only judges from
the appearance.
Returns true if any of the following conditions is true:
* a path separator is found (either os.path.sep or os.path.altsep);
* a dot is found (which represents the current directory).
"""
if os.path.sep in name:
return True
if os.path.altsep is not None and os.path.altsep in name:
return True
if name.startswith("."):
return True
return False
|
[
"def",
"_looks_like_path",
"(",
"name",
")",
":",
"# type: (str) -> bool",
"if",
"os",
".",
"path",
".",
"sep",
"in",
"name",
":",
"return",
"True",
"if",
"os",
".",
"path",
".",
"altsep",
"is",
"not",
"None",
"and",
"os",
".",
"path",
".",
"altsep",
"in",
"name",
":",
"return",
"True",
"if",
"name",
".",
"startswith",
"(",
"\".\"",
")",
":",
"return",
"True",
"return",
"False"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/req/constructors.py#L459-L493
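A behavior sketch for the heuristic above (it is a private pip helper, so the reimplementation here is only to illustrate the three rules):
```python
import os

def looks_like_path(name):
    """True if name contains a path separator or starts with a dot."""
    if os.path.sep in name:
        return True
    if os.path.altsep is not None and os.path.altsep in name:
        return True
    return name.startswith(".")

assert looks_like_path("./pkg") and looks_like_path("src/pkg")
assert not looks_like_path("requests")
```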
|
|
miyosuda/TensorFlowAndroidDemo
|
35903e0221aa5f109ea2dbef27f20b52e317f42d
|
jni-build/jni/include/tensorflow/python/ops/io_ops.py
|
python
|
TFRecordReader.__init__
|
(self, name=None, options=None)
|
Create a TFRecordReader.
Args:
name: A name for the operation (optional).
options: A TFRecordOptions object (optional).
|
Create a TFRecordReader.
|
[
"Create",
"a",
"TFRecordReader",
"."
] |
def __init__(self, name=None, options=None):
"""Create a TFRecordReader.
Args:
name: A name for the operation (optional).
options: A TFRecordOptions object (optional).
"""
compression_type_string = ""
if (options and
options.compression_type == python_io.TFRecordCompressionType.ZLIB):
compression_type_string = "ZLIB"
rr = gen_io_ops._tf_record_reader(name=name,
compression_type=compression_type_string)
super(TFRecordReader, self).__init__(rr)
|
[
"def",
"__init__",
"(",
"self",
",",
"name",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"compression_type_string",
"=",
"\"\"",
"if",
"(",
"options",
"and",
"options",
".",
"compression_type",
"==",
"python_io",
".",
"TFRecordCompressionType",
".",
"ZLIB",
")",
":",
"compression_type_string",
"=",
"\"ZLIB\"",
"rr",
"=",
"gen_io_ops",
".",
"_tf_record_reader",
"(",
"name",
"=",
"name",
",",
"compression_type",
"=",
"compression_type_string",
")",
"super",
"(",
"TFRecordReader",
",",
"self",
")",
".",
"__init__",
"(",
"rr",
")"
] |
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/io_ops.py#L527-L541
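A usage sketch in the TF1 queue-based input style this class belongs to (the filename is hypothetical; this API predates tf.data):
```python
import tensorflow as tf  # 1.x-era API

filename_queue = tf.train.string_input_producer(["data.tfrecords"])
options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)
reader = tf.TFRecordReader(options=options)
key, serialized = reader.read(filename_queue)  # one serialized record per call
```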
|
||
FreeCAD/FreeCAD
|
ba42231b9c6889b89e064d6d563448ed81e376ec
|
src/Mod/Draft/drafttaskpanels/task_scale.py
|
python
|
ScaleTaskPanel.setClone
|
(self, state)
|
Set the clone and scale option.
|
Set the clone and scale option.
|
[
"Set",
"the",
"clone",
"and",
"scale",
"option",
"."
] |
def setClone(self, state):
"""Set the clone and scale option."""
App.ParamGet("User parameter:BaseApp/Preferences/Mod/Draft").SetBool("ScaleClone", state)
if state and self.isCopy.isChecked():
self.isCopy.setChecked(False)
if state and self.isSubelementMode.isChecked():
self.isSubelementMode.setChecked(False)
|
[
"def",
"setClone",
"(",
"self",
",",
"state",
")",
":",
"App",
".",
"ParamGet",
"(",
"\"User parameter:BaseApp/Preferences/Mod/Draft\"",
")",
".",
"SetBool",
"(",
"\"ScaleClone\"",
",",
"state",
")",
"if",
"state",
"and",
"self",
".",
"isCopy",
".",
"isChecked",
"(",
")",
":",
"self",
".",
"isCopy",
".",
"setChecked",
"(",
"False",
")",
"if",
"state",
"and",
"self",
".",
"isSubelementMode",
".",
"isChecked",
"(",
")",
":",
"self",
".",
"isSubelementMode",
".",
"setChecked",
"(",
"False",
")"
] |
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/drafttaskpanels/task_scale.py#L129-L135
|
||
microsoft/clang
|
86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5
|
tools/scan-build-py/libscanbuild/analyze.py
|
python
|
get_ctu_config_from_args
|
(args)
|
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
func_map_cmd=args.func_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', func_map_cmd=''))
|
CTU configuration is created from the chosen phases and dir.
|
CTU configuration is created from the chosen phases and dir.
|
[
"CTU",
"configuration",
"is",
"created",
"from",
"the",
"chosen",
"phases",
"and",
"dir",
"."
] |
def get_ctu_config_from_args(args):
""" CTU configuration is created from the chosen phases and dir. """
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
func_map_cmd=args.func_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', func_map_cmd=''))
|
[
"def",
"get_ctu_config_from_args",
"(",
"args",
")",
":",
"return",
"(",
"CtuConfig",
"(",
"collect",
"=",
"args",
".",
"ctu_phases",
".",
"collect",
",",
"analyze",
"=",
"args",
".",
"ctu_phases",
".",
"analyze",
",",
"dir",
"=",
"args",
".",
"ctu_dir",
",",
"func_map_cmd",
"=",
"args",
".",
"func_map_cmd",
")",
"if",
"hasattr",
"(",
"args",
",",
"'ctu_phases'",
")",
"and",
"hasattr",
"(",
"args",
".",
"ctu_phases",
",",
"'dir'",
")",
"else",
"CtuConfig",
"(",
"collect",
"=",
"False",
",",
"analyze",
"=",
"False",
",",
"dir",
"=",
"''",
",",
"func_map_cmd",
"=",
"''",
")",
")"
] |
https://github.com/microsoft/clang/blob/86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5/tools/scan-build-py/libscanbuild/analyze.py#L113-L122
|
|
miyosuda/TensorFlowAndroidMNIST
|
7b5a4603d2780a8a2834575706e9001977524007
|
jni-build/jni/include/tensorflow/python/ops/math_ops.py
|
python
|
reduce_all
|
(input_tensor, reduction_indices=None, keep_dims=False,
name=None)
|
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
|
Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
|
Computes the "logical and" of elements across dimensions of a tensor.
|
[
"Computes",
"the",
"logical",
"and",
"of",
"elements",
"across",
"dimensions",
"of",
"a",
"tensor",
"."
] |
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
|
[
"def",
"reduce_all",
"(",
"input_tensor",
",",
"reduction_indices",
"=",
"None",
",",
"keep_dims",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"return",
"gen_math_ops",
".",
"_all",
"(",
"input_tensor",
",",
"_ReductionDims",
"(",
"input_tensor",
",",
"reduction_indices",
")",
",",
"keep_dims",
",",
"name",
"=",
"name",
")"
] |
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/math_ops.py#L1179-L1213
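The record above documents a TensorFlow 0.x API; as a sketch of the same reduction semantics without installing that version, the NumPy analogue below reproduces the docstring's example. `np.all` mirrors `reduce_all`, with `axis` playing the role of `reduction_indices` and `keepdims` the role of `keep_dims`.

```python
import numpy as np

x = np.array([[True, True],
              [False, False]])

print(np.all(x))          # False -- no axis given, all dimensions reduced
print(np.all(x, axis=0))  # [False False]
print(np.all(x, axis=1))  # [ True False]

# keepdims retains the reduced dimension with length 1, shape (2, 1).
print(np.all(x, axis=1, keepdims=True))  # [[ True] [False]]
```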
|
|
pyne/pyne
|
0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3
|
pyne/alara.py
|
python
|
_get_subvoxel_array
|
(mesh, cell_mats)
|
return subvoxel_array
|
This function returns an array of subvoxels.
Parameters
----------
mesh : PyNE Mesh object
The Mesh object for which the geometry is discretized.
return : subvoxel_array: structured array
A sorted, one dimensional array, each entry containing the following
fields:
:svid: int
The index of non-void subvoxel id
:idx: int
The idx of the voxel
:scid: int
The cell index of the cell in that voxel
|
This function returns an array of subvoxels.
Parameters
----------
mesh : PyNE Mesh object
The Mesh object for which the geometry is discretized.
|
[
"This",
"function",
"returns",
"an",
"array",
"of",
"subvoxels",
".",
"Parameters",
"----------",
"mesh",
":",
"PyNE",
"Mesh",
"object",
"The",
"Mesh",
"object",
"for",
"which",
"the",
"geometry",
"is",
"discretized",
"."
] |
def _get_subvoxel_array(mesh, cell_mats):
"""
This function returns an array of subvoxels.
Parameters
----------
mesh : PyNE Mesh object
The Mesh object for which the geometry is discretized.
return : subvoxel_array: structured array
A sorted, one dimensional array, each entry containing the following
fields:
:svid: int
The index of non-void subvoxel id
:idx: int
The idx of the voxel
:scid: int
The cell index of the cell in that voxel
"""
cell_number_tag = mesh.cell_number
subvoxel_array = np.zeros(0, dtype=[('svid', np.int64),
('idx', np.int64),
('scid', np.int64)])
temp_subvoxel = np.zeros(1, dtype=[('svid', np.int64),
('idx', np.int64),
('scid', np.int64)])
# calculate the total number of non-void sub-voxel
non_void_sv_num = 0
for i, _, ve in mesh:
for c, cell in enumerate(np.atleast_1d(cell_number_tag[ve])):
if cell > 0 and len(cell_mats[cell].comp): # non-void cell
temp_subvoxel[0] = (non_void_sv_num, i, c)
subvoxel_array = np.append(subvoxel_array, temp_subvoxel)
non_void_sv_num += 1
return subvoxel_array
|
[
"def",
"_get_subvoxel_array",
"(",
"mesh",
",",
"cell_mats",
")",
":",
"cell_number_tag",
"=",
"mesh",
".",
"cell_number",
"subvoxel_array",
"=",
"np",
".",
"zeros",
"(",
"0",
",",
"dtype",
"=",
"[",
"(",
"'svid'",
",",
"np",
".",
"int64",
")",
",",
"(",
"'idx'",
",",
"np",
".",
"int64",
")",
",",
"(",
"'scid'",
",",
"np",
".",
"int64",
")",
"]",
")",
"temp_subvoxel",
"=",
"np",
".",
"zeros",
"(",
"1",
",",
"dtype",
"=",
"[",
"(",
"'svid'",
",",
"np",
".",
"int64",
")",
",",
"(",
"'idx'",
",",
"np",
".",
"int64",
")",
",",
"(",
"'scid'",
",",
"np",
".",
"int64",
")",
"]",
")",
"# calculate the total number of non-void sub-voxel",
"non_void_sv_num",
"=",
"0",
"for",
"i",
",",
"_",
",",
"ve",
"in",
"mesh",
":",
"for",
"c",
",",
"cell",
"in",
"enumerate",
"(",
"np",
".",
"atleast_1d",
"(",
"cell_number_tag",
"[",
"ve",
"]",
")",
")",
":",
"if",
"cell",
">",
"0",
"and",
"len",
"(",
"cell_mats",
"[",
"cell",
"]",
".",
"comp",
")",
":",
"# non-void cell",
"temp_subvoxel",
"[",
"0",
"]",
"=",
"(",
"non_void_sv_num",
",",
"i",
",",
"c",
")",
"subvoxel_array",
"=",
"np",
".",
"append",
"(",
"subvoxel_array",
",",
"temp_subvoxel",
")",
"non_void_sv_num",
"+=",
"1",
"return",
"subvoxel_array"
] |
https://github.com/pyne/pyne/blob/0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3/pyne/alara.py#L1003-L1039
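A standalone sketch of the accumulation pattern in the record above, with hypothetical voxel/cell data standing in for the PyNE mesh types. It also shows a common refinement: collecting rows in a list and constructing the structured array once, since `np.append` in a loop copies the whole array on every call.

```python
import numpy as np

# Hypothetical stand-in for the mesh iteration: voxel index -> cell ids,
# where cell id 0 marks a void cell (not PyNE types).
voxel_cells = {0: [3, 0], 1: [5]}

sv_dtype = [('svid', np.int64), ('idx', np.int64), ('scid', np.int64)]
rows = []
svid = 0
for idx, cells in voxel_cells.items():
    for scid, cell in enumerate(cells):
        if cell > 0:                  # skip void cells
            rows.append((svid, idx, scid))
            svid += 1

# One construction instead of repeated np.append calls.
subvoxel_array = np.array(rows, dtype=sv_dtype)
print(subvoxel_array)  # [(0, 0, 0) (1, 1, 0)]
```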
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py
|
python
|
SourceLoader.get_code
|
(self, fullname)
|
return code_object
|
Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
|
Concrete implementation of InspectLoader.get_code.
|
[
"Concrete",
"implementation",
"of",
"InspectLoader",
".",
"get_code",
"."
] |
def get_code(self, fullname):
"""Concrete implementation of InspectLoader.get_code.
Reading of bytecode requires path_stats to be implemented. To write
bytecode, set_data must also be implemented.
"""
source_path = self.get_filename(fullname)
source_mtime = None
source_bytes = None
source_hash = None
hash_based = False
check_source = True
try:
bytecode_path = cache_from_source(source_path)
except NotImplementedError:
bytecode_path = None
else:
try:
st = self.path_stats(source_path)
except OSError:
pass
else:
source_mtime = int(st['mtime'])
try:
data = self.get_data(bytecode_path)
except OSError:
pass
else:
exc_details = {
'name': fullname,
'path': bytecode_path,
}
try:
flags = _classify_pyc(data, fullname, exc_details)
bytes_data = memoryview(data)[16:]
hash_based = flags & 0b1 != 0
if hash_based:
check_source = flags & 0b10 != 0
if (_imp.check_hash_based_pycs != 'never' and
(check_source or
_imp.check_hash_based_pycs == 'always')):
source_bytes = self.get_data(source_path)
source_hash = _imp.source_hash(
_RAW_MAGIC_NUMBER,
source_bytes,
)
_validate_hash_pyc(data, source_hash, fullname,
exc_details)
else:
_validate_timestamp_pyc(
data,
source_mtime,
st['size'],
fullname,
exc_details,
)
except (ImportError, EOFError):
pass
else:
_bootstrap._verbose_message('{} matches {}', bytecode_path,
source_path)
return _compile_bytecode(bytes_data, name=fullname,
bytecode_path=bytecode_path,
source_path=source_path)
if source_bytes is None:
source_bytes = self.get_data(source_path)
code_object = self.source_to_code(source_bytes, source_path)
_bootstrap._verbose_message('code object from {}', source_path)
if (not sys.dont_write_bytecode and bytecode_path is not None and
source_mtime is not None):
if hash_based:
if source_hash is None:
source_hash = _imp.source_hash(source_bytes)
data = _code_to_hash_pyc(code_object, source_hash, check_source)
else:
data = _code_to_timestamp_pyc(code_object, source_mtime,
len(source_bytes))
try:
self._cache_bytecode(source_path, bytecode_path, data)
except NotImplementedError:
pass
return code_object
|
[
"def",
"get_code",
"(",
"self",
",",
"fullname",
")",
":",
"source_path",
"=",
"self",
".",
"get_filename",
"(",
"fullname",
")",
"source_mtime",
"=",
"None",
"source_bytes",
"=",
"None",
"source_hash",
"=",
"None",
"hash_based",
"=",
"False",
"check_source",
"=",
"True",
"try",
":",
"bytecode_path",
"=",
"cache_from_source",
"(",
"source_path",
")",
"except",
"NotImplementedError",
":",
"bytecode_path",
"=",
"None",
"else",
":",
"try",
":",
"st",
"=",
"self",
".",
"path_stats",
"(",
"source_path",
")",
"except",
"OSError",
":",
"pass",
"else",
":",
"source_mtime",
"=",
"int",
"(",
"st",
"[",
"'mtime'",
"]",
")",
"try",
":",
"data",
"=",
"self",
".",
"get_data",
"(",
"bytecode_path",
")",
"except",
"OSError",
":",
"pass",
"else",
":",
"exc_details",
"=",
"{",
"'name'",
":",
"fullname",
",",
"'path'",
":",
"bytecode_path",
",",
"}",
"try",
":",
"flags",
"=",
"_classify_pyc",
"(",
"data",
",",
"fullname",
",",
"exc_details",
")",
"bytes_data",
"=",
"memoryview",
"(",
"data",
")",
"[",
"16",
":",
"]",
"hash_based",
"=",
"flags",
"&",
"0b1",
"!=",
"0",
"if",
"hash_based",
":",
"check_source",
"=",
"flags",
"&",
"0b10",
"!=",
"0",
"if",
"(",
"_imp",
".",
"check_hash_based_pycs",
"!=",
"'never'",
"and",
"(",
"check_source",
"or",
"_imp",
".",
"check_hash_based_pycs",
"==",
"'always'",
")",
")",
":",
"source_bytes",
"=",
"self",
".",
"get_data",
"(",
"source_path",
")",
"source_hash",
"=",
"_imp",
".",
"source_hash",
"(",
"_RAW_MAGIC_NUMBER",
",",
"source_bytes",
",",
")",
"_validate_hash_pyc",
"(",
"data",
",",
"source_hash",
",",
"fullname",
",",
"exc_details",
")",
"else",
":",
"_validate_timestamp_pyc",
"(",
"data",
",",
"source_mtime",
",",
"st",
"[",
"'size'",
"]",
",",
"fullname",
",",
"exc_details",
",",
")",
"except",
"(",
"ImportError",
",",
"EOFError",
")",
":",
"pass",
"else",
":",
"_bootstrap",
".",
"_verbose_message",
"(",
"'{} matches {}'",
",",
"bytecode_path",
",",
"source_path",
")",
"return",
"_compile_bytecode",
"(",
"bytes_data",
",",
"name",
"=",
"fullname",
",",
"bytecode_path",
"=",
"bytecode_path",
",",
"source_path",
"=",
"source_path",
")",
"if",
"source_bytes",
"is",
"None",
":",
"source_bytes",
"=",
"self",
".",
"get_data",
"(",
"source_path",
")",
"code_object",
"=",
"self",
".",
"source_to_code",
"(",
"source_bytes",
",",
"source_path",
")",
"_bootstrap",
".",
"_verbose_message",
"(",
"'code object from {}'",
",",
"source_path",
")",
"if",
"(",
"not",
"sys",
".",
"dont_write_bytecode",
"and",
"bytecode_path",
"is",
"not",
"None",
"and",
"source_mtime",
"is",
"not",
"None",
")",
":",
"if",
"hash_based",
":",
"if",
"source_hash",
"is",
"None",
":",
"source_hash",
"=",
"_imp",
".",
"source_hash",
"(",
"source_bytes",
")",
"data",
"=",
"_code_to_hash_pyc",
"(",
"code_object",
",",
"source_hash",
",",
"check_source",
")",
"else",
":",
"data",
"=",
"_code_to_timestamp_pyc",
"(",
"code_object",
",",
"source_mtime",
",",
"len",
"(",
"source_bytes",
")",
")",
"try",
":",
"self",
".",
"_cache_bytecode",
"(",
"source_path",
",",
"bytecode_path",
",",
"data",
")",
"except",
"NotImplementedError",
":",
"pass",
"return",
"code_object"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/importlib/_bootstrap_external.py#L916-L998
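The flag checks in `get_code` above distinguish hash-based from timestamp-based bytecode caches. As a minimal sketch of the 16-byte `.pyc` header layout that logic validates (CPython 3.7+, PEP 552): magic number, a flags word whose low bit selects hash-based pycs and whose second bit selects source checking, then either a source hash or an mtime/size pair. The example path at the bottom is hypothetical.

```python
import struct
import importlib.util

def describe_pyc_header(pyc_path):
    """Decode the 16-byte header of a CPython 3.7+ .pyc file."""
    with open(pyc_path, 'rb') as f:
        header = f.read(16)
    magic, flags = struct.unpack('<4sI', header[:8])
    if flags & 0b1:                          # hash-based pyc
        check_source = bool(flags & 0b10)    # validate against source hash?
        (source_hash,) = struct.unpack('<Q', header[8:16])
        return ('hash-based', check_source, source_hash)
    mtime, size = struct.unpack('<II', header[8:16])
    return ('timestamp-based', mtime, size)

# Example usage (hypothetical module path):
# pyc = importlib.util.cache_from_source('mymodule.py')
# print(describe_pyc_header(pyc))
```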
|