| column | type |
| --- | --- |
| nwo | string (5-86 chars) |
| sha | string (40 chars) |
| path | string (4-189 chars) |
| language | string (1 class) |
| identifier | string (1-94 chars) |
| parameters | string (2-4.03k chars) |
| argument_list | string (1 class) |
| return_statement | string (0-11.5k chars) |
| docstring | string (1-33.2k chars) |
| docstring_summary | string (0-5.15k chars) |
| docstring_tokens | list |
| function | string (34-151k chars) |
| function_tokens | list |
| url | string (90-278 chars) |

nwo: facebookincubator/BOLT
sha: 88c70afe9d388ad430cc150cc158641701397f70
path: mlir/python/mlir/dialects/_ods_common.py
language: python
identifier: segmented_accessor
parameters: (elements, raw_segments, idx)
return_statement: return elements[start:end]
docstring:
Returns a slice of elements corresponding to the idx-th segment.
elements: a sliceable container (operands or results).
raw_segments: an mlir.ir.Attribute, of DenseIntElements subclass containing
sizes of the segments.
idx: index of the segment.
function:
```python
def segmented_accessor(elements, raw_segments, idx):
"""
Returns a slice of elements corresponding to the idx-th segment.
elements: a sliceable container (operands or results).
raw_segments: an mlir.ir.Attribute, of DenseIntElements subclass containing
sizes of the segments.
idx: index of the segment.
"""
segments = _cext.ir.DenseIntElementsAttr(raw_segments)
start = sum(segments[i] for i in range(idx))
end = start + segments[idx]
return elements[start:end]
```
url: https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/mlir/python/mlir/dialects/_ods_common.py#L76-L88

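A minimal stand-alone sketch of the slicing arithmetic used by segmented_accessor, with a plain Python list of segment sizes standing in for the DenseIntElementsAttr (an assumption; the real accessor needs an MLIR context and the `_cext` bindings):

```python
# Hypothetical stand-in: segment_sizes is a plain list, not an MLIR attribute.
def segmented_slice(elements, segment_sizes, idx):
    start = sum(segment_sizes[:idx])  # elements consumed by earlier segments
    return elements[start:start + segment_sizes[idx]]

operands = ["a", "b", "c", "d", "e"]
assert segmented_slice(operands, [2, 1, 2], 1) == ["c"]
assert segmented_slice(operands, [2, 1, 2], 2) == ["d", "e"]
```
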
nwo: mindspore-ai/mindspore
sha: fb8fd3338605bb34fa5cea054e535a8b1d753fab
path: mindspore/python/mindspore/nn/optim/thor.py
language: python
identifier: ThorAscend._get_second_gradients_one
parameters: (self, params_len, gradients, new_grads)
return_statement: return new_grads
docstring: get second gradients one
function:
```python
def _get_second_gradients_one(self, params_len, gradients, new_grads):
"""get second gradients one"""
for i in range(params_len):
g = gradients[i]
thor_layer_count = self.weight_fim_idx_map[i]
conv_layer_count = self.weight_conv_idx_map[i]
layer_type = self.weight_layertype_idx_map[i]
matrix_a = self.matrix_a[thor_layer_count]
matrix_g = self.matrix_g[thor_layer_count]
matrix_max = self.matrix_max_inv[thor_layer_count]
grad_shape = self.shape(g)
if layer_type == FC:
if grad_shape[0] == 1001:
g = self.cube_matmul_left_fc(matrix_g, g)
g = self.cube_matmul_right_fc(g, matrix_a, matrix_max)
else:
temp_a = self.cast(matrix_a, mstype.float16)
temp_g = self.cast(matrix_g, mstype.float16)
g = self.cast(g, mstype.float16)
g = self.matmul(temp_g, g)
g = self.matmul(g, temp_a)
g = self.cast(g, mstype.float32)
g = self.mul(g, matrix_max)
elif layer_type == Conv:
matmul_support_flag = self.conv_matmul_support_map[conv_layer_count]
if matmul_support_flag == 1:
g = self.cube_matmul_left(matrix_g, g)
g = self.cube_matmul_right_mul(g, matrix_a, matrix_max)
else:
g = self.reshape(g, (grad_shape[0], grad_shape[1] * grad_shape[2] * grad_shape[3]))
temp_a = self.cast(matrix_a, mstype.float16)
temp_g = self.cast(matrix_g, mstype.float16)
g = self.cast(g, mstype.float16)
g = self.matmul(temp_g, g)
g = self.matmul(g, temp_a)
g = self.cast(g, mstype.float32)
g = self.mul(g, matrix_max)
g = self.reshape(g, grad_shape)
new_grads = new_grads + (g,)
return new_grads
```
url: https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/optim/thor.py#L1107-L1146

nwo: microsoft/ivy
sha: 9f3c7ecc0b2383129fdd0953e10890d98d09a82d
path: ivy/ivy_parser.py
language: python
identifier: p_sequence_lcb_rcb
parameters: (p)
return_statement:
docstring: sequence : LCB RCB
function:
```python
def p_sequence_lcb_rcb(p):
'sequence : LCB RCB'
p[0] = Sequence()
p[0].lineno = get_lineno(p,1)
```
url: https://github.com/microsoft/ivy/blob/9f3c7ecc0b2383129fdd0953e10890d98d09a82d/ivy/ivy_parser.py#L2050-L2053

nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py
language: python
identifier: Text.window_configure
parameters: (self, index, cnf=None, **kw)
return_statement: return self._configure(('window', 'configure', index), cnf, kw)
docstring: Configure an embedded window at INDEX.
function:
```python
def window_configure(self, index, cnf=None, **kw):
"""Configure an embedded window at INDEX."""
return self._configure(('window', 'configure', index), cnf, kw)
```
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L3415-L3417

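A hedged usage sketch for Text.window_configure: embed a widget at an index with window_create, then adjust its options (requires a display to run):

```python
import tkinter as tk

root = tk.Tk()
text = tk.Text(root)
text.pack()
button = tk.Button(text, text="click me")
text.window_create("1.0", window=button)   # embed the widget at index 1.0
text.window_configure("1.0", padx=4)       # forwards to ('window', 'configure', index)
root.update()
```
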
nwo: idaholab/moose
sha: 9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
path: python/peacock/Input/BlockInfo.py
language: python
identifier: BlockInfo._orderedNames
parameters: (self, first, complete)
return_statement: return l
docstring:
Add in elements from the list "complete" to the end
of the "first" if they are not already in "first"
Input:
first[list]: These elements will be first in the returned list
complete[list]: These elements will come after first
Return:
list: The elements in "complete" with elements in "first" first.
function:
```python
def _orderedNames(self, first, complete):
"""
Add in elements from the list "complete" to the end
of the "first" if they are not already in "first"
Input:
first[list]: These elements will be first in the returned list
complete[list]: These elements will come after first
Return:
list: The elements in "complete" with elements in "first" first.
"""
l = first[:]
for x in complete:
if x not in l:
l.append(x)
return l
```
url: https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/peacock/Input/BlockInfo.py#L420-L434

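The merge rule is easy to check in isolation: "first" keeps its order, and anything in "complete" not already present is appended. A small sketch:

```python
def ordered_names(first, complete):
    merged = first[:]                 # copy so the caller's list is untouched
    for x in complete:
        if x not in merged:
            merged.append(x)
    return merged

assert ordered_names(["b", "a"], ["a", "c", "b", "d"]) == ["b", "a", "c", "d"]
```
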
nwo: catboost/catboost
sha: 167f64f237114a4d10b2b4ee42adb4569137debe
path: contrib/python/pandas/py2/pandas/core/indexes/multi.py
language: python
identifier: MultiIndex.reorder_levels
parameters: (self, order)
return_statement:
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
docstring:
Rearrange levels using input order. May not drop or duplicate levels
Parameters
----------
function:
```python
def reorder_levels(self, order):
"""
Rearrange levels using input order. May not drop or duplicate levels
Parameters
----------
"""
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
```
url: https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/indexes/multi.py#L1997-L2014

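A hedged usage sketch: the public pandas API keeps this method, so the reordering can be demonstrated directly (assuming current pandas matches this vendored py2 copy):

```python
import pandas as pd

mi = pd.MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["letter", "number"])
swapped = mi.reorder_levels(["number", "letter"])
print(list(swapped))    # [(1, 'a'), (2, 'b')]
print(swapped.names)    # ['number', 'letter']
```
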
nwo: ppwwyyxx/speaker-recognition
sha: 15d7bf32ad4ba2f1543e1287b03f3f2e6791d4dd
path: src/feature/MFCC.py
language: python
identifier: extract
parameters: (fs, signal=None, diff=False, **kwargs)
return_statement: return ret
docstring: accept two argument, or one as a tuple
function:
```python
def extract(fs, signal=None, diff=False, **kwargs):
"""accept two argument, or one as a tuple"""
if signal is None:
assert type(fs) == tuple
fs, signal = fs[0], fs[1]
signal = cast['float'](signal)
ret = get_mfcc_extractor(fs, **kwargs).extract(signal)
if diff:
return diff_feature(ret)
return ret
```
url: https://github.com/ppwwyyxx/speaker-recognition/blob/15d7bf32ad4ba2f1543e1287b03f3f2e6791d4dd/src/feature/MFCC.py#L123-L132

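The two calling conventions in the docstring, isolated as a minimal sketch: passing (fs, signal) as two arguments or as one tuple must behave the same.

```python
def extract(fs, signal=None):
    if signal is None:                 # single-argument form: a (fs, signal) tuple
        assert type(fs) == tuple
        fs, signal = fs[0], fs[1]
    return fs, signal

assert extract(16000, [0.1, 0.2]) == extract((16000, [0.1, 0.2]))
```
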
nwo: kamyu104/LeetCode-Solutions
sha: 77605708a927ea3b85aee5a479db733938c7c211
path: Python/number-of-good-pairs.py
language: python
identifier: Solution.numIdenticalPairs
parameters: (self, nums)
return_statement: return sum(c*(c-1)//2 for c in collections.Counter(nums).itervalues())
docstring:
:type nums: List[int]
:rtype: int
function:
```python
def numIdenticalPairs(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum(c*(c-1)//2 for c in collections.Counter(nums).itervalues())
```
url: https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/number-of-good-pairs.py#L8-L13

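The counting identity behind the one-liner: a value occurring c times contributes C(c, 2) = c*(c-1)//2 identical pairs. A Python 3 port (itervalues() is py2-only):

```python
import collections

def num_identical_pairs(nums):
    return sum(c * (c - 1) // 2 for c in collections.Counter(nums).values())

# 1 occurs three times (3 pairs), 3 occurs twice (1 pair): 4 good pairs total.
assert num_identical_pairs([1, 2, 3, 1, 1, 3]) == 4
```
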
nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/osx_cocoa/dataview.py
language: python
identifier: DataViewListCtrl.SetTextValue
parameters: (*args, **kwargs)
return_statement: return _dataview.DataViewListCtrl_SetTextValue(*args, **kwargs)
docstring: SetTextValue(self, String value, unsigned int row, unsigned int col)
function:
```python
def SetTextValue(*args, **kwargs):
"""SetTextValue(self, String value, unsigned int row, unsigned int col)"""
return _dataview.DataViewListCtrl_SetTextValue(*args, **kwargs)
```
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/dataview.py#L2185-L2187

nwo: hfinkel/llvm-project-cxxjit
sha: 91084ef018240bbb8e24235ff5cd8c355a9c1a1e
path: clang/bindings/python/clang/cindex.py
language: python
identifier: Cursor.get_children
parameters: (self)
return_statement: return iter(children)
docstring: Return an iterator for accessing the children of this cursor.
function:
```python
def get_children(self):
"""Return an iterator for accessing the children of this cursor."""
# FIXME: Expose iteration from CIndex, PR6125.
def visitor(child, parent, children):
# FIXME: Document this assertion in API.
# FIXME: There should just be an isNull method.
assert child != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
child._tu = self._tu
children.append(child)
return 1 # continue
children = []
conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
children)
return iter(children)
```
url: https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/clang/bindings/python/clang/cindex.py#L1824-L1840

nwo: rapidsai/cudf
sha: d5b2448fc69f17509304d594f029d0df56984962
path: python/cudf/cudf/core/column_accessor.py
language: python
identifier: ColumnAccessor.select_by_index
parameters: (self, index: Any)
return_statement:
return self.__class__(
data, multiindex=self.multiindex, level_names=self.level_names,
)
docstring:
Return a ColumnAccessor composed of the columns
specified by index.
Parameters
----------
key : integer, integer slice, or list-like of integers
Returns
-------
ColumnAccessor
function:
```python
def select_by_index(self, index: Any) -> ColumnAccessor:
"""
Return a ColumnAccessor composed of the columns
specified by index.
Parameters
----------
key : integer, integer slice, or list-like of integers
Returns
-------
ColumnAccessor
"""
if isinstance(index, slice):
start, stop, step = index.indices(len(self._data))
keys = self.names[start:stop:step]
elif pd.api.types.is_integer(index):
keys = [self.names[index]]
else:
keys = (self.names[i] for i in index)
data = {k: self._data[k] for k in keys}
return self.__class__(
data, multiindex=self.multiindex, level_names=self.level_names,
)
```
url: https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/column_accessor.py#L346-L369

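The slice branch leans on slice.indices(), which normalizes negative and open-ended bounds against a length; a stand-alone illustration:

```python
names = ("a", "b", "c", "d", "e")
start, stop, step = slice(-3, None).indices(len(names))
assert (start, stop, step) == (2, 5, 1)
assert names[start:stop:step] == ("c", "d", "e")
```
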
nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/dis.py
language: python
identifier: Bytecode.from_traceback
parameters: (cls, tb)
return_statement: return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
docstring: Construct a Bytecode from the given traceback
function:
```python
def from_traceback(cls, tb):
""" Construct a Bytecode from the given traceback """
while tb.tb_next:
tb = tb.tb_next
return cls(tb.tb_frame.f_code, current_offset=tb.tb_lasti)
```
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/dis.py#L494-L498

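Usage sketch for the method above: dis.Bytecode.from_traceback walks to the innermost frame and records the offset of the last executed instruction, which the disassembly marks with '-->'.

```python
import dis
import sys

try:
    1 / 0
except ZeroDivisionError:
    tb = sys.exc_info()[2]
    bc = dis.Bytecode.from_traceback(tb)
    print(bc.dis())   # disassembly of the failing code, faulting offset marked
```
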
nwo: Xilinx/Vitis-AI
sha: fc74d404563d9951b57245443c73bef389f3657f
path: tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/optimizer_v2/optimizer_v2.py
language: python
identifier: _OptimizerV2State.create_slot
parameters: (self, var, val, slot_name, optional_op_name=None)
return_statement: return named_slots[var_key]
docstring:
Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that needs to be
created for the slot.
Returns:
A `Variable` object.
function:
```python
def create_slot(self, var, val, slot_name, optional_op_name=None):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
optional_op_name: Name to use when scoping the Variable that needs to be
created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
var_key = _var_key_v2(var)
if var_key not in named_slots:
new_slot_variable = slot_creator.create_slot(
var, val, optional_op_name or self._op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var, slot_variable=new_slot_variable)
named_slots[var_key] = new_slot_variable
return named_slots[var_key]
```
url: https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/optimizer_v2/optimizer_v2.py#L274-L295

nwo: apache/incubator-mxnet
sha: f03fb23f1d103fec9541b5ae59ee06b1734a51d9
path: benchmark/opperf/nd_operations/array_manipulation_operators.py
language: python
identifier: run_join_split_operators_benchmarks
parameters: (ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100)
return_statement: return mx_join_split_op_results
docstring:
Runs benchmarks with the given context and precision (dtype) for all the
join & split operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
function:
```python
def run_join_split_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the
join & split operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# backward not supported for all 3 ops - concat, stack, split
# concat
concat_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "concat")],
run_backward=False,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=[{"args0":nd.random_normal(shape=(100,100)),
"args1":nd.random_normal(shape=(100,100)),
"args2":nd.random_normal(shape=(100,100))}
],
warmup=warmup,
runs=runs)
# split
split_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "split")],
run_backward=False,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=[{"data": (1024, 1024), "num_outputs": 2},
{"data": (10000, 1), "num_outputs": 1},
{"data": (10000, 100), "num_outputs": 10}
],
warmup=warmup,
runs=runs)
# stack
stack_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "stack")],
run_backward=False,
dtype=dtype,
ctx=ctx,
profiler=profiler,
inputs=[{"args0":nd.random_normal(shape=(100,100)),
"args1":nd.random_normal(shape=(100,100)),
"args2":nd.random_normal(shape=(100,100))}
],
warmup=warmup,
runs=runs)
mx_join_split_op_results = merge_map_list(concat_benchmark_res + split_benchmark_res + stack_benchmark_res)
return mx_join_split_op_results
```
url: https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/benchmark/opperf/nd_operations/array_manipulation_operators.py#L200-L264

nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/gtk/xrc.py
language: python
identifier: XmlNode.GetAttributes
parameters: (*args, **kwargs)
return_statement: return _xrc.XmlNode_GetAttributes(*args, **kwargs)
docstring: GetAttributes(self) -> XmlProperty
function:
```python
def GetAttributes(*args, **kwargs):
"""GetAttributes(self) -> XmlProperty"""
return _xrc.XmlNode_GetAttributes(*args, **kwargs)
```
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/xrc.py#L485-L487

nwo: google/syzygy
sha: 8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
path: third_party/numpy/files/numpy/oldnumeric/ma.py
language: python
identifier: _MaskedPrintOption.set_display
parameters: (self, s)
return_statement:
docstring: set_display(s) sets what prints for masked values.
function:
```python
def set_display (self, s):
"set_display(s) sets what prints for masked values."
self._display = s
```
url: https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/oldnumeric/ma.py#L57-L59

nwo: swift/swift
sha: 12d031cf8177fdec0137f9aa7e2912fa23c4416b
path: 3rdParty/SCons/scons-3.0.1/engine/SCons/Environment.py
language: python
identifier: OverrideEnvironment._update
parameters: (self, dict)
return_statement:
docstring:
Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
function:
```python
def _update(self, dict):
"""Update an environment's values directly, bypassing the normal
checks that occur when users try to set items.
"""
self.__dict__['overrides'].update(dict)
```
url: https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Environment.py#L2356-L2360

nwo: Ewenwan/MVision
sha: 97b394dfa48cb21c82cd003b1a952745e413a17f
path: darknect/tensorflow/yolo_v2/decode_output.py
language: python
identifier: decode
parameters: (detection_feat, feat_sizes=(13, 13), num_classes=80, anchors=None)
return_statement: return bboxes, obj_probs, class_probs
docstring: decode from the detection feature
function:
```python
def decode(detection_feat, feat_sizes=(13, 13), num_classes=80,
anchors=None):
"""decode from the detection feature"""
H, W = feat_sizes  # final feature-map size: a 13*13 grid of cells
num_anchors = len(anchors)  # number of boxes predicted per grid cell
detetion_results = tf.reshape(detection_feat, [-1, H * W, num_anchors,
num_classes + 5])
bbox_xy = tf.nn.sigmoid(detetion_results[:, :, :, 0:2])  # box-center offset, as a fraction of a cell, from the cell's top-left corner
bbox_wh = tf.exp(detetion_results[:, :, :, 2:4])
obj_probs = tf.nn.sigmoid(detetion_results[:, :, :, 4])  # objectness
class_probs = tf.nn.softmax(detetion_results[:, :, :, 5:])
anchors = tf.constant(anchors, dtype=tf.float32)
height_ind = tf.range(H, dtype=tf.float32)
width_ind = tf.range(W, dtype=tf.float32)
x_offset, y_offset = tf.meshgrid(height_ind, width_ind)
x_offset = tf.reshape(x_offset, [1, -1, 1])
y_offset = tf.reshape(y_offset, [1, -1, 1])
# decode
bbox_x = (bbox_xy[:, :, :, 0] + x_offset) / W
bbox_y = (bbox_xy[:, :, :, 1] + y_offset) / H
bbox_w = bbox_wh[:, :, :, 0] * anchors[:, 0] / W * 0.5
bbox_h = bbox_wh[:, :, :, 1] * anchors[:, 1] / H * 0.5
bboxes = tf.stack([bbox_x - bbox_w, bbox_y - bbox_h,
bbox_x + bbox_w, bbox_y + bbox_h], axis=3)
return bboxes, obj_probs, class_probs
```
url: https://github.com/Ewenwan/MVision/blob/97b394dfa48cb21c82cd003b1a952745e413a17f/darknect/tensorflow/yolo_v2/decode_output.py#L11-L41

nwo: yuxng/DA-RNN
sha: 77fbb50b4272514588a10a9f90b7d5f8d46974fb
path: lib/datasets/factory.py
language: python
identifier: get_imdb
parameters: (name)
return_statement: return __sets[name]()
docstring: Get an imdb (image database) by name.
function:
```python
def get_imdb(name):
"""Get an imdb (image database) by name."""
if not __sets.has_key(name):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
```
url: https://github.com/yuxng/DA-RNN/blob/77fbb50b4272514588a10a9f90b7d5f8d46974fb/lib/datasets/factory.py#L53-L57

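dict.has_key() was removed in Python 3; a hedged py3 port of the same registry lookup, with a hypothetical __sets entry for illustration:

```python
__sets = {"demo": lambda: "demo-imdb"}   # hypothetical registry entry

def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:               # replaces py2's __sets.has_key(name)
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()

assert get_imdb("demo") == "demo-imdb"
```
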
nwo: mantidproject/mantid
sha: 03deeb89254ec4289edb8771e0188c2090a02f32
path: scripts/SANS/sans/algorithm_detail/convert_to_q.py
language: python
identifier: _run_q_2d
parameters: (workspace, output_summed_parts, state_convert_to_q, wavelength_adj_ws, pixel_adj_ws)
return_statement:
docstring:
This method performs a 2D data reduction on our workspace.
Note that it does not perform any q resolution calculation, nor any wavelength-and-pixel adjustment. The
output workspace contains two numerical axes.
function:
```python
def _run_q_2d(workspace, output_summed_parts, state_convert_to_q,
wavelength_adj_ws, pixel_adj_ws):
"""
This method performs a 2D data reduction on our workspace.
Note that it does not perform any q resolution calculation, nor any wavelength-and-pixel adjustment. The
output workspace contains two numerical axes.
"""
# Extract relevant settings
max_q_xy = state_convert_to_q.q_xy_max
log_binning = True if state_convert_to_q.q_xy_step_type is RangeStepType.LOG else False
delta_q = state_convert_to_q.q_xy_step
radius_cutoff = state_convert_to_q.radius_cutoff / 1000. # Qxy expects the radius cutoff to be in mm
wavelength_cutoff = state_convert_to_q.wavelength_cutoff
use_gravity = state_convert_to_q.use_gravity
gravity_extra_length = state_convert_to_q.gravity_extra_length
qxy_name = "Qxy"
qxy_options = {"InputWorkspace": workspace,
"OutputWorkspace": EMPTY_NAME,
"MaxQxy": max_q_xy,
"DeltaQ": delta_q,
"IQxQyLogBinning": log_binning,
"AccountForGravity": use_gravity,
"RadiusCut": radius_cutoff,
"WaveCut": wavelength_cutoff,
"OutputParts": output_summed_parts,
"ExtraLength": gravity_extra_length}
if wavelength_adj_ws:
qxy_options.update({"WavelengthAdj": wavelength_adj_ws})
if pixel_adj_ws:
qxy_options.update({"PixelAdj": pixel_adj_ws})
qxy_alg = create_unmanaged_algorithm(qxy_name, **qxy_options)
qxy_alg.execute()
reduced_workspace = qxy_alg.getProperty("OutputWorkspace").value
reduced_workspace = _replace_special_values(reduced_workspace)
# Get the partial workspaces
if output_summed_parts:
sum_of_counts_workspace, sum_of_norms_workspace = _get_partial_output(qxy_alg, do_clean=True)
return reduced_workspace, sum_of_counts_workspace, sum_of_norms_workspace
else:
return reduced_workspace, None, None
```
url: https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/sans/algorithm_detail/convert_to_q.py#L101-L147

nwo: wxWidgets/wxPython-Classic
sha: 19571e1ae65f1ac445f5491474121998c97a1bf0
path: src/osx_carbon/stc.py
language: python
identifier: StyledTextCtrl.MarkerDefine
parameters: (*args, **kwargs)
return_statement: return _stc.StyledTextCtrl_MarkerDefine(*args, **kwargs)
docstring:
MarkerDefine(self, int markerNumber, int markerSymbol, Colour foreground=wxNullColour,
Colour background=wxNullColour)
Set the symbol used for a particular marker number,
and optionally the fore and background colours.
function:
```python
def MarkerDefine(*args, **kwargs):
"""
MarkerDefine(self, int markerNumber, int markerSymbol, Colour foreground=wxNullColour,
Colour background=wxNullColour)
Set the symbol used for a particular marker number,
and optionally the fore and background colours.
"""
return _stc.StyledTextCtrl_MarkerDefine(*args, **kwargs)
```
url: https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L2328-L2336

nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py
language: python
identifier: Grid.grid_remove
parameters: (self)
return_statement:
docstring: Unmap this widget but remember the grid options.
function:
```python
def grid_remove(self):
"""Unmap this widget but remember the grid options."""
self.tk.call('grid', 'remove', self._w)
```
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py#L2234-L2236

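A hedged sketch of what "remember the grid options" means in practice: after grid_remove(), a bare grid() call restores the old placement, which grid_forget() would instead discard (requires a display to run):

```python
import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="hello")
label.grid(row=3, column=2, padx=10)
label.grid_remove()   # unmapped, but row/column/padx are remembered
label.grid()          # reappears with the remembered options
```
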
nwo: microsoft/CNTK
sha: e9396480025b9ca457d26b6f33dd07c474c6aa04
path: bindings/python/cntk/train/distributed.py
language: python
identifier: WorkerDescriptor.global_rank
parameters: (self)
return_statement: return super(WorkerDescriptor, self).m_global_rank
docstring: The global rank of the worker.
function:
```python
def global_rank(self):
'''
The global rank of the worker.
'''
return super(WorkerDescriptor, self).m_global_rank
```
url: https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/train/distributed.py#L28-L32

nwo: mongodb/mongo
sha: d8ff665343ad29cf286ee2cf4a1960d29371937b
path: buildscripts/resmokelib/run/__init__.py
language: python
identifier: TestRunnerEvg._make_tag_combinations
parameters: (cls)
return_statement: return combinations
docstring:
Return a list of (tag, enabled) pairs.
These pairs represent all possible combinations of all possible pairings
of whether the tags are enabled or disabled together.
function:
```python
def _make_tag_combinations(cls):
"""Return a list of (tag, enabled) pairs.
These pairs represent all possible combinations of all possible pairings
of whether the tags are enabled or disabled together.
"""
combinations = []
if config.EVERGREEN_PATCH_BUILD:
combinations.append(("unreliable and resource intensive",
((cls.UNRELIABLE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append(("unreliable and not resource intensive",
((cls.UNRELIABLE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, False))))
combinations.append(("reliable and resource intensive",
((cls.UNRELIABLE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append(("reliable and not resource intensive",
((cls.UNRELIABLE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG,
False))))
else:
combinations.append(("retry on failure and resource intensive",
((cls.RETRY_ON_FAILURE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG,
True))))
combinations.append(("retry on failure and not resource intensive",
((cls.RETRY_ON_FAILURE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG,
False))))
combinations.append(("run once and resource intensive",
((cls.RETRY_ON_FAILURE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG,
True))))
combinations.append(("run once and not resource intensive",
((cls.RETRY_ON_FAILURE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG,
False))))
return combinations
```
url: https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/run/__init__.py#L527-L560

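The four pairings built in each branch are just the Cartesian product of two boolean axes; a compact equivalent sketch using itertools (tag names are stand-ins, not the class constants):

```python
import itertools

TAGS = ("unreliable", "resource intensive")   # hypothetical stand-ins
combinations = [tuple(zip(TAGS, flags))
                for flags in itertools.product((True, False), repeat=2)]
assert len(combinations) == 4
assert combinations[0] == (("unreliable", True), ("resource intensive", True))
```
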
nwo: aws/lumberyard
sha: f85344403c1c2e77ec8c75deb2c116e97b713217
path: dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py
language: python
identifier: Subversion.is_commit_id_equal
parameters: (cls, dest, name)
return_statement: return False
docstring: Always assume the versions don't match
function:
```python
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
```
url: https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py#L385-L389

nwo: freeorion/freeorion
sha: c266a40eccd3a99a17de8fe57c36ef6ba3771665
path: default/python/AI/character/character_module.py
language: python
identifier: Trait.may_dither_focus_to_gain_research
parameters: (self)
return_statement: return True
docstring: Return True if permitted to trade production at a loss for research
function:
```python
def may_dither_focus_to_gain_research(self): # pylint: disable=no-self-use,unused-argument
"""Return True if permitted to trade production at a loss for research"""
return True
```
url: https://github.com/freeorion/freeorion/blob/c266a40eccd3a99a17de8fe57c36ef6ba3771665/default/python/AI/character/character_module.py#L228-L230

nwo: Polidea/SiriusObfuscator
sha: b0e590d8130e97856afe578869b83a209e2b19be
path: SymbolExtractorAndRenamer/lldb/utils/vim-lldb/python-vim-lldb/vim_panes.py
language: python
identifier: StoppedCommandPane.get_content
parameters: (self, target, controller)
return_statement: return output
docstring:
Returns the output of a command that relies on the process being stopped.
If the process is not in 'stopped' state, the process status is returned.
function:
```python
def get_content(self, target, controller):
""" Returns the output of a command that relies on the process being stopped.
If the process is not in 'stopped' state, the process status is returned.
"""
output = ""
if not target or not target.IsValid():
output = VimPane.MSG_NO_TARGET
elif not target.GetProcess() or not target.GetProcess().IsValid():
output = VimPane.MSG_NO_PROCESS
elif target.GetProcess().GetState() == lldb.eStateStopped:
(success, output) = controller.getCommandOutput(
self.command, self.args)
else:
(success, output) = controller.getCommandOutput("process", "status")
return output
```
url: https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/utils/vim-lldb/python-vim-lldb/vim_panes.py#L571-L585

nwo: krishauser/Klampt
sha: 972cc83ea5befac3f653c1ba20f80155768ad519
path: Python/klampt/model/workspace.py
language: python
identifier: compute_field_grid
parameters: (points : Sequence[Vector3], values : Sequence[float], resolution=0.05, dimensions=None, bounds=None, aggregator='max', initial_value='auto')
return_statement: return vg
docstring:
Helper to compute a gridded value field over a set of scattered points.
Args:
points (list of Vector3 or nx3 array): the points
values (list of float): the values at each point
resolution (float, 3-vector, or None): the resolution of the resulting
grid.
dimensions (int or 3-vector optional): if resolution=None and dimensions
is given, the number of dimensions. If a single int, the cell size
is determined by dividing the longest side by dimensions.
bounds (pair of 3-vectors, optional): specifies the minimum and maximum
range of the grid. If not given, calculated by the
aggregator (str or pair): either 'max', 'min', 'sum', 'average', or a
pair of functions (f,g) giving an arbitrary aggregator
f(x,value) -> x', g(x) -> float. x is an arbitrary object, which
for each cell is initialized to the value specified by
initial_value (default None).
initial_value (float or 'auto'): the initial value of the cell before
aggregation. If aggregator is a pair of functions, 'auto' sets x
to None by default.
function:
```python
def compute_field_grid(points : Sequence[Vector3],
values : Sequence[float],
resolution=0.05,
dimensions=None,
bounds=None,
aggregator='max',
initial_value='auto') -> VolumeGrid:
"""
Helper to compute a gridded value field over a set of scattered points.
Args:
points (list of Vector3 or nx3 array): the points
values (list of float): the values at each point
resolution (float, 3-vector, or None): the resolution of the resulting
grid.
dimensions (int or 3-vector optional): if resolution=None and dimensions
is given, the number of dimensions. If a single int, the cell size
is determined by dividing the longest side by dimensions.
bounds (pair of 3-vectors, optional): specifies the minimum and maximum
range of the grid. If not given, calculated by the
aggregator (str or pair): either 'max', 'min', 'sum', 'average', or a
pair of functions (f,g) giving an arbitrary aggregator
f(x,value) -> x', g(x) -> float. x is an arbitrary object, which
for each cell is initialized to the value specified by
initial_value (default None).
initial_value (float or 'auto'): the initial value of the cell before
aggregation. If aggregator is a pair of functions, 'auto' sets x
to None by default.
"""
points = np.asarray(points)
auto_bounds = False
expand = False
if bounds is not None:
lower_corner,upper_corner = bounds
if len(lower_corner) != 3 or len(upper_corner) != 3:
raise ValueError("Invalid bounds")
else:
if len(points) == 0:
raise ValueError("Cannot compute occupancy grid of empty set of points")
lower_corner,upper_corner = np.min(points,axis=0),np.max(points,axis=0)
assert len(lower_corner) == 3
assert len(upper_corner) == 3
auto_bounds = True
expand = True
if dimensions is not None:
if hasattr(dimensions,'__iter__'):
cellsize = vectorops.div(vectorops.sub(upper_corner,lower_corner),dimensions)
invcellsize = vectorops.div(dimensions,vectorops.sub(upper_corner,lower_corner))
auto_bounds = False
else:
w = max(*vectorops.sub(upper_corner,lower_corner))
cellsize = [w / dimensions]*3
invcellsize = [dimensions/w]*3
dimensions = [int(math.floor(d/c)+1) for d,c in zip(vectorops.sub(upper_corner,lower_corner),cellsize)]
else:
if resolution is None:
raise ValueError("One of resolution or dimensions must be given")
cellsize = resolution
if not hasattr(resolution,'__iter__'):
cellsize = [resolution]*3
invcellsize = [1.0/c for c in cellsize]
dimensions = [int(math.floor(d/c)+1) for d,c in zip(vectorops.sub(upper_corner,lower_corner),cellsize)]
if auto_bounds:
#adjust limits to reduce artifacts
bmax = vectorops.add(lower_corner,[d*c for (d,c) in zip(dimensions,cellsize)])
shift = vectorops.mul(vectorops.sub(upper_corner,bmax),0.5)
lower_corner = vectorops.add(lower_corner,shift)
upper_corner = vectorops.add(upper_corner,shift)
if expand:
lower_corner = vectorops.sub(lower_corner,cellsize)
upper_corner = vectorops.add(upper_corner,cellsize)
dimensions = [d+2 for d in dimensions]
#compact way to compute all valid indices of points
if len(points)==0:
valid_indices = []
else:
indices = np.multiply(points - np.asarray(lower_corner),np.asarray(invcellsize))
indices = np.floor(indices).astype(int)
valid_points = np.all((0 <= indices) & (indices < np.array(dimensions)),axis=1)
valid_indices = indices[valid_points,:]
result = None
if aggregator == 'max':
if initial_value == 'auto':
initial_value = -float('inf')
value_grid = np.full(dimensions,fill_value=initial_value)
f=max
g=None
elif aggregator == 'min':
if initial_value == 'auto':
initial_value = float('inf')
value_grid = np.full(dimensions,fill_value=float('inf'))
f=min
g=None
elif aggregator == 'sum':
if initial_value == 'auto':
initial_value = 0.0
value_grid = np.full(dimensions,fill_value=initial_value)
elif aggregator == 'average':
if initial_value == 'auto':
prior,strength = 0.0,0
else:
if not isinstance(initial_value,(tuple,list)) or len(initial_value) != 2:
raise ValueError("Initial value for average must be of the form (prior,strength)")
prior,strength = initial_value
vsum = np.full(dimensions,fill_value=prior*strength)
count = np.full(dimensions,fill_value=strength)
        for ind,v in zip(valid_indices,values):
            ind = tuple(ind)   #index a single cell; a raw array index would fancy-index whole slices
            vsum[ind] += v
            count[ind] += 1
if strength > 0:
result = np.divide(vsum,count)
else:
result = np.zeros(dimensions)
nz = (count > 0)
result[nz] = np.divide(vsum[nz],count[nz])
    elif isinstance(aggregator,tuple):
        f,g = aggregator
        if initial_value == 'auto':
            initial_value = None   #per the docstring, 'auto' means None for custom aggregators
        value_grid = np.full(dimensions,fill_value=initial_value)
else:
raise ValueError("Invalid value for aggregator, must be min, max, sum, average, or a pair of callables")
if result is None:
for ind,v in zip(valid_indices,values):
ind=tuple(ind)
value_grid[ind] = f(value_grid[ind],v)
if g is not None:
result = np.empty(dimensions)
for i in range(dimensions[0]):
for j in range(dimensions[1]):
for k in range(dimensions[2]):
result[i,j,k] = g(value_grid[i,j,k])
else:
result = value_grid
vg = VolumeGrid()
vg.setBounds(lower_corner,upper_corner)
vg.setValues(result)
return vg
|
[
"def",
"compute_field_grid",
"(",
"points",
":",
"Sequence",
"[",
"Vector3",
"]",
",",
"values",
":",
"Sequence",
"[",
"float",
"]",
",",
"resolution",
"=",
"0.05",
",",
"dimensions",
"=",
"None",
",",
"bounds",
"=",
"None",
",",
"aggregator",
"=",
"'max'",
",",
"initial_value",
"=",
"'auto'",
")",
"->",
"VolumeGrid",
":",
"points",
"=",
"np",
".",
"asarray",
"(",
"points",
")",
"auto_bounds",
"=",
"False",
"expand",
"=",
"False",
"if",
"bounds",
"is",
"not",
"None",
":",
"lower_corner",
",",
"upper_corner",
"=",
"bounds",
"if",
"len",
"(",
"lower_corner",
")",
"!=",
"3",
"or",
"len",
"(",
"upper_corner",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"\"Invalid bounds\"",
")",
"else",
":",
"if",
"len",
"(",
"points",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Cannot compute occupancy grid of empty set of points\"",
")",
"lower_corner",
",",
"upper_corner",
"=",
"np",
".",
"min",
"(",
"points",
",",
"axis",
"=",
"0",
")",
",",
"np",
".",
"max",
"(",
"points",
",",
"axis",
"=",
"0",
")",
"assert",
"len",
"(",
"lower_corner",
")",
"==",
"3",
"assert",
"len",
"(",
"upper_corner",
")",
"==",
"3",
"auto_bounds",
"=",
"True",
"expand",
"=",
"True",
"if",
"dimensions",
"is",
"not",
"None",
":",
"if",
"hasattr",
"(",
"dimensions",
",",
"'__iter__'",
")",
":",
"cellsize",
"=",
"vectorops",
".",
"div",
"(",
"vectorops",
".",
"sub",
"(",
"upper_corner",
",",
"lower_corner",
")",
",",
"dimensions",
")",
"invcellsize",
"=",
"vectorops",
".",
"div",
"(",
"dimensions",
",",
"vectorops",
".",
"sub",
"(",
"upper_corner",
",",
"lower_corner",
")",
")",
"auto_bounds",
"=",
"False",
"else",
":",
"w",
"=",
"max",
"(",
"*",
"vectorops",
".",
"sub",
"(",
"upper_corner",
",",
"lower_corner",
")",
")",
"cellsize",
"=",
"[",
"w",
"/",
"dimensions",
"]",
"*",
"3",
"invcellsize",
"=",
"[",
"dimensions",
"/",
"w",
"]",
"*",
"3",
"dimensions",
"=",
"[",
"int",
"(",
"math",
".",
"floor",
"(",
"d",
"/",
"c",
")",
"+",
"1",
")",
"for",
"d",
",",
"c",
"in",
"zip",
"(",
"vectorops",
".",
"sub",
"(",
"upper_corner",
",",
"lower_corner",
")",
",",
"cellsize",
")",
"]",
"else",
":",
"if",
"resolution",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"One of resolution or dimensions must be given\"",
")",
"cellsize",
"=",
"resolution",
"if",
"not",
"hasattr",
"(",
"resolution",
",",
"'__iter__'",
")",
":",
"cellsize",
"=",
"[",
"resolution",
"]",
"*",
"3",
"invcellsize",
"=",
"[",
"1.0",
"/",
"c",
"for",
"c",
"in",
"cellsize",
"]",
"dimensions",
"=",
"[",
"int",
"(",
"math",
".",
"floor",
"(",
"d",
"/",
"c",
")",
"+",
"1",
")",
"for",
"d",
",",
"c",
"in",
"zip",
"(",
"vectorops",
".",
"sub",
"(",
"upper_corner",
",",
"lower_corner",
")",
",",
"cellsize",
")",
"]",
"if",
"auto_bounds",
":",
"#adjust limits to reduce artifacts",
"bmax",
"=",
"vectorops",
".",
"add",
"(",
"lower_corner",
",",
"[",
"d",
"*",
"c",
"for",
"(",
"d",
",",
"c",
")",
"in",
"zip",
"(",
"dimensions",
",",
"cellsize",
")",
"]",
")",
"shift",
"=",
"vectorops",
".",
"mul",
"(",
"vectorops",
".",
"sub",
"(",
"upper_corner",
",",
"bmax",
")",
",",
"0.5",
")",
"lower_corner",
"=",
"vectorops",
".",
"add",
"(",
"lower_corner",
",",
"shift",
")",
"upper_corner",
"=",
"vectorops",
".",
"add",
"(",
"upper_corner",
",",
"shift",
")",
"if",
"expand",
":",
"lower_corner",
"=",
"vectorops",
".",
"sub",
"(",
"lower_corner",
",",
"cellsize",
")",
"upper_corner",
"=",
"vectorops",
".",
"add",
"(",
"upper_corner",
",",
"cellsize",
")",
"dimensions",
"=",
"[",
"d",
"+",
"2",
"for",
"d",
"in",
"dimensions",
"]",
"#compact way to compute all valid indices of points",
"if",
"len",
"(",
"points",
")",
"==",
"0",
":",
"valid_indices",
"=",
"[",
"]",
"else",
":",
"indices",
"=",
"np",
".",
"multiply",
"(",
"points",
"-",
"np",
".",
"asarray",
"(",
"lower_corner",
")",
",",
"np",
".",
"asarray",
"(",
"invcellsize",
")",
")",
"indices",
"=",
"np",
".",
"floor",
"(",
"indices",
")",
".",
"astype",
"(",
"int",
")",
"valid_points",
"=",
"np",
".",
"all",
"(",
"(",
"0",
"<=",
"indices",
")",
"&",
"(",
"indices",
"<",
"np",
".",
"array",
"(",
"dimensions",
")",
")",
",",
"axis",
"=",
"1",
")",
"valid_indices",
"=",
"indices",
"[",
"valid_points",
",",
":",
"]",
"result",
"=",
"None",
"if",
"aggregator",
"==",
"'max'",
":",
"if",
"initial_value",
"==",
"'auto'",
":",
"initial_value",
"=",
"-",
"float",
"(",
"'inf'",
")",
"value_grid",
"=",
"np",
".",
"full",
"(",
"dimensions",
",",
"fill_value",
"=",
"initial_value",
")",
"f",
"=",
"max",
"g",
"=",
"None",
"elif",
"aggregator",
"==",
"'min'",
":",
"if",
"initial_value",
"==",
"'auto'",
":",
"initial_value",
"=",
"float",
"(",
"'inf'",
")",
"value_grid",
"=",
"np",
".",
"full",
"(",
"dimensions",
",",
"fill_value",
"=",
"float",
"(",
"'inf'",
")",
")",
"f",
"=",
"min",
"g",
"=",
"None",
"elif",
"aggregator",
"==",
"'sum'",
":",
"if",
"initial_value",
"==",
"'auto'",
":",
"initial_value",
"=",
"0.0",
"value_grid",
"=",
"np",
".",
"full",
"(",
"dimensions",
",",
"fill_value",
"=",
"initial_value",
")",
"elif",
"aggregator",
"==",
"'average'",
":",
"if",
"initial_value",
"==",
"'auto'",
":",
"prior",
",",
"strength",
"=",
"0.0",
",",
"0",
"else",
":",
"if",
"not",
"isinstance",
"(",
"initial_value",
",",
"(",
"tuple",
",",
"list",
")",
")",
"or",
"len",
"(",
"initial_value",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"\"Initial value for average must be of the form (prior,strength)\"",
")",
"prior",
",",
"strength",
"=",
"initial_value",
"vsum",
"=",
"np",
".",
"full",
"(",
"dimensions",
",",
"fill_value",
"=",
"prior",
"*",
"strength",
")",
"count",
"=",
"np",
".",
"full",
"(",
"dimensions",
",",
"fill_value",
"=",
"strength",
")",
"for",
"ind",
",",
"v",
"in",
"zip",
"(",
"valid_indices",
",",
"values",
")",
":",
"vsum",
"[",
"ind",
"]",
"+=",
"v",
"count",
"[",
"ind",
"]",
"+=",
"1",
"if",
"strength",
">",
"0",
":",
"result",
"=",
"np",
".",
"divide",
"(",
"vsum",
",",
"count",
")",
"else",
":",
"result",
"=",
"np",
".",
"zeros",
"(",
"dimensions",
")",
"nz",
"=",
"(",
"count",
">",
"0",
")",
"result",
"[",
"nz",
"]",
"=",
"np",
".",
"divide",
"(",
"vsum",
"[",
"nz",
"]",
",",
"count",
"[",
"nz",
"]",
")",
"elif",
"isinstance",
"(",
"aggregator",
",",
"tuple",
")",
":",
"f",
",",
"g",
"=",
"aggregator",
"value_grid",
"=",
"np",
".",
"full",
"(",
"dimensions",
",",
"fill_value",
"=",
"initial_value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for aggregator, must be min, max, sum, average, or a pair of callables\"",
")",
"if",
"result",
"is",
"None",
":",
"for",
"ind",
",",
"v",
"in",
"zip",
"(",
"valid_indices",
",",
"values",
")",
":",
"ind",
"=",
"tuple",
"(",
"ind",
")",
"value_grid",
"[",
"ind",
"]",
"=",
"f",
"(",
"value_grid",
"[",
"ind",
"]",
",",
"v",
")",
"if",
"g",
"is",
"not",
"None",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"dimensions",
")",
"for",
"i",
"in",
"range",
"(",
"dimensions",
"[",
"0",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"dimensions",
"[",
"1",
"]",
")",
":",
"for",
"k",
"in",
"range",
"(",
"dimensions",
"[",
"2",
"]",
")",
":",
"result",
"[",
"i",
",",
"j",
",",
"k",
"]",
"=",
"g",
"(",
"value_grid",
"[",
"i",
",",
"j",
",",
"k",
"]",
")",
"else",
":",
"result",
"=",
"value_grid",
"vg",
"=",
"VolumeGrid",
"(",
")",
"vg",
".",
"setBounds",
"(",
"lower_corner",
",",
"upper_corner",
")",
"vg",
".",
"setValues",
"(",
"result",
")",
"return",
"vg"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/model/workspace.py#L109-L249
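
A usage sketch for the function above. It assumes Klampt is installed and that compute_field_grid is importable from klampt.model.workspace (the path in the URL above); the sampled field is made up for illustration.

import numpy as np
from klampt.model.workspace import compute_field_grid  # assumed import path

rng = np.random.default_rng(0)
points = rng.uniform(-1.0, 1.0, size=(500, 3))   # 500 scattered 3D samples
values = points[:, 2]                            # pretend the field is the z coordinate

# Average the samples into a grid with 0.1-sized cells; empty cells keep the
# prior 0.0 (strength 0), and bounds are derived from the points' bounding box.
vg = compute_field_grid(points, values, resolution=0.1, aggregator='average')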
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/lib/agw/aui/aui_utilities.py
|
python
|
MakeGray
|
(rgbTuple, factor, maskColour)
|
Make a pixel grayed-out.
    If the pixel matches the `maskColour`, it won't be changed.
    :param tuple `rgbTuple`: a tuple representing a pixel colour;
:param integer `factor`: a graying-out factor;
:param Colour `maskColour`: a colour mask.
|
Make a pixel grayed-out.
|
[
"Make",
"a",
"pixel",
"grayed",
"-",
"out",
"."
] |
def MakeGray(rgbTuple, factor, maskColour):
"""
Make a pixel grayed-out.
If the pixel matches the `maskColour`, it won't be changed.
:param tuple `rgbTuple`: a tuple representing a pixel colour;
:param integer `factor`: a graying-out factor;
:param Colour `maskColour`: a colour mask.
"""
if rgbTuple != maskColour:
r, g, b = rgbTuple
return map(lambda x: int((230 - x) * factor) + x, (r, g, b))
else:
return rgbTuple
|
[
"def",
"MakeGray",
"(",
"rgbTuple",
",",
"factor",
",",
"maskColour",
")",
":",
"if",
"rgbTuple",
"!=",
"maskColour",
":",
"r",
",",
"g",
",",
"b",
"=",
"rgbTuple",
"return",
"map",
"(",
"lambda",
"x",
":",
"int",
"(",
"(",
"230",
"-",
"x",
")",
"*",
"factor",
")",
"+",
"x",
",",
"(",
"r",
",",
"g",
",",
"b",
")",
")",
"else",
":",
"return",
"rgbTuple"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/aui_utilities.py#L225-L240
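
A minimal sketch of the helper above; the colours are arbitrary. Note that on Python 3 map() returns an iterator, so the result is wrapped in list() here (the original wx code targeted Python 2, where map() returns a list).

mask = (255, 0, 255)                         # magenta mask colour
print(list(MakeGray((0, 0, 0), 0.5, mask)))  # -> [115, 115, 115]
print(MakeGray(mask, 0.5, mask))             # mask pixels pass through unchanged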
|
||
idaholab/moose
|
9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
|
python/MooseDocs/common/get_content.py
|
python
|
get_files
|
(items, in_ext)
|
return filenames
|
Get a list of files to consider given the content configuration.
|
Get a list of files to consider given the content configuration.
|
[
"Get",
"a",
"list",
"of",
"files",
"to",
"consider",
"given",
"the",
"content",
"configuration",
"."
] |
def get_files(items, in_ext):
"""
Get a list of files to consider given the content configuration.
"""
filenames = []
for value in items:
if 'root_dir' not in value:
LOG.error('The supplied items must be a list of dict items, each with a "root_dir" and '
'optionally a "content" entry.')
root = mooseutils.eval_path(value['root_dir'])
if not os.path.isabs(root):
root = os.path.join(MooseDocs.ROOT_DIR, root)
for fname in _doc_import(root, content=value.get('content', None)):
filenames.append((root, fname, value.get('external', False)))
return filenames
|
[
"def",
"get_files",
"(",
"items",
",",
"in_ext",
")",
":",
"filenames",
"=",
"[",
"]",
"for",
"value",
"in",
"items",
":",
"if",
"'root_dir'",
"not",
"in",
"value",
":",
"LOG",
".",
"error",
"(",
"'The supplied items must be a list of dict items, each with a \"root_dir\" and '",
"'optionally a \"content\" entry.'",
")",
"root",
"=",
"mooseutils",
".",
"eval_path",
"(",
"value",
"[",
"'root_dir'",
"]",
")",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"root",
")",
":",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"MooseDocs",
".",
"ROOT_DIR",
",",
"root",
")",
"for",
"fname",
"in",
"_doc_import",
"(",
"root",
",",
"content",
"=",
"value",
".",
"get",
"(",
"'content'",
",",
"None",
")",
")",
":",
"filenames",
".",
"append",
"(",
"(",
"root",
",",
"fname",
",",
"value",
".",
"get",
"(",
"'external'",
",",
"False",
")",
")",
")",
"return",
"filenames"
] |
https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/MooseDocs/common/get_content.py#L132-L150
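
A sketch of the items structure the function expects; the paths and glob patterns here are hypothetical, and _doc_import (called internally) must be available in the MooseDocs environment.

items = [
    {'root_dir': 'framework/doc/content'},     # take everything under this root
    {'root_dir': 'modules/level_set/doc',
     'content': ['**/*.md'],                   # optional filter handed to _doc_import
     'external': True},
]
for root, fname, external in get_files(items, in_ext='.md'):
    print(root, fname, external)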
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_gdi.py
|
python
|
IconLocation.__init__
|
(self, *args, **kwargs)
|
__init__(self, String filename=&wxPyEmptyString, int num=0) -> IconLocation
|
__init__(self, String filename=&wxPyEmptyString, int num=0) -> IconLocation
|
[
"__init__",
"(",
"self",
"String",
"filename",
"=",
"&wxPyEmptyString",
"int",
"num",
"=",
"0",
")",
"-",
">",
"IconLocation"
] |
def __init__(self, *args, **kwargs):
"""__init__(self, String filename=&wxPyEmptyString, int num=0) -> IconLocation"""
_gdi_.IconLocation_swiginit(self,_gdi_.new_IconLocation(*args, **kwargs))
|
[
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_gdi_",
".",
"IconLocation_swiginit",
"(",
"self",
",",
"_gdi_",
".",
"new_IconLocation",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L1346-L1348
|
||
apache/incubator-mxnet
|
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
|
python/mxnet/symbol/numpy/_symbol.py
|
python
|
pad
|
(x, pad_width, mode='constant', **kwargs)
|
return _npi.pad(x, pad_width, mode='constant', constant_values=0)
|
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
|
Pad an array.
|
[
"Pad",
"an",
"array",
"."
] |
def pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments
"""
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
"""
# pylint: disable = too-many-return-statements, inconsistent-return-statements
if not _np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
if not isinstance(pad_width, tuple):
raise TypeError("`pad_width` must be tuple.")
if mode == "linear_ramp":
raise ValueError("mode {'linear_ramp'} is not supported.")
if mode == "wrap":
raise ValueError("mode {'wrap'} is not supported.")
if mode == "median":
raise ValueError("mode {'median'} is not supported.")
if mode == "mean":
raise ValueError("mode {'mean'} is not supported.")
if mode == "empty":
raise ValueError("mode {'empty'} is not supported.")
if callable(mode):
raise ValueError("mode {'<function>'} is not supported.")
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
if isinstance(mode, _np.compat.basestring):
# Make sure have allowed kwargs appropriate for mode
for key in kwargs:
if key not in allowedkwargs[mode]:
raise ValueError('%s keyword not in allowed keywords %s' %(key, allowedkwargs[mode]))
unsupported_kwargs = set(kwargs) - set(allowedkwargs[mode])
if unsupported_kwargs:
raise ValueError("unsupported keyword arguments for mode '{}': {}"
.format(mode, unsupported_kwargs))
if mode == "constant":
values = kwargs.get("constant_values", 0)
if isinstance(values, tuple):
raise TypeError("unsupported constant_values type: {'tuple'}.")
return _npi.pad(x, pad_width, mode='constant', constant_values=values)
elif mode == "symmetric":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even")
elif mode == "edge":
return _npi.pad(x, pad_width, mode='edge')
elif mode == "reflect":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='reflect', reflect_type="even")
elif mode == "maximum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='maximum')
elif mode == "minimum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='minimum')
return _npi.pad(x, pad_width, mode='constant', constant_values=0)
|
[
"def",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'constant'",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=too-many-arguments",
"# pylint: disable = too-many-return-statements, inconsistent-return-statements",
"if",
"not",
"_np",
".",
"asarray",
"(",
"pad_width",
")",
".",
"dtype",
".",
"kind",
"==",
"'i'",
":",
"raise",
"TypeError",
"(",
"'`pad_width` must be of integral type.'",
")",
"if",
"not",
"isinstance",
"(",
"pad_width",
",",
"tuple",
")",
":",
"raise",
"TypeError",
"(",
"\"`pad_width` must be tuple.\"",
")",
"if",
"mode",
"==",
"\"linear_ramp\"",
":",
"raise",
"ValueError",
"(",
"\"mode {'linear_ramp'} is not supported.\"",
")",
"if",
"mode",
"==",
"\"wrap\"",
":",
"raise",
"ValueError",
"(",
"\"mode {'wrap'} is not supported.\"",
")",
"if",
"mode",
"==",
"\"median\"",
":",
"raise",
"ValueError",
"(",
"\"mode {'median'} is not supported.\"",
")",
"if",
"mode",
"==",
"\"mean\"",
":",
"raise",
"ValueError",
"(",
"\"mode {'mean'} is not supported.\"",
")",
"if",
"mode",
"==",
"\"empty\"",
":",
"raise",
"ValueError",
"(",
"\"mode {'empty'} is not supported.\"",
")",
"if",
"callable",
"(",
"mode",
")",
":",
"raise",
"ValueError",
"(",
"\"mode {'<function>'} is not supported.\"",
")",
"allowedkwargs",
"=",
"{",
"'constant'",
":",
"[",
"'constant_values'",
"]",
",",
"'edge'",
":",
"[",
"]",
",",
"'linear_ramp'",
":",
"[",
"'end_values'",
"]",
",",
"'maximum'",
":",
"[",
"'stat_length'",
"]",
",",
"'mean'",
":",
"[",
"'stat_length'",
"]",
",",
"'median'",
":",
"[",
"'stat_length'",
"]",
",",
"'minimum'",
":",
"[",
"'stat_length'",
"]",
",",
"'reflect'",
":",
"[",
"'reflect_type'",
"]",
",",
"'symmetric'",
":",
"[",
"'reflect_type'",
"]",
",",
"'wrap'",
":",
"[",
"]",
",",
"}",
"if",
"isinstance",
"(",
"mode",
",",
"_np",
".",
"compat",
".",
"basestring",
")",
":",
"# Make sure have allowed kwargs appropriate for mode",
"for",
"key",
"in",
"kwargs",
":",
"if",
"key",
"not",
"in",
"allowedkwargs",
"[",
"mode",
"]",
":",
"raise",
"ValueError",
"(",
"'%s keyword not in allowed keywords %s'",
"%",
"(",
"key",
",",
"allowedkwargs",
"[",
"mode",
"]",
")",
")",
"unsupported_kwargs",
"=",
"set",
"(",
"kwargs",
")",
"-",
"set",
"(",
"allowedkwargs",
"[",
"mode",
"]",
")",
"if",
"unsupported_kwargs",
":",
"raise",
"ValueError",
"(",
"\"unsupported keyword arguments for mode '{}': {}\"",
".",
"format",
"(",
"mode",
",",
"unsupported_kwargs",
")",
")",
"if",
"mode",
"==",
"\"constant\"",
":",
"values",
"=",
"kwargs",
".",
"get",
"(",
"\"constant_values\"",
",",
"0",
")",
"if",
"isinstance",
"(",
"values",
",",
"tuple",
")",
":",
"raise",
"TypeError",
"(",
"\"unsupported constant_values type: {'tuple'}.\"",
")",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"values",
")",
"elif",
"mode",
"==",
"\"symmetric\"",
":",
"values",
"=",
"kwargs",
".",
"get",
"(",
"\"reflect_type\"",
",",
"\"even\"",
")",
"if",
"values",
"!=",
"\"even\"",
"and",
"values",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"unsupported reflect_type '{}'\"",
".",
"format",
"(",
"values",
")",
")",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'symmetric'",
",",
"reflect_type",
"=",
"\"even\"",
")",
"elif",
"mode",
"==",
"\"edge\"",
":",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'edge'",
")",
"elif",
"mode",
"==",
"\"reflect\"",
":",
"values",
"=",
"kwargs",
".",
"get",
"(",
"\"reflect_type\"",
",",
"\"even\"",
")",
"if",
"values",
"!=",
"\"even\"",
"and",
"values",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"unsupported reflect_type '{}'\"",
".",
"format",
"(",
"values",
")",
")",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'reflect'",
",",
"reflect_type",
"=",
"\"even\"",
")",
"elif",
"mode",
"==",
"\"maximum\"",
":",
"values",
"=",
"kwargs",
".",
"get",
"(",
"\"stat_length\"",
",",
"None",
")",
"if",
"values",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"unsupported stat_length '{}'\"",
".",
"format",
"(",
"values",
")",
")",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'maximum'",
")",
"elif",
"mode",
"==",
"\"minimum\"",
":",
"values",
"=",
"kwargs",
".",
"get",
"(",
"\"stat_length\"",
",",
"None",
")",
"if",
"values",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"unsupported stat_length '{}'\"",
".",
"format",
"(",
"values",
")",
")",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'minimum'",
")",
"return",
"_npi",
".",
"pad",
"(",
"x",
",",
"pad_width",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"0",
")"
] |
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/symbol/numpy/_symbol.py#L7743-L7873
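
A sketch of calling this through MXNet's NumPy-style front end, which mirrors the symbol version above; it assumes an MXNet build where the mxnet.np interface is available. Per the checks above, pad_width must be a tuple.

from mxnet import np as mx_np  # assumption: MXNet with the np interface enabled

x = mx_np.array([[1, 2], [3, 4]])
# One ring of zeros around the 2x2 block -> a 4x4 result.
y = mx_np.pad(x, ((1, 1), (1, 1)), mode='constant', constant_values=0)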
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_controls.py
|
python
|
ListItem.GetData
|
(*args, **kwargs)
|
return _controls_.ListItem_GetData(*args, **kwargs)
|
GetData(self) -> long
|
GetData(self) -> long
|
[
"GetData",
"(",
"self",
")",
"-",
">",
"long"
] |
def GetData(*args, **kwargs):
"""GetData(self) -> long"""
return _controls_.ListItem_GetData(*args, **kwargs)
|
[
"def",
"GetData",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ListItem_GetData",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L4236-L4238
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_controls.py
|
python
|
ToggleButton.GetValue
|
(*args, **kwargs)
|
return _controls_.ToggleButton_GetValue(*args, **kwargs)
|
GetValue(self) -> bool
|
GetValue(self) -> bool
|
[
"GetValue",
"(",
"self",
")",
"-",
">",
"bool"
] |
def GetValue(*args, **kwargs):
"""GetValue(self) -> bool"""
return _controls_.ToggleButton_GetValue(*args, **kwargs)
|
[
"def",
"GetValue",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ToggleButton_GetValue",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L3011-L3013
|
|
eventql/eventql
|
7ca0dbb2e683b525620ea30dc40540a22d5eb227
|
deps/3rdparty/spidermonkey/mozjs/python/psutil/psutil/_pslinux.py
|
python
|
users
|
()
|
return retlist
|
Return currently connected users as a list of namedtuples.
|
Return currently connected users as a list of namedtuples.
|
[
"Return",
"currently",
"connected",
"users",
"as",
"a",
"list",
"of",
"namedtuples",
"."
] |
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, tty, hostname, tstamp, user_process = item
# note: the underlying C function includes entries about
# system boot, run level and others. We might want
# to use them in the future.
if not user_process:
continue
if hostname == ':0.0':
hostname = 'localhost'
nt = _common.suser(user, tty or None, hostname, tstamp)
retlist.append(nt)
return retlist
|
[
"def",
"users",
"(",
")",
":",
"retlist",
"=",
"[",
"]",
"rawlist",
"=",
"cext",
".",
"users",
"(",
")",
"for",
"item",
"in",
"rawlist",
":",
"user",
",",
"tty",
",",
"hostname",
",",
"tstamp",
",",
"user_process",
"=",
"item",
"# note: the underlying C function includes entries about",
"# system boot, run level and others. We might want",
"# to use them in the future.",
"if",
"not",
"user_process",
":",
"continue",
"if",
"hostname",
"==",
"':0.0'",
":",
"hostname",
"=",
"'localhost'",
"nt",
"=",
"_common",
".",
"suser",
"(",
"user",
",",
"tty",
"or",
"None",
",",
"hostname",
",",
"tstamp",
")",
"retlist",
".",
"append",
"(",
"nt",
")",
"return",
"retlist"
] |
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/psutil/psutil/_pslinux.py#L314-L329
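
This helper backs the public psutil API; a sketch via that public entry point, with field names following the suser namedtuple built above:

import psutil

for u in psutil.users():
    print(u.name, u.terminal, u.host, u.started)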
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/webbrowser.py
|
python
|
get
|
(using=None)
|
Return a browser launcher instance appropriate for the environment.
|
Return a browser launcher instance appropriate for the environment.
|
[
"Return",
"a",
"browser",
"launcher",
"instance",
"appropriate",
"for",
"the",
"environment",
"."
] |
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
|
[
"def",
"get",
"(",
"using",
"=",
"None",
")",
":",
"if",
"using",
"is",
"not",
"None",
":",
"alternatives",
"=",
"[",
"using",
"]",
"else",
":",
"alternatives",
"=",
"_tryorder",
"for",
"browser",
"in",
"alternatives",
":",
"if",
"'%s'",
"in",
"browser",
":",
"# User gave us a command line, split it into name and args",
"browser",
"=",
"shlex",
".",
"split",
"(",
"browser",
")",
"if",
"browser",
"[",
"-",
"1",
"]",
"==",
"'&'",
":",
"return",
"BackgroundBrowser",
"(",
"browser",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"return",
"GenericBrowser",
"(",
"browser",
")",
"else",
":",
"# User gave us a browser name or path.",
"try",
":",
"command",
"=",
"_browsers",
"[",
"browser",
".",
"lower",
"(",
")",
"]",
"except",
"KeyError",
":",
"command",
"=",
"_synthesize",
"(",
"browser",
")",
"if",
"command",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"return",
"command",
"[",
"1",
"]",
"elif",
"command",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"return",
"command",
"[",
"0",
"]",
"(",
")",
"raise",
"Error",
"(",
"\"could not locate runnable browser\"",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/webbrowser.py#L28-L52
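
A short sketch of the lookup behaviour described above, using only the standard library:

import webbrowser

browser = webbrowser.get()            # launcher appropriate for this environment
browser.open('https://example.com')

# A command-line template containing '%s' yields a GenericBrowser, or a
# BackgroundBrowser if it ends with '&' (availability of 'firefox' is assumed):
bg = webbrowser.get('firefox %s &')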
|
||
quantOS-org/DataCore
|
e2ef9bd2c22ee9e2845675b6435a14fa607f3551
|
dataserver/api/py/data_api.py
|
python
|
DataApi.unsubscribe
|
(self, symbol)
|
Unsubscribe securities.
Unsubscribe codes and return the list of subscribed codes.
|
Unsubscribe securities.
|
[
"Unsubscribe",
"securities",
"."
] |
def unsubscribe(self, symbol):
"""Unsubscribe securities.
    Unsubscribe codes and return the list of subscribed codes.
"""
assert False, "NOT IMPLEMENTED"
|
[
"def",
"unsubscribe",
"(",
"self",
",",
"symbol",
")",
":",
"assert",
"False",
",",
"\"NOT IMPLEMENTED\""
] |
https://github.com/quantOS-org/DataCore/blob/e2ef9bd2c22ee9e2845675b6435a14fa607f3551/dataserver/api/py/data_api.py#L422-L427
|
||
LiquidPlayer/LiquidCore
|
9405979363f2353ac9a71ad8ab59685dd7f919c9
|
deps/node-10.15.3/configure.py
|
python
|
host_arch_cc
|
()
|
return rtn
|
Host architecture check using the CC command.
|
Host architecture check using the CC command.
|
[
"Host",
"architecture",
"check",
"using",
"the",
"CC",
"command",
"."
] |
def host_arch_cc():
"""Host architecture check using the CC command."""
if sys.platform.startswith('aix'):
# we only support gcc at this point and the default on AIX
# would be xlc so hard code gcc
k = cc_macros('gcc')
else:
k = cc_macros(os.environ.get('CC_host'))
matchup = {
'__aarch64__' : 'arm64',
'__arm__' : 'arm',
'__i386__' : 'ia32',
'__MIPSEL__' : 'mipsel',
'__mips__' : 'mips',
'__PPC64__' : 'ppc64',
'__PPC__' : 'ppc64',
'__x86_64__' : 'x64',
'__s390__' : 's390',
'__s390x__' : 's390x',
}
rtn = 'ia32' # default
for i in matchup:
if i in k and k[i] != '0':
rtn = matchup[i]
if rtn != 's390':
break
if rtn == 'mipsel' and '_LP64' in k:
rtn = 'mips64el'
return rtn
|
[
"def",
"host_arch_cc",
"(",
")",
":",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'aix'",
")",
":",
"# we only support gcc at this point and the default on AIX",
"# would be xlc so hard code gcc",
"k",
"=",
"cc_macros",
"(",
"'gcc'",
")",
"else",
":",
"k",
"=",
"cc_macros",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'CC_host'",
")",
")",
"matchup",
"=",
"{",
"'__aarch64__'",
":",
"'arm64'",
",",
"'__arm__'",
":",
"'arm'",
",",
"'__i386__'",
":",
"'ia32'",
",",
"'__MIPSEL__'",
":",
"'mipsel'",
",",
"'__mips__'",
":",
"'mips'",
",",
"'__PPC64__'",
":",
"'ppc64'",
",",
"'__PPC__'",
":",
"'ppc64'",
",",
"'__x86_64__'",
":",
"'x64'",
",",
"'__s390__'",
":",
"'s390'",
",",
"'__s390x__'",
":",
"'s390x'",
",",
"}",
"rtn",
"=",
"'ia32'",
"# default",
"for",
"i",
"in",
"matchup",
":",
"if",
"i",
"in",
"k",
"and",
"k",
"[",
"i",
"]",
"!=",
"'0'",
":",
"rtn",
"=",
"matchup",
"[",
"i",
"]",
"if",
"rtn",
"!=",
"'s390'",
":",
"break",
"if",
"rtn",
"==",
"'mipsel'",
"and",
"'_LP64'",
"in",
"k",
":",
"rtn",
"=",
"'mips64el'",
"return",
"rtn"
] |
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/configure.py#L834-L868
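
An illustration of the matchup lookup above; cc_macros() is defined elsewhere in configure.py and returns the compiler's predefined macros, so this sketch just fakes its output for an x86-64 gcc:

k = {'__x86_64__': '1', '__SSE2__': '1'}   # what cc_macros() might return
rtn = 'ia32'                               # default, as in host_arch_cc()
matchup = {'__x86_64__': 'x64', '__i386__': 'ia32', '__aarch64__': 'arm64'}
for macro, arch in matchup.items():
    if macro in k and k[macro] != '0':
        rtn = arch
print(rtn)  # -> 'x64'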
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/eclib/infodlg.py
|
python
|
FileInfoDlg.SetFileTypeLabel
|
(self, lbl)
|
Set the file type label
@param lbl: string
|
Set the file type label
@param lbl: string
|
[
"Set",
"the",
"file",
"type",
"label",
"@param",
"lbl",
":",
"string"
] |
def SetFileTypeLabel(self, lbl):
"""Set the file type label
@param lbl: string
"""
self._ftype = lbl
self._ftxt.SetLabel(lbl)
self.panel.Layout()
|
[
"def",
"SetFileTypeLabel",
"(",
"self",
",",
"lbl",
")",
":",
"self",
".",
"_ftype",
"=",
"lbl",
"self",
".",
"_ftxt",
".",
"SetLabel",
"(",
"lbl",
")",
"self",
".",
"panel",
".",
"Layout",
"(",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/eclib/infodlg.py#L254-L261
|
||
NVIDIA/DALI
|
bf16cc86ba8f091b145f91962f21fe1b6aff243d
|
docs/examples/frameworks/mxnet/demo/common/fit.py
|
python
|
add_fit_args
|
(parser)
|
return train
|
parser : argparse.ArgumentParser
return a parser added with args required by fit
|
parser : argparse.ArgumentParser
return a parser added with args required by fit
|
[
"parser",
":",
"argparse",
".",
"ArgumentParser",
"return",
"a",
"parser",
"added",
"with",
"args",
"required",
"by",
"fit"
] |
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, \
required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--initializer', type=str, default='default',
help='the initializer type')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--loss', type=str, default='',
                       help='show the cross-entropy or nll loss. ce stands for cross-entropy, nll-loss stands for negative log-likelihood loss')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
train.add_argument('--dtype', type=str, default='float32',
help='precision: float32 or float16')
train.add_argument('--gc-type', type=str, default='none',
help='type of gradient compression to use, \
takes `2bit` or `none` for now')
train.add_argument('--gc-threshold', type=float, default=0.5,
help='threshold for 2bit gradient compression')
# additional parameters for large batch sgd
train.add_argument('--macrobatch-size', type=int, default=0,
help='distributed effective batch size')
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--warmup-strategy', type=str, default='linear',
help='the ramping-up strategy for large batch sgd')
return train
|
[
"def",
"add_fit_args",
"(",
"parser",
")",
":",
"train",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Training'",
",",
"'model training'",
")",
"train",
".",
"add_argument",
"(",
"'--network'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'the neural network to use'",
")",
"train",
".",
"add_argument",
"(",
"'--num-layers'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'number of layers in the neural network, \\\n required by some networks such as resnet'",
")",
"train",
".",
"add_argument",
"(",
"'--gpus'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu'",
")",
"train",
".",
"add_argument",
"(",
"'--kv-store'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'device'",
",",
"help",
"=",
"'key-value store type'",
")",
"train",
".",
"add_argument",
"(",
"'--num-epochs'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"100",
",",
"help",
"=",
"'max num of epochs'",
")",
"train",
".",
"add_argument",
"(",
"'--lr'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.1",
",",
"help",
"=",
"'initial learning rate'",
")",
"train",
".",
"add_argument",
"(",
"'--lr-factor'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.1",
",",
"help",
"=",
"'the ratio to reduce lr on each step'",
")",
"train",
".",
"add_argument",
"(",
"'--lr-step-epochs'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'the epochs to reduce the lr, e.g. 30,60'",
")",
"train",
".",
"add_argument",
"(",
"'--initializer'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'default'",
",",
"help",
"=",
"'the initializer type'",
")",
"train",
".",
"add_argument",
"(",
"'--optimizer'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'sgd'",
",",
"help",
"=",
"'the optimizer type'",
")",
"train",
".",
"add_argument",
"(",
"'--mom'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.9",
",",
"help",
"=",
"'momentum for sgd'",
")",
"train",
".",
"add_argument",
"(",
"'--wd'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.0001",
",",
"help",
"=",
"'weight decay for sgd'",
")",
"train",
".",
"add_argument",
"(",
"'--batch-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"128",
",",
"help",
"=",
"'the batch size'",
")",
"train",
".",
"add_argument",
"(",
"'--disp-batches'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"20",
",",
"help",
"=",
"'show progress for every n batches'",
")",
"train",
".",
"add_argument",
"(",
"'--model-prefix'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'model prefix'",
")",
"parser",
".",
"add_argument",
"(",
"'--monitor'",
",",
"dest",
"=",
"'monitor'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'log network parameters every N iters if larger than 0'",
")",
"train",
".",
"add_argument",
"(",
"'--load-epoch'",
",",
"type",
"=",
"int",
",",
"help",
"=",
"'load the model on an epoch using the model-load-prefix'",
")",
"train",
".",
"add_argument",
"(",
"'--top-k'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'report the top-k accuracy. 0 means no report.'",
")",
"train",
".",
"add_argument",
"(",
"'--loss'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"''",
",",
"help",
"=",
"'show the cross-entropy or nll loss. ce strands for cross-entropy, nll-loss stands for likelihood loss'",
")",
"train",
".",
"add_argument",
"(",
"'--test-io'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'1 means test reading speed without training'",
")",
"train",
".",
"add_argument",
"(",
"'--dtype'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'float32'",
",",
"help",
"=",
"'precision: float32 or float16'",
")",
"train",
".",
"add_argument",
"(",
"'--gc-type'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'none'",
",",
"help",
"=",
"'type of gradient compression to use, \\\n takes `2bit` or `none` for now'",
")",
"train",
".",
"add_argument",
"(",
"'--gc-threshold'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.5",
",",
"help",
"=",
"'threshold for 2bit gradient compression'",
")",
"# additional parameters for large batch sgd",
"train",
".",
"add_argument",
"(",
"'--macrobatch-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'distributed effective batch size'",
")",
"train",
".",
"add_argument",
"(",
"'--warmup-epochs'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5",
",",
"help",
"=",
"'the epochs to ramp-up lr to scaled large-batch value'",
")",
"train",
".",
"add_argument",
"(",
"'--warmup-strategy'",
",",
"type",
"=",
"str",
",",
"default",
"=",
"'linear'",
",",
"help",
"=",
"'the ramping-up strategy for large batch sgd'",
")",
"return",
"train"
] |
https://github.com/NVIDIA/DALI/blob/bf16cc86ba8f091b145f91962f21fe1b6aff243d/docs/examples/frameworks/mxnet/demo/common/fit.py#L77-L138
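
A sketch of wiring the argument group above into a parser; the argument values are arbitrary:

import argparse

parser = argparse.ArgumentParser(description='train a model')
add_fit_args(parser)
args = parser.parse_args(['--network', 'resnet', '--num-layers', '50',
                          '--gpus', '0,1', '--batch-size', '256'])
print(args.network, args.num_epochs)   # -> resnet 100 (the default)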
|
|
microsoft/TSS.MSR
|
0f2516fca2cd9929c31d5450e39301c9bde43688
|
TSS.Py/src/TpmTypes.py
|
python
|
TPMS_TAGGED_PROPERTY.initFromTpm
|
(self, buf)
|
TpmMarshaller method
|
TpmMarshaller method
|
[
"TpmMarshaller",
"method"
] |
def initFromTpm(self, buf):
""" TpmMarshaller method """
self.property = buf.readInt()
self.value = buf.readInt()
|
[
"def",
"initFromTpm",
"(",
"self",
",",
"buf",
")",
":",
"self",
".",
"property",
"=",
"buf",
".",
"readInt",
"(",
")",
"self",
".",
"value",
"=",
"buf",
".",
"readInt",
"(",
")"
] |
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L4310-L4313
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_core.py
|
python
|
MouseState.SetRightDown
|
(*args, **kwargs)
|
return _core_.MouseState_SetRightDown(*args, **kwargs)
|
SetRightDown(self, bool down)
|
SetRightDown(self, bool down)
|
[
"SetRightDown",
"(",
"self",
"bool",
"down",
")"
] |
def SetRightDown(*args, **kwargs):
"""SetRightDown(self, bool down)"""
return _core_.MouseState_SetRightDown(*args, **kwargs)
|
[
"def",
"SetRightDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"MouseState_SetRightDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L4502-L4504
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/distutils/dist.py
|
python
|
Distribution.reinitialize_command
|
(self, command, reinit_subcommands=0)
|
return command
|
Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
|
Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
|
[
"Reinitializes",
"a",
"command",
"to",
"the",
"state",
"it",
"was",
"in",
"when",
"first",
"returned",
"by",
"get_command_obj",
"()",
":",
"ie",
".",
"initialized",
"but",
"not",
"yet",
"finalized",
".",
"This",
"provides",
"the",
"opportunity",
"to",
"sneak",
"option",
"values",
"in",
"programmatically",
"overriding",
"or",
"supplementing",
"user",
"-",
"supplied",
"values",
"from",
"the",
"config",
"files",
"and",
"command",
"line",
".",
"You",
"ll",
"have",
"to",
"re",
"-",
"finalize",
"the",
"command",
"object",
"(",
"by",
"calling",
"finalize_options",
"()",
"or",
"ensure_finalized",
"()",
")",
"before",
"using",
"it",
"for",
"real",
"."
] |
def reinitialize_command(self, command, reinit_subcommands=0):
"""Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
"""
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
|
[
"def",
"reinitialize_command",
"(",
"self",
",",
"command",
",",
"reinit_subcommands",
"=",
"0",
")",
":",
"from",
"distutils",
".",
"cmd",
"import",
"Command",
"if",
"not",
"isinstance",
"(",
"command",
",",
"Command",
")",
":",
"command_name",
"=",
"command",
"command",
"=",
"self",
".",
"get_command_obj",
"(",
"command_name",
")",
"else",
":",
"command_name",
"=",
"command",
".",
"get_command_name",
"(",
")",
"if",
"not",
"command",
".",
"finalized",
":",
"return",
"command",
"command",
".",
"initialize_options",
"(",
")",
"command",
".",
"finalized",
"=",
"0",
"self",
".",
"have_run",
"[",
"command_name",
"]",
"=",
"0",
"self",
".",
"_set_command_options",
"(",
"command",
")",
"if",
"reinit_subcommands",
":",
"for",
"sub",
"in",
"command",
".",
"get_sub_commands",
"(",
")",
":",
"self",
".",
"reinitialize_command",
"(",
"sub",
",",
"reinit_subcommands",
")",
"return",
"command"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/distutils/dist.py#L916-L953
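
A sketch of the re-finalization cycle the docstring describes, using a bare Distribution:

from distutils.dist import Distribution

dist = Distribution({'name': 'demo', 'version': '0.1'})
build = dist.get_command_obj('build')
build.ensure_finalized()

build = dist.reinitialize_command('build')
build.build_base = 'alt_build'   # sneak in an option value programmatically
build.ensure_finalized()         # re-finalize before using the command for real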
|
|
cvmfs/cvmfs
|
4637bdb5153178eadf885c1acf37bdc5c685bf8a
|
cpplint.py
|
python
|
NestingState.InAsmBlock
|
(self)
|
return self.stack and self.stack[-1].inline_asm != _NO_ASM
|
Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
|
Check if we are currently one level inside an inline ASM block.
|
[
"Check",
"if",
"we",
"are",
"currently",
"one",
"level",
"inside",
"an",
"inline",
"ASM",
"block",
"."
] |
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
|
[
"def",
"InAsmBlock",
"(",
"self",
")",
":",
"return",
"self",
".",
"stack",
"and",
"self",
".",
"stack",
"[",
"-",
"1",
"]",
".",
"inline_asm",
"!=",
"_NO_ASM"
] |
https://github.com/cvmfs/cvmfs/blob/4637bdb5153178eadf885c1acf37bdc5c685bf8a/cpplint.py#L2258-L2264
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/telemetry/telemetry/internal/image_processing/screen_finder.py
|
python
|
ScreenFinder._DeDupCorners
|
(self, corner_data, corners)
|
De-duplicate corners based on corner_index.
For each set of points representing a corner: If one point is part of the
rectangle and the other is not, filter the other one. If both or none are
part of the rectangle, filter based on score (highest relative brightness
of a quadrant). The reason we allow for neither to be part of the
rectangle is because we may not have found all four corners of the
rectangle, and in degenerate cases like this it's better to find 3 likely
corners than none.
Modifies corner_data directly.
Args:
corner_data: CornerData for each potential corner in the frame.
corners: List of all potential corners in the frame.
|
De-duplicate corners based on corner_index.
|
[
"De",
"-",
"duplicate",
"corners",
"based",
"on",
"corner_index",
"."
] |
def _DeDupCorners(self, corner_data, corners):
"""De-duplicate corners based on corner_index.
For each set of points representing a corner: If one point is part of the
rectangle and the other is not, filter the other one. If both or none are
part of the rectangle, filter based on score (highest relative brightness
of a quadrant). The reason we allow for neither to be part of the
rectangle is because we may not have found all four corners of the
rectangle, and in degenerate cases like this it's better to find 3 likely
corners than none.
Modifies corner_data directly.
Args:
corner_data: CornerData for each potential corner in the frame.
corners: List of all potential corners in the frame."""
# TODO(mthiesse): Ensure that the corners form a sensible rectangle. For
# example, it is currently possible (but unlikely) to detect a 'screen'
# where the bottom-left corner is above the top-left corner, while the
# bottom-right corner is below the top-right corner.
# Sort by corner_index to make de-duping easier.
corner_data.sort()
# De-dup corners.
c_old = None
for i in xrange(len(corner_data) - 1, 0, -1):
if corner_data[i].corner_index != corner_data[i - 1].corner_index:
c_old = None
continue
if c_old is None:
point_info = (corner_data[i].corner_location,
corner_data[i].line1,
corner_data[i].line2)
c_old = self._PointConnectsToCorners(corners, point_info, 2)
point_info_new = (corner_data[i - 1].corner_location,
corner_data[i - 1].line1,
corner_data[i - 1].line2)
c_new = self._PointConnectsToCorners(corners, point_info_new, 2)
if (not (c_old or c_new)) or (c_old and c_new):
if (corner_data[i].brightness_score <
corner_data[i - 1].brightness_score):
del corner_data[i]
c_old = c_new
else:
del corner_data[i - 1]
elif c_old:
del corner_data[i - 1]
else:
del corner_data[i]
c_old = c_new
|
[
"def",
"_DeDupCorners",
"(",
"self",
",",
"corner_data",
",",
"corners",
")",
":",
"# TODO(mthiesse): Ensure that the corners form a sensible rectangle. For",
"# example, it is currently possible (but unlikely) to detect a 'screen'",
"# where the bottom-left corner is above the top-left corner, while the",
"# bottom-right corner is below the top-right corner.",
"# Sort by corner_index to make de-duping easier.",
"corner_data",
".",
"sort",
"(",
")",
"# De-dup corners.",
"c_old",
"=",
"None",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"corner_data",
")",
"-",
"1",
",",
"0",
",",
"-",
"1",
")",
":",
"if",
"corner_data",
"[",
"i",
"]",
".",
"corner_index",
"!=",
"corner_data",
"[",
"i",
"-",
"1",
"]",
".",
"corner_index",
":",
"c_old",
"=",
"None",
"continue",
"if",
"c_old",
"is",
"None",
":",
"point_info",
"=",
"(",
"corner_data",
"[",
"i",
"]",
".",
"corner_location",
",",
"corner_data",
"[",
"i",
"]",
".",
"line1",
",",
"corner_data",
"[",
"i",
"]",
".",
"line2",
")",
"c_old",
"=",
"self",
".",
"_PointConnectsToCorners",
"(",
"corners",
",",
"point_info",
",",
"2",
")",
"point_info_new",
"=",
"(",
"corner_data",
"[",
"i",
"-",
"1",
"]",
".",
"corner_location",
",",
"corner_data",
"[",
"i",
"-",
"1",
"]",
".",
"line1",
",",
"corner_data",
"[",
"i",
"-",
"1",
"]",
".",
"line2",
")",
"c_new",
"=",
"self",
".",
"_PointConnectsToCorners",
"(",
"corners",
",",
"point_info_new",
",",
"2",
")",
"if",
"(",
"not",
"(",
"c_old",
"or",
"c_new",
")",
")",
"or",
"(",
"c_old",
"and",
"c_new",
")",
":",
"if",
"(",
"corner_data",
"[",
"i",
"]",
".",
"brightness_score",
"<",
"corner_data",
"[",
"i",
"-",
"1",
"]",
".",
"brightness_score",
")",
":",
"del",
"corner_data",
"[",
"i",
"]",
"c_old",
"=",
"c_new",
"else",
":",
"del",
"corner_data",
"[",
"i",
"-",
"1",
"]",
"elif",
"c_old",
":",
"del",
"corner_data",
"[",
"i",
"-",
"1",
"]",
"else",
":",
"del",
"corner_data",
"[",
"i",
"]",
"c_old",
"=",
"c_new"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/telemetry/internal/image_processing/screen_finder.py#L518-L568
|
||
PaddlePaddle/Anakin
|
5fd68a6cc4c4620cd1a30794c1bf06eebd3f4730
|
tools/external_converter_v2/parser/onnx/onnx_trans_utils.py
|
python
|
parse_Lrn
|
(onnx_node, weights, graph)
|
parse LRN
:param onnx_node:
:param weights:
:param graph:
:return:
|
parse LRN
:param onnx_node:
:param weights:
:param graph:
:return:
|
[
"parse",
"LRN",
":",
"param",
"onnx_node",
":",
":",
"param",
"weights",
":",
":",
"param",
"graph",
":",
":",
"return",
":"
] |
def parse_Lrn(onnx_node, weights, graph):
"""
parse LRN
:param onnx_node:
:param weights:
:param graph:
:return:
"""
onnx_node['visited'] = True
onnx_node['ak_type'] = 'LRN'
ak_attr = onnx_node['ak_attr']
onnx_attr = onnx_node['onnx_attr']
local_size = 0
if 'size' in onnx_attr.keys():
local_size = onnx_attr['size']
alpha = 0.0001
if 'alpha' in onnx_attr.keys():
alpha = onnx_attr['alpha']
beta = 0.75
if 'beta' in onnx_attr.keys():
beta = onnx_attr['beta']
k = 1
if 'bias' in onnx_attr.keys():
k = onnx_attr['bias']
ak_attr['local_size'] = local_size
ak_attr['alpha'] = alpha / local_size
ak_attr['beta'] = beta
ak_attr['k'] = k
|
[
"def",
"parse_Lrn",
"(",
"onnx_node",
",",
"weights",
",",
"graph",
")",
":",
"onnx_node",
"[",
"'visited'",
"]",
"=",
"True",
"onnx_node",
"[",
"'ak_type'",
"]",
"=",
"'LRN'",
"ak_attr",
"=",
"onnx_node",
"[",
"'ak_attr'",
"]",
"onnx_attr",
"=",
"onnx_node",
"[",
"'onnx_attr'",
"]",
"local_size",
"=",
"0",
"if",
"'size'",
"in",
"onnx_attr",
".",
"keys",
"(",
")",
":",
"local_size",
"=",
"onnx_attr",
"[",
"'size'",
"]",
"alpha",
"=",
"0.0001",
"if",
"'alpha'",
"in",
"onnx_attr",
".",
"keys",
"(",
")",
":",
"alpha",
"=",
"onnx_attr",
"[",
"'alpha'",
"]",
"beta",
"=",
"0.75",
"if",
"'beta'",
"in",
"onnx_attr",
".",
"keys",
"(",
")",
":",
"beta",
"=",
"onnx_attr",
"[",
"'beta'",
"]",
"k",
"=",
"1",
"if",
"'bias'",
"in",
"onnx_attr",
".",
"keys",
"(",
")",
":",
"k",
"=",
"onnx_attr",
"[",
"'bias'",
"]",
"ak_attr",
"[",
"'local_size'",
"]",
"=",
"local_size",
"ak_attr",
"[",
"'alpha'",
"]",
"=",
"alpha",
"/",
"local_size",
"ak_attr",
"[",
"'beta'",
"]",
"=",
"beta",
"ak_attr",
"[",
"'k'",
"]",
"=",
"k"
] |
https://github.com/PaddlePaddle/Anakin/blob/5fd68a6cc4c4620cd1a30794c1bf06eebd3f4730/tools/external_converter_v2/parser/onnx/onnx_trans_utils.py#L1194-L1221
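
A sketch of the node dictionary this parser mutates; the structure is inferred from the accesses above (weights and graph are unused here, so None is passed):

onnx_node = {
    'visited': False,
    'ak_type': None,
    'ak_attr': {},
    'onnx_attr': {'size': 5, 'alpha': 0.0001, 'beta': 0.75, 'bias': 1.0},
}
parse_Lrn(onnx_node, None, None)
print(onnx_node['ak_attr'])   # note alpha is rescaled to alpha/size = 2e-05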
|
||
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib2to3/pytree.py
|
python
|
LeafPattern.match
|
(self, node, results=None)
|
return BasePattern.match(self, node, results)
|
Override match() to insist on a leaf node.
|
Override match() to insist on a leaf node.
|
[
"Override",
"match",
"()",
"to",
"insist",
"on",
"a",
"leaf",
"node",
"."
] |
def match(self, node, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
|
[
"def",
"match",
"(",
"self",
",",
"node",
",",
"results",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"node",
",",
"Leaf",
")",
":",
"return",
"False",
"return",
"BasePattern",
".",
"match",
"(",
"self",
",",
"node",
",",
"results",
")"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib2to3/pytree.py#L556-L560
|
|
ricardoquesada/Spidermonkey
|
4a75ea2543408bd1b2c515aa95901523eeef7858
|
python/psutil/psutil/_psmswindows.py
|
python
|
disk_partitions
|
(all)
|
return [nt_partition(*x) for x in rawlist]
|
Return disk partitions.
|
Return disk partitions.
|
[
"Return",
"disk",
"partitions",
"."
] |
def disk_partitions(all):
"""Return disk partitions."""
rawlist = _psutil_mswindows.get_disk_partitions(all)
return [nt_partition(*x) for x in rawlist]
|
[
"def",
"disk_partitions",
"(",
"all",
")",
":",
"rawlist",
"=",
"_psutil_mswindows",
".",
"get_disk_partitions",
"(",
"all",
")",
"return",
"[",
"nt_partition",
"(",
"*",
"x",
")",
"for",
"x",
"in",
"rawlist",
"]"
] |
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/psutil/psutil/_psmswindows.py#L139-L142
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/arrays/categorical.py
|
python
|
Categorical.shift
|
(self, periods, fill_value=None)
|
return self.from_codes(codes, dtype=self.dtype)
|
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
|
Shift Categorical by desired number of periods.
|
[
"Shift",
"Categorical",
"by",
"desired",
"number",
"of",
"periods",
"."
] |
def shift(self, periods, fill_value=None):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
fill_value : object, optional
The scalar value to use for newly introduced missing values.
.. versionadded:: 0.24.0
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError(
f"'fill_value={fill_value}' is not present "
"in this Categorical's categories"
)
if periods > 0:
codes[:periods] = fill_value
else:
codes[periods:] = fill_value
return self.from_codes(codes, dtype=self.dtype)
|
[
"def",
"shift",
"(",
"self",
",",
"periods",
",",
"fill_value",
"=",
"None",
")",
":",
"# since categoricals always have ndim == 1, an axis parameter",
"# doesn't make any sense here.",
"codes",
"=",
"self",
".",
"codes",
"if",
"codes",
".",
"ndim",
">",
"1",
":",
"raise",
"NotImplementedError",
"(",
"\"Categorical with ndim > 1.\"",
")",
"if",
"np",
".",
"prod",
"(",
"codes",
".",
"shape",
")",
"and",
"(",
"periods",
"!=",
"0",
")",
":",
"codes",
"=",
"np",
".",
"roll",
"(",
"codes",
",",
"ensure_platform_int",
"(",
"periods",
")",
",",
"axis",
"=",
"0",
")",
"if",
"isna",
"(",
"fill_value",
")",
":",
"fill_value",
"=",
"-",
"1",
"elif",
"fill_value",
"in",
"self",
".",
"categories",
":",
"fill_value",
"=",
"self",
".",
"categories",
".",
"get_loc",
"(",
"fill_value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"f\"'fill_value={fill_value}' is not present \"",
"\"in this Categorical's categories\"",
")",
"if",
"periods",
">",
"0",
":",
"codes",
"[",
":",
"periods",
"]",
"=",
"fill_value",
"else",
":",
"codes",
"[",
"periods",
":",
"]",
"=",
"fill_value",
"return",
"self",
".",
"from_codes",
"(",
"codes",
",",
"dtype",
"=",
"self",
".",
"dtype",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/arrays/categorical.py#L1233-L1271
|
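A hedged usage sketch through the public pandas API of the era this file vendors; the commented outputs follow directly from the roll-and-fill logic on the codes above:

import pandas as pd

cat = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"])
print(cat.shift(1, fill_value="a"))  # ['a', 'a', 'b'] -- vacated slot filled from categories
print(cat.shift(-1))                 # ['b', 'c', NaN] -- default fill is missing (code -1)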
|
mongodb/mongo
|
d8ff665343ad29cf286ee2cf4a1960d29371937b
|
buildscripts/resmokelib/logging/buildlogger.py
|
python
|
BuildloggerServer.get_test_handler
|
(self, build_id, test_id, handler_info)
|
return BuildloggerTestHandler(self.config, build_id, test_id, **handler_info)
|
Return the test handler.
|
Return the test handler.
|
[
"Return",
"the",
"test",
"handler",
"."
] |
def get_test_handler(self, build_id, test_id, handler_info):
"""Return the test handler."""
return BuildloggerTestHandler(self.config, build_id, test_id, **handler_info)
|
[
"def",
"get_test_handler",
"(",
"self",
",",
"build_id",
",",
"test_id",
",",
"handler_info",
")",
":",
"return",
"BuildloggerTestHandler",
"(",
"self",
".",
"config",
",",
"build_id",
",",
"test_id",
",",
"*",
"*",
"handler_info",
")"
] |
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/logging/buildlogger.py#L324-L326
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/contrib/solvers/python/ops/util.py
|
python
|
create_operator
|
(matrix)
|
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: math_ops.matmul(matrix, v, adjoint_a=False),
apply_adjoint=lambda v: math_ops.matmul(matrix, v, adjoint_a=True))
|
Creates a linear operator from a rank-2 tensor.
|
Creates a linear operator from a rank-2 tensor.
|
[
"Creates",
"a",
"linear",
"operator",
"from",
"a",
"rank",
"-",
"2",
"tensor",
"."
] |
def create_operator(matrix):
"""Creates a linear operator from a rank-2 tensor."""
linear_operator = collections.namedtuple(
"LinearOperator", ["shape", "dtype", "apply", "apply_adjoint"])
# TODO(rmlarsen): Handle SparseTensor.
shape = matrix.get_shape()
if shape.is_fully_defined():
shape = shape.as_list()
else:
shape = array_ops.shape(matrix)
return linear_operator(
shape=shape,
dtype=matrix.dtype,
apply=lambda v: math_ops.matmul(matrix, v, adjoint_a=False),
apply_adjoint=lambda v: math_ops.matmul(matrix, v, adjoint_a=True))
|
[
"def",
"create_operator",
"(",
"matrix",
")",
":",
"linear_operator",
"=",
"collections",
".",
"namedtuple",
"(",
"\"LinearOperator\"",
",",
"[",
"\"shape\"",
",",
"\"dtype\"",
",",
"\"apply\"",
",",
"\"apply_adjoint\"",
"]",
")",
"# TODO(rmlarsen): Handle SparseTensor.",
"shape",
"=",
"matrix",
".",
"get_shape",
"(",
")",
"if",
"shape",
".",
"is_fully_defined",
"(",
")",
":",
"shape",
"=",
"shape",
".",
"as_list",
"(",
")",
"else",
":",
"shape",
"=",
"array_ops",
".",
"shape",
"(",
"matrix",
")",
"return",
"linear_operator",
"(",
"shape",
"=",
"shape",
",",
"dtype",
"=",
"matrix",
".",
"dtype",
",",
"apply",
"=",
"lambda",
"v",
":",
"math_ops",
".",
"matmul",
"(",
"matrix",
",",
"v",
",",
"adjoint_a",
"=",
"False",
")",
",",
"apply_adjoint",
"=",
"lambda",
"v",
":",
"math_ops",
".",
"matmul",
"(",
"matrix",
",",
"v",
",",
"adjoint_a",
"=",
"True",
")",
")"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/solvers/python/ops/util.py#L29-L45
|
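The namedtuple the factory returns is easiest to see in use; a sketch assuming the same TF 1.x graph-mode environment the module targets:

import tensorflow as tf

matrix = tf.constant([[2.0, 0.0], [0.0, 3.0]])
op = create_operator(matrix)
v = tf.constant([[1.0], [1.0]])
mv = op.apply(v)           # matmul(matrix, v)
mtv = op.apply_adjoint(v)  # matmul(matrix, v, adjoint_a=True)
print(op.shape, op.dtype)  # [2, 2] <dtype: 'float32'>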
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/subprocess.py
|
python
|
check_call
|
(*popenargs, **kwargs)
|
return 0
|
Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
|
Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
|
[
"Run",
"command",
"with",
"arguments",
".",
"Wait",
"for",
"command",
"to",
"complete",
".",
"If",
"the",
"exit",
"code",
"was",
"zero",
"then",
"return",
"otherwise",
"raise",
"CalledProcessError",
".",
"The",
"CalledProcessError",
"object",
"will",
"have",
"the",
"return",
"code",
"in",
"the",
"returncode",
"attribute",
"."
] |
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
|
[
"def",
"check_call",
"(",
"*",
"popenargs",
",",
"*",
"*",
"kwargs",
")",
":",
"retcode",
"=",
"call",
"(",
"*",
"popenargs",
",",
"*",
"*",
"kwargs",
")",
"if",
"retcode",
":",
"cmd",
"=",
"kwargs",
".",
"get",
"(",
"\"args\"",
")",
"if",
"cmd",
"is",
"None",
":",
"cmd",
"=",
"popenargs",
"[",
"0",
"]",
"raise",
"CalledProcessError",
"(",
"retcode",
",",
"cmd",
")",
"return",
"0"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/subprocess.py#L175-L191
|
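A usage sketch: the point of check_call over call is turning a non-zero exit status into an exception that carries the command and return code:

try:
    check_call(["false"])
except CalledProcessError as e:
    print("command %r exited with status %d" % (e.cmd, e.returncode))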
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/scipy/signal/ltisys.py
|
python
|
TransferFunction._zinv_to_z
|
(num, den)
|
return num, den
|
Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
|
Change a transfer function from the variable `z` to `z**-1`.
|
[
"Change",
"a",
"transfer",
"function",
"from",
"the",
"variable",
"z",
"to",
"z",
"**",
"-",
"1",
"."
] |
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
|
[
"def",
"_zinv_to_z",
"(",
"num",
",",
"den",
")",
":",
"diff",
"=",
"len",
"(",
"num",
")",
"-",
"len",
"(",
"den",
")",
"if",
"diff",
">",
"0",
":",
"den",
"=",
"np",
".",
"hstack",
"(",
"(",
"den",
",",
"np",
".",
"zeros",
"(",
"diff",
")",
")",
")",
"elif",
"diff",
"<",
"0",
":",
"num",
"=",
"np",
".",
"hstack",
"(",
"(",
"num",
",",
"np",
".",
"zeros",
"(",
"-",
"diff",
")",
")",
")",
"return",
"num",
",",
"den"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/ltisys.py#L874-L896
|
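A worked instance of the padding rule, assuming the staticmethod is callable as shown: H(z^-1) = (5 + 3 z^-1) / (1 + 2 z^-1 + 4 z^-2) needs one trailing zero on the numerator so both sequences describe polynomials of the same degree in z:

num, den = TransferFunction._zinv_to_z([5, 3], [1, 2, 4])
print(num)  # [5. 3. 0.]  i.e. 5 z**2 + 3 z + 0
print(den)  # [1, 2, 4]   i.e. 1 z**2 + 2 z + 4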
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/html.py
|
python
|
HtmlRenderingStyle.GetSelectedTextBgColour
|
(*args, **kwargs)
|
return _html.HtmlRenderingStyle_GetSelectedTextBgColour(*args, **kwargs)
|
GetSelectedTextBgColour(self, Colour clr) -> Colour
|
GetSelectedTextBgColour(self, Colour clr) -> Colour
|
[
"GetSelectedTextBgColour",
"(",
"self",
"Colour",
"clr",
")",
"-",
">",
"Colour"
] |
def GetSelectedTextBgColour(*args, **kwargs):
"""GetSelectedTextBgColour(self, Colour clr) -> Colour"""
return _html.HtmlRenderingStyle_GetSelectedTextBgColour(*args, **kwargs)
|
[
"def",
"GetSelectedTextBgColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html",
".",
"HtmlRenderingStyle_GetSelectedTextBgColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/html.py#L547-L549
|
|
pytorch/pytorch
|
7176c92687d3cc847cc046bf002269c6949a21c2
|
torch/distributed/optim/zero_redundancy_optimizer.py
|
python
|
ZeroRedundancyOptimizer._build_param_buckets
|
(self)
|
r"""
Builds parameter buckets if ``parameters_as_bucket_view=True`` so
that for each device that stores this rank's parameters, there is a
bucket (represented as a tensor) containing all of the parameters on
that device that are assigned to a given rank in the parameter update
partition.
This method is called in the constructor and any time parameter
trainability is changed.
.. warning::
The current implementation assumes that all of the parameters in a
bucket are of the same dense type when allocating the bucket's
tensor.
.. warning::
If the model parameters are stored across more than one device,
then the storage partitioning must be the same across all
processes in order for parameter synchronization to work.
|
r"""
Builds parameter buckets if ``parameters_as_bucket_view=True`` so
that for each device that stores this rank's parameters, there is a
bucket (represented as a tensor) containing all of the parameters on
that device that are assigned to a given rank in the parameter update
partition.
|
[
"r",
"Builds",
"parameter",
"buckets",
"if",
"parameters_as_bucket_view",
"=",
"True",
"so",
"that",
"for",
"each",
"device",
"that",
"stores",
"this",
"rank",
"s",
"parameters",
"there",
"is",
"a",
"bucket",
"(",
"represented",
"as",
"a",
"tensor",
")",
"containing",
"all",
"of",
"the",
"parameters",
"on",
"that",
"device",
"that",
"are",
"assigned",
"to",
"a",
"given",
"rank",
"in",
"the",
"parameter",
"update",
"partition",
"."
] |
def _build_param_buckets(self) -> None:
r"""
Builds parameter buckets if ``parameters_as_bucket_view=True`` so
that for each device that stores this rank's parameters, there is a
bucket (represented as a tensor) containing all of the parameters on
that device that are assigned to a given rank in the parameter update
partition.
This method is called in the constructor and any time parameter
trainability is changed.
.. warning::
The current implementation assumes that all of the parameters in a
bucket are of the same dense type when allocating the bucket's
tensor.
.. warning::
If the model parameters are stored across more than one device,
then the storage partitioning must be the same across all
processes in order for parameter synchronization to work.
"""
if not self.parameters_as_bucket_view or self._overlap_with_ddp:
return
# `self._buckets[i][j]` are the parameters stored on device i and
# assigned to rank j
num_devices = len(self._device_to_params_per_rank)
self._buckets = [[] for _ in range(num_devices)] # type: ignore[assignment]
for dev_i, (device, params_per_rank) in enumerate(self._device_to_params_per_rank.items()):
for params in params_per_rank:
bucket_size = 0
dtype = None
trainable_params = []
for param in params:
if not _is_trainable(param):
# Clone in case the parameter was previously part of
# a bucket to avoid the data from being destroyed
param.data = param.data.detach().clone()
else:
bucket_size += param.numel()
trainable_params.append(param)
dtype = param.dtype # assumes all same dtype
if bucket_size == 0:
# Create a dummy bucket if there are no parameters
bucket = torch.zeros(1, device=device)
else:
# Construct the bucket (assuming all dense and same dtype)
bucket = torch.empty(bucket_size, dtype=dtype, device=device)
offset = 0
for param in trainable_params:
offset_next = offset + param.numel()
bucket[offset:offset_next].copy_(param.data.flatten())
param.data = bucket[offset:offset_next].view_as(param.data)
offset = offset_next
self._buckets[dev_i].append(bucket)
|
[
"def",
"_build_param_buckets",
"(",
"self",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"parameters_as_bucket_view",
"or",
"self",
".",
"_overlap_with_ddp",
":",
"return",
"# `self._buckets[i][j]` are the parameters stored on device i and",
"# assigned to rank j",
"num_devices",
"=",
"len",
"(",
"self",
".",
"_device_to_params_per_rank",
")",
"self",
".",
"_buckets",
"=",
"[",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"num_devices",
")",
"]",
"# type: ignore[assignment]",
"for",
"dev_i",
",",
"(",
"device",
",",
"params_per_rank",
")",
"in",
"enumerate",
"(",
"self",
".",
"_device_to_params_per_rank",
".",
"items",
"(",
")",
")",
":",
"for",
"params",
"in",
"params_per_rank",
":",
"bucket_size",
"=",
"0",
"dtype",
"=",
"None",
"trainable_params",
"=",
"[",
"]",
"for",
"param",
"in",
"params",
":",
"if",
"not",
"_is_trainable",
"(",
"param",
")",
":",
"# Clone in case the parameter was previously part of",
"# a bucket to avoid the data from being destroyed",
"param",
".",
"data",
"=",
"param",
".",
"data",
".",
"detach",
"(",
")",
".",
"clone",
"(",
")",
"else",
":",
"bucket_size",
"+=",
"param",
".",
"numel",
"(",
")",
"trainable_params",
".",
"append",
"(",
"param",
")",
"dtype",
"=",
"param",
".",
"dtype",
"# assumes all same dtype",
"if",
"bucket_size",
"==",
"0",
":",
"# Create a dummy bucket if there are no parameters",
"bucket",
"=",
"torch",
".",
"zeros",
"(",
"1",
",",
"device",
"=",
"device",
")",
"else",
":",
"# Construct the bucket (assuming all dense and same dtype)",
"bucket",
"=",
"torch",
".",
"empty",
"(",
"bucket_size",
",",
"dtype",
"=",
"dtype",
",",
"device",
"=",
"device",
")",
"offset",
"=",
"0",
"for",
"param",
"in",
"trainable_params",
":",
"offset_next",
"=",
"offset",
"+",
"param",
".",
"numel",
"(",
")",
"bucket",
"[",
"offset",
":",
"offset_next",
"]",
".",
"copy_",
"(",
"param",
".",
"data",
".",
"flatten",
"(",
")",
")",
"param",
".",
"data",
"=",
"bucket",
"[",
"offset",
":",
"offset_next",
"]",
".",
"view_as",
"(",
"param",
".",
"data",
")",
"offset",
"=",
"offset_next",
"self",
".",
"_buckets",
"[",
"dev_i",
"]",
".",
"append",
"(",
"bucket",
")"
] |
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/optim/zero_redundancy_optimizer.py#L1196-L1252
|
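The core trick above, re-pointing each parameter's storage at a slice of one flat tensor, in a minimal standalone torch sketch:

import torch

params = [torch.randn(3), torch.randn(2, 2)]
bucket = torch.empty(sum(p.numel() for p in params))
offset = 0
for p in params:
    end = offset + p.numel()
    bucket[offset:end].copy_(p.flatten())
    p.data = bucket[offset:end].view_as(p)  # p now aliases the bucket
    offset = end
assert params[0].data_ptr() == bucket.data_ptr()  # shares storage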
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tarfile.py
|
python
|
TarFile.chown
|
(self, tarinfo, targetpath, numeric_owner)
|
Set owner of targetpath according to tarinfo. If numeric_owner
is True, use .gid/.uid instead of .gname/.uname. If numeric_owner
is False, fall back to .gid/.uid when the search based on name
fails.
|
Set owner of targetpath according to tarinfo. If numeric_owner
is True, use .gid/.uid instead of .gname/.uname. If numeric_owner
is False, fall back to .gid/.uid when the search based on name
fails.
|
[
"Set",
"owner",
"of",
"targetpath",
"according",
"to",
"tarinfo",
".",
"If",
"numeric_owner",
"is",
"True",
"use",
".",
"gid",
"/",
".",
"uid",
"instead",
"of",
".",
"gname",
"/",
".",
"uname",
".",
"If",
"numeric_owner",
"is",
"False",
"fall",
"back",
"to",
".",
"gid",
"/",
".",
"uid",
"when",
"the",
"search",
"based",
"on",
"name",
"fails",
"."
] |
def chown(self, tarinfo, targetpath, numeric_owner):
"""Set owner of targetpath according to tarinfo. If numeric_owner
is True, use .gid/.uid instead of .gname/.uname. If numeric_owner
is False, fall back to .gid/.uid when the search based on name
fails.
"""
if hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
g = tarinfo.gid
u = tarinfo.uid
if not numeric_owner:
try:
if grp:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
pass
try:
if pwd:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
pass
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
os.chown(targetpath, u, g)
except OSError:
raise ExtractError("could not change owner")
|
[
"def",
"chown",
"(",
"self",
",",
"tarinfo",
",",
"targetpath",
",",
"numeric_owner",
")",
":",
"if",
"hasattr",
"(",
"os",
",",
"\"geteuid\"",
")",
"and",
"os",
".",
"geteuid",
"(",
")",
"==",
"0",
":",
"# We have to be root to do so.",
"g",
"=",
"tarinfo",
".",
"gid",
"u",
"=",
"tarinfo",
".",
"uid",
"if",
"not",
"numeric_owner",
":",
"try",
":",
"if",
"grp",
":",
"g",
"=",
"grp",
".",
"getgrnam",
"(",
"tarinfo",
".",
"gname",
")",
"[",
"2",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"if",
"pwd",
":",
"u",
"=",
"pwd",
".",
"getpwnam",
"(",
"tarinfo",
".",
"uname",
")",
"[",
"2",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"if",
"tarinfo",
".",
"issym",
"(",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"lchown\"",
")",
":",
"os",
".",
"lchown",
"(",
"targetpath",
",",
"u",
",",
"g",
")",
"else",
":",
"os",
".",
"chown",
"(",
"targetpath",
",",
"u",
",",
"g",
")",
"except",
"OSError",
":",
"raise",
"ExtractError",
"(",
"\"could not change owner\"",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tarfile.py#L2219-L2246
|
||
eclipse/sumo
|
7132a9b8b6eea734bdec38479026b4d8c4336d03
|
tools/traci/_vehicletype.py
|
python
|
VehicleTypeDomain.getShapeClass
|
(self, typeID)
|
return self._getUniversal(tc.VAR_SHAPECLASS, typeID)
|
getShapeClass(string) -> string
Returns the shape class of vehicles of this type.
|
getShapeClass(string) -> string
|
[
"getShapeClass",
"(",
"string",
")",
"-",
">",
"string"
] |
def getShapeClass(self, typeID):
"""getShapeClass(string) -> string
Returns the shape class of vehicles of this type.
"""
return self._getUniversal(tc.VAR_SHAPECLASS, typeID)
|
[
"def",
"getShapeClass",
"(",
"self",
",",
"typeID",
")",
":",
"return",
"self",
".",
"_getUniversal",
"(",
"tc",
".",
"VAR_SHAPECLASS",
",",
"typeID",
")"
] |
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/traci/_vehicletype.py#L123-L128
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scikit-learn/py3/sklearn/preprocessing/_data.py
|
python
|
_handle_zeros_in_scale
|
(scale, copy=True)
|
Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.
|
Makes sure that whenever scale is zero, we handle it correctly.
|
[
"Makes",
"sure",
"that",
"whenever",
"scale",
"is",
"zero",
"we",
"handle",
"it",
"correctly",
"."
] |
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
|
[
"def",
"_handle_zeros_in_scale",
"(",
"scale",
",",
"copy",
"=",
"True",
")",
":",
"# if we are fitting on 1D arrays, scale might be a scalar",
"if",
"np",
".",
"isscalar",
"(",
"scale",
")",
":",
"if",
"scale",
"==",
".0",
":",
"scale",
"=",
"1.",
"return",
"scale",
"elif",
"isinstance",
"(",
"scale",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"copy",
":",
"# New array to avoid side-effects",
"scale",
"=",
"scale",
".",
"copy",
"(",
")",
"scale",
"[",
"scale",
"==",
"0.0",
"]",
"=",
"1.0",
"return",
"scale"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/preprocessing/_data.py#L63-L78
|
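Behaviour sketch: zero scales are replaced by 1 so the caller's subsequent division is a no-op on constant features instead of producing inf or nan:

import numpy as np

print(_handle_zeros_in_scale(0.0))                   # 1.0
print(_handle_zeros_in_scale(np.array([2.0, 0.0])))  # [2. 1.]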
||
apple/turicreate
|
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
|
src/external/boost/boost_1_68_0/tools/build/src/build/type.py
|
python
|
register_suffixes
|
(suffixes, type)
|
Specifies that targets with suffix from 'suffixes' have the type 'type'.
If a different type is already specified for any of the suffixes, issues an error.
|
Specifies that targets with suffix from 'suffixes' have the type 'type'.
If a different type is already specified for any of the suffixes, issues an error.
|
[
"Specifies",
"that",
"targets",
"with",
"suffix",
"from",
"suffixes",
"have",
"the",
"type",
"type",
".",
"If",
"a",
"different",
"type",
"is",
"already",
"specified",
"for",
"any",
"of",
"syffixes",
"issues",
"an",
"error",
"."
] |
def register_suffixes (suffixes, type):
""" Specifies that targets with suffix from 'suffixes' have the type 'type'.
If a different type is already specified for any of the suffixes, issues an error.
"""
assert is_iterable_typed(suffixes, basestring)
assert isinstance(type, basestring)
for s in suffixes:
if s in __suffixes_to_types:
old_type = __suffixes_to_types [s]
if old_type != type:
raise BaseException ('Attempting to specify type for suffix "%s"\nOld type: "%s", New type "%s"' % (s, old_type, type))
else:
__suffixes_to_types [s] = type
|
[
"def",
"register_suffixes",
"(",
"suffixes",
",",
"type",
")",
":",
"assert",
"is_iterable_typed",
"(",
"suffixes",
",",
"basestring",
")",
"assert",
"isinstance",
"(",
"type",
",",
"basestring",
")",
"for",
"s",
"in",
"suffixes",
":",
"if",
"s",
"in",
"__suffixes_to_types",
":",
"old_type",
"=",
"__suffixes_to_types",
"[",
"s",
"]",
"if",
"old_type",
"!=",
"type",
":",
"raise",
"BaseException",
"(",
"'Attempting to specify type for suffix \"%s\"\\nOld type: \"%s\", New type \"%s\"'",
"%",
"(",
"s",
",",
"old_type",
",",
"type",
")",
")",
"else",
":",
"__suffixes_to_types",
"[",
"s",
"]",
"=",
"type"
] |
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/boost/boost_1_68_0/tools/build/src/build/type.py#L123-L135
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python3/src/Lib/turtle.py
|
python
|
TNavigator.circle
|
(self, radius, extent = None, steps = None)
|
Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
|
Draw a circle with given radius.
|
[
"Draw",
"a",
"circle",
"with",
"given",
"radius",
"."
] |
def circle(self, radius, extent = None, steps = None):
""" Draw a circle with given radius.
Arguments:
radius -- a number
extent (optional) -- a number
steps (optional) -- an integer
Draw a circle with given radius. The center is radius units left
of the turtle; extent - an angle - determines which part of the
circle is drawn. If extent is not given, draw the entire circle.
If extent is not a full circle, one endpoint of the arc is the
current pen position. Draw the arc in counterclockwise direction
if radius is positive, otherwise in clockwise direction. Finally
the direction of the turtle is changed by the amount of extent.
As the circle is approximated by an inscribed regular polygon,
steps determines the number of steps to use. If not given,
it will be calculated automatically. May be used to draw regular
polygons.
call: circle(radius) # full circle
--or: circle(radius, extent) # arc
--or: circle(radius, extent, steps)
--or: circle(radius, steps=6) # 6-sided polygon
Example (for a Turtle instance named turtle):
>>> turtle.circle(50)
>>> turtle.circle(120, 180) # semicircle
"""
if self.undobuffer:
self.undobuffer.push(["seq"])
self.undobuffer.cumulate = True
speed = self.speed()
if extent is None:
extent = self._fullcircle
if steps is None:
frac = abs(extent)/self._fullcircle
steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
w = 1.0 * extent / steps
w2 = 0.5 * w
l = 2.0 * radius * math.sin(w2*math.pi/180.0*self._degreesPerAU)
if radius < 0:
l, w, w2 = -l, -w, -w2
tr = self._tracer()
dl = self._delay()
if speed == 0:
self._tracer(0, 0)
else:
self.speed(0)
self._rotate(w2)
for i in range(steps):
self.speed(speed)
self._go(l)
self.speed(0)
self._rotate(w)
self._rotate(-w2)
if speed == 0:
self._tracer(tr, dl)
self.speed(speed)
if self.undobuffer:
self.undobuffer.cumulate = False
|
[
"def",
"circle",
"(",
"self",
",",
"radius",
",",
"extent",
"=",
"None",
",",
"steps",
"=",
"None",
")",
":",
"if",
"self",
".",
"undobuffer",
":",
"self",
".",
"undobuffer",
".",
"push",
"(",
"[",
"\"seq\"",
"]",
")",
"self",
".",
"undobuffer",
".",
"cumulate",
"=",
"True",
"speed",
"=",
"self",
".",
"speed",
"(",
")",
"if",
"extent",
"is",
"None",
":",
"extent",
"=",
"self",
".",
"_fullcircle",
"if",
"steps",
"is",
"None",
":",
"frac",
"=",
"abs",
"(",
"extent",
")",
"/",
"self",
".",
"_fullcircle",
"steps",
"=",
"1",
"+",
"int",
"(",
"min",
"(",
"11",
"+",
"abs",
"(",
"radius",
")",
"/",
"6.0",
",",
"59.0",
")",
"*",
"frac",
")",
"w",
"=",
"1.0",
"*",
"extent",
"/",
"steps",
"w2",
"=",
"0.5",
"*",
"w",
"l",
"=",
"2.0",
"*",
"radius",
"*",
"math",
".",
"sin",
"(",
"w2",
"*",
"math",
".",
"pi",
"/",
"180.0",
"*",
"self",
".",
"_degreesPerAU",
")",
"if",
"radius",
"<",
"0",
":",
"l",
",",
"w",
",",
"w2",
"=",
"-",
"l",
",",
"-",
"w",
",",
"-",
"w2",
"tr",
"=",
"self",
".",
"_tracer",
"(",
")",
"dl",
"=",
"self",
".",
"_delay",
"(",
")",
"if",
"speed",
"==",
"0",
":",
"self",
".",
"_tracer",
"(",
"0",
",",
"0",
")",
"else",
":",
"self",
".",
"speed",
"(",
"0",
")",
"self",
".",
"_rotate",
"(",
"w2",
")",
"for",
"i",
"in",
"range",
"(",
"steps",
")",
":",
"self",
".",
"speed",
"(",
"speed",
")",
"self",
".",
"_go",
"(",
"l",
")",
"self",
".",
"speed",
"(",
"0",
")",
"self",
".",
"_rotate",
"(",
"w",
")",
"self",
".",
"_rotate",
"(",
"-",
"w2",
")",
"if",
"speed",
"==",
"0",
":",
"self",
".",
"_tracer",
"(",
"tr",
",",
"dl",
")",
"self",
".",
"speed",
"(",
"speed",
")",
"if",
"self",
".",
"undobuffer",
":",
"self",
".",
"undobuffer",
".",
"cumulate",
"=",
"False"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/turtle.py#L1939-L2000
|
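Worked numbers for the chord geometry above (assuming degree mode, where _degreesPerAU is 1): with radius 50 and a full circle, the default step count and per-step chord come out as follows.

import math

frac = 1.0                                         # full circle
steps = 1 + int(min(11 + abs(50) / 6.0, 59.0) * frac)
w = 1.0 * 360.0 / steps                            # turn per step, degrees
l = 2.0 * 50 * math.sin(math.radians(0.5 * w))     # chord length per step
print(steps, w, round(l, 3))                       # 20 18.0 15.643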
||
mapequation/infomap
|
5f56b94fe0f956483f61a03ef07e94d8def2205e
|
interfaces/python/infomap.py
|
python
|
Infomap.add_state_nodes
|
(self, state_nodes)
|
Add state nodes.
See Also
--------
add_state_node
Examples
--------
With tuples
>>> from infomap import Infomap
>>> im = Infomap()
>>> states = (
... (1, 1),
... (2, 1),
... (3, 2)
... )
>>> im.add_state_nodes(states)
With dict
>>> from infomap import Infomap
>>> im = Infomap()
>>> states = {
... 1: 1,
... 2: 1,
... 3: 2
... }
>>> im.add_state_nodes(states)
Parameters
----------
state_nodes : iterable of tuples or dict of int: int
Iterable of tuples of the form ``(state_id, node_id)``
or dict of the form ``{state_id: node_id}``.
|
Add state nodes.
|
[
"Add",
"state",
"nodes",
"."
] |
def add_state_nodes(self, state_nodes):
"""Add state nodes.
See Also
--------
add_state_node
Examples
--------
With tuples
>>> from infomap import Infomap
>>> im = Infomap()
>>> states = (
... (1, 1),
... (2, 1),
... (3, 2)
... )
>>> im.add_state_nodes(states)
With dict
>>> from infomap import Infomap
>>> im = Infomap()
>>> states = {
... 1: 1,
... 2: 1,
... 3: 2
... }
>>> im.add_state_nodes(states)
Parameters
----------
state_nodes : iterable of tuples or dict of int: int
Iterable of tuples of the form ``(state_id, node_id)``
or dict of the form ``{state_id: node_id}``.
"""
try:
for node in state_nodes.items():
self.add_state_node(*node)
except AttributeError:
for node in state_nodes:
self.add_state_node(*node)
|
[
"def",
"add_state_nodes",
"(",
"self",
",",
"state_nodes",
")",
":",
"try",
":",
"for",
"node",
"in",
"state_nodes",
".",
"items",
"(",
")",
":",
"self",
".",
"add_state_node",
"(",
"*",
"node",
")",
"except",
"AttributeError",
":",
"for",
"node",
"in",
"state_nodes",
":",
"self",
".",
"add_state_node",
"(",
"*",
"node",
")"
] |
https://github.com/mapequation/infomap/blob/5f56b94fe0f956483f61a03ef07e94d8def2205e/interfaces/python/infomap.py#L572-L616
|
||
junhyukoh/caffe-lstm
|
598d45456fa2a1b127a644f4aa38daa8fb9fc722
|
scripts/cpp_lint.py
|
python
|
FindStartOfExpressionInLine
|
(line, endpos, depth, startchar, endchar)
|
return (-1, depth)
|
Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
|
Find position at the matching startchar.
|
[
"Find",
"position",
"at",
"the",
"matching",
"startchar",
"."
] |
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in xrange(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
|
[
"def",
"FindStartOfExpressionInLine",
"(",
"line",
",",
"endpos",
",",
"depth",
",",
"startchar",
",",
"endchar",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"endpos",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"line",
"[",
"i",
"]",
"==",
"endchar",
":",
"depth",
"+=",
"1",
"elif",
"line",
"[",
"i",
"]",
"==",
"startchar",
":",
"depth",
"-=",
"1",
"if",
"depth",
"==",
"0",
":",
"return",
"(",
"i",
",",
"0",
")",
"return",
"(",
"-",
"1",
",",
"depth",
")"
] |
https://github.com/junhyukoh/caffe-lstm/blob/598d45456fa2a1b127a644f4aa38daa8fb9fc722/scripts/cpp_lint.py#L1300-L1324
|
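A quick trace (the linter targets Python 2, hence xrange above): starting at the ')' itself with depth 0, the closing char bumps depth to 1 and the matching '(' brings it back to 0:

line = "foo(a, b) + c"
print(FindStartOfExpressionInLine(line, 8, 0, '(', ')'))      # (3, 0)
print(FindStartOfExpressionInLine("b) + c", 1, 0, '(', ')'))  # (-1, 1): opens on an earlier line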
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/groupby/groupby.py
|
python
|
GroupBy.var
|
(self, ddof: int = 1, *args, **kwargs)
|
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
|
Compute variance of groups, excluding missing values.
|
[
"Compute",
"variance",
"of",
"groups",
"excluding",
"missing",
"values",
"."
] |
def var(self, ddof: int = 1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : int, default 1
Degrees of freedom.
Returns
-------
Series or DataFrame
Variance of values within each group.
"""
nv.validate_groupby_func("var", args, kwargs)
if ddof == 1:
return self._cython_agg_general(
"var", alt=lambda x, axis: Series(x).var(ddof=ddof, **kwargs), **kwargs
)
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
|
[
"def",
"var",
"(",
"self",
",",
"ddof",
":",
"int",
"=",
"1",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_groupby_func",
"(",
"\"var\"",
",",
"args",
",",
"kwargs",
")",
"if",
"ddof",
"==",
"1",
":",
"return",
"self",
".",
"_cython_agg_general",
"(",
"\"var\"",
",",
"alt",
"=",
"lambda",
"x",
",",
"axis",
":",
"Series",
"(",
"x",
")",
".",
"var",
"(",
"ddof",
"=",
"ddof",
",",
"*",
"*",
"kwargs",
")",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"var",
"(",
"ddof",
"=",
"ddof",
",",
"*",
"*",
"kwargs",
")",
"with",
"_group_selection_context",
"(",
"self",
")",
":",
"return",
"self",
".",
"_python_agg_general",
"(",
"f",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/groupby/groupby.py#L1272-L1296
|
||
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/gsutil/third_party/boto/boto/gs/resumable_upload_handler.py
|
python
|
ResumableUploadHandler._save_tracker_uri_to_file
|
(self)
|
Saves URI to tracker file if one was passed to constructor.
|
Saves URI to tracker file if one was passed to constructor.
|
[
"Saves",
"URI",
"to",
"tracker",
"file",
"if",
"one",
"was",
"passed",
"to",
"constructor",
"."
] |
def _save_tracker_uri_to_file(self):
"""
Saves URI to tracker file if one was passed to constructor.
"""
if not self.tracker_file_name:
return
f = None
try:
with os.fdopen(os.open(self.tracker_file_name,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.write(self.tracker_uri)
except IOError as e:
raise ResumableUploadException(
'Couldn\'t write URI tracker file (%s): %s.\nThis can happen '
'if you\'re using an incorrectly configured upload tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
|
[
"def",
"_save_tracker_uri_to_file",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"tracker_file_name",
":",
"return",
"f",
"=",
"None",
"try",
":",
"with",
"os",
".",
"fdopen",
"(",
"os",
".",
"open",
"(",
"self",
".",
"tracker_file_name",
",",
"os",
".",
"O_WRONLY",
"|",
"os",
".",
"O_CREAT",
",",
"0o600",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"tracker_uri",
")",
"except",
"IOError",
"as",
"e",
":",
"raise",
"ResumableUploadException",
"(",
"'Couldn\\'t write URI tracker file (%s): %s.\\nThis can happen'",
"'if you\\'re using an incorrectly configured upload tool\\n'",
"'(e.g., gsutil configured to save tracker files to an '",
"'unwritable directory)'",
"%",
"(",
"self",
".",
"tracker_file_name",
",",
"e",
".",
"strerror",
")",
",",
"ResumableTransferDisposition",
".",
"ABORT",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/gs/resumable_upload_handler.py#L115-L133
|
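The write idiom above is worth isolating: os.open applies the 0o600 permission at creation time (no window where the file is world-readable), and os.fdopen wraps the raw fd so the with-block still works. A standalone sketch with a hypothetical helper name:

import os

def write_private(path, text):
    """Create `path` with mode 0600 from the moment it exists."""
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
        f.write(text)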
||
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py
|
python
|
ParseResults.dump
|
(self, indent='', depth=0, full=True)
|
return "".join(out)
|
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
|
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
|
[
"Diagnostic",
"method",
"for",
"listing",
"out",
"the",
"contents",
"of",
"a",
"C",
"{",
"ParseResults",
"}",
".",
"Accepts",
"an",
"optional",
"C",
"{",
"indent",
"}",
"argument",
"so",
"that",
"this",
"string",
"can",
"be",
"embedded",
"in",
"a",
"nested",
"display",
"of",
"other",
"data",
"."
] |
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
|
[
"def",
"dump",
"(",
"self",
",",
"indent",
"=",
"''",
",",
"depth",
"=",
"0",
",",
"full",
"=",
"True",
")",
":",
"out",
"=",
"[",
"]",
"NL",
"=",
"'\\n'",
"out",
".",
"append",
"(",
"indent",
"+",
"_ustr",
"(",
"self",
".",
"asList",
"(",
")",
")",
")",
"if",
"full",
":",
"if",
"self",
".",
"haskeys",
"(",
")",
":",
"items",
"=",
"sorted",
"(",
"(",
"str",
"(",
"k",
")",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
":",
"if",
"out",
":",
"out",
".",
"append",
"(",
"NL",
")",
"out",
".",
"append",
"(",
"\"%s%s- %s: \"",
"%",
"(",
"indent",
",",
"(",
"' '",
"*",
"depth",
")",
",",
"k",
")",
")",
"if",
"isinstance",
"(",
"v",
",",
"ParseResults",
")",
":",
"if",
"v",
":",
"out",
".",
"append",
"(",
"v",
".",
"dump",
"(",
"indent",
",",
"depth",
"+",
"1",
")",
")",
"else",
":",
"out",
".",
"append",
"(",
"_ustr",
"(",
"v",
")",
")",
"else",
":",
"out",
".",
"append",
"(",
"repr",
"(",
"v",
")",
")",
"elif",
"any",
"(",
"isinstance",
"(",
"vv",
",",
"ParseResults",
")",
"for",
"vv",
"in",
"self",
")",
":",
"v",
"=",
"self",
"for",
"i",
",",
"vv",
"in",
"enumerate",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"vv",
",",
"ParseResults",
")",
":",
"out",
".",
"append",
"(",
"\"\\n%s%s[%d]:\\n%s%s%s\"",
"%",
"(",
"indent",
",",
"(",
"' '",
"*",
"(",
"depth",
")",
")",
",",
"i",
",",
"indent",
",",
"(",
"' '",
"*",
"(",
"depth",
"+",
"1",
")",
")",
",",
"vv",
".",
"dump",
"(",
"indent",
",",
"depth",
"+",
"1",
")",
")",
")",
"else",
":",
"out",
".",
"append",
"(",
"\"\\n%s%s[%d]:\\n%s%s%s\"",
"%",
"(",
"indent",
",",
"(",
"' '",
"*",
"(",
"depth",
")",
")",
",",
"i",
",",
"indent",
",",
"(",
"' '",
"*",
"(",
"depth",
"+",
"1",
")",
")",
",",
"_ustr",
"(",
"vv",
")",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"out",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py#L848-L891
|
|
apache/incubator-mxnet
|
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
|
python/mxnet/numpy/random.py
|
python
|
chisquare
|
(df, size=None, dtype=None, device=None)
|
return _mx_nd_np.random.chisquare(df, size=size, dtype=dtype, device=device)
|
r"""Draw samples from a chi-square distribution.
When `df` independent random variables, each with standard normal
distributions (mean 0, variance 1), are squared and summed, the
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
Parameters
----------
df : float or ndarray of floats
Number of degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``df`` is a scalar. Otherwise,
``np.array(df).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'.
device : Device, optional
Device context of output. Default is current device.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized `chi-square distribution` [1]_.
Raises
------
ValueError
When `df` <= 0 or when an inappropriate `size`
is given.
Notes
-----
The variable obtained by summing the squares of `df` independent,
standard normally distributed random variables:
.. math:: Q = \sum_{i=0}^{\mathtt{df}} X^2_i
is chi-square distributed, denoted
.. math:: Q \sim \chi^2_k.
The probability density function of the chi-squared distribution is
.. math:: p(x) = \frac{(1/2)^{k/2}}{\Gamma(k/2)}
x^{k/2 - 1} e^{-x/2},
where :math:`\Gamma` is the gamma function,
.. math:: \Gamma(x) = \int_0^{\infty} t^{x - 1} e^{-t} dt.
References
----------
.. [1] NIST "Engineering Statistics Handbook"
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
|
r"""Draw samples from a chi-square distribution.
|
[
"r",
"Draw",
"samples",
"from",
"a",
"chi",
"-",
"square",
"distribution",
"."
] |
def chisquare(df, size=None, dtype=None, device=None):
r"""Draw samples from a chi-square distribution.
When `df` independent random variables, each with standard normal
distributions (mean 0, variance 1), are squared and summed, the
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
Parameters
----------
df : float or ndarray of floats
Number of degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``df`` is a scalar. Otherwise,
``np.array(df).size`` samples are drawn.
dtype : {'float16', 'float32', 'float64'}, optional
Data type of output samples. Default is 'float32'.
device : Device, optional
Device context of output. Default is current device.
Returns
-------
out : ndarray or scalar
Drawn samples from the parameterized `chi-square distribution` [1]_.
Raises
------
ValueError
When `df` <= 0 or when an inappropriate `size`
is given.
Notes
-----
The variable obtained by summing the squares of `df` independent,
standard normally distributed random variables:
.. math:: Q = \sum_{i=0}^{\mathtt{df}} X^2_i
is chi-square distributed, denoted
.. math:: Q \sim \chi^2_k.
The probability density function of the chi-squared distribution is
.. math:: p(x) = \frac{(1/2)^{k/2}}{\Gamma(k/2)}
x^{k/2 - 1} e^{-x/2},
where :math:`\Gamma` is the gamma function,
.. math:: \Gamma(x) = \int_0^{\infty} t^{x - 1} e^{-t} dt.
References
----------
.. [1] NIST "Engineering Statistics Handbook"
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
"""
return _mx_nd_np.random.chisquare(df, size=size, dtype=dtype, device=device)
|
[
"def",
"chisquare",
"(",
"df",
",",
"size",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"device",
"=",
"None",
")",
":",
"return",
"_mx_nd_np",
".",
"random",
".",
"chisquare",
"(",
"df",
",",
"size",
"=",
"size",
",",
"dtype",
"=",
"dtype",
",",
"device",
"=",
"device",
")"
] |
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/numpy/random.py#L950-L1013
|
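A NumPy-side sanity check of the Notes section (this record's function just forwards to the MXNet ndarray backend): summing df squared standard normals reproduces a chi-square draw, whose mean is df:

import numpy as np

df, n = 4, 200000
q = (np.random.standard_normal((n, df)) ** 2).sum(axis=1)
print(round(q.mean(), 2))          # ~4.0, matching E[chi2(df)] = df
print(np.random.chisquare(df, 3))  # direct draws from the same distribution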
|
cvxpy/cvxpy
|
5165b4fb750dfd237de8659383ef24b4b2e33aaf
|
cvxpy/atoms/gen_lambda_max.py
|
python
|
gen_lambda_max._grad
|
(self, values)
|
Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
|
Gives the (sub/super)gradient of the atom w.r.t. each argument.
|
[
"Gives",
"the",
"(",
"sub",
"/",
"super",
")",
"gradient",
"of",
"the",
"atom",
"w",
".",
"r",
".",
"t",
".",
"each",
"argument",
"."
] |
def _grad(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
raise NotImplementedError()
|
[
"def",
"_grad",
"(",
"self",
",",
"values",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/gen_lambda_max.py#L48-L59
|
||
tensorflow/io
|
92b44e180674a8af0e12e405530f7343e3e693e4
|
tensorflow_io/python/ops/audio_ops.py
|
python
|
AudioIOTensor.__getitem__
|
(self, key)
|
return item.__getitem__(indices)
|
Returns the specified piece of this IOTensor.
|
Returns the specified piece of this IOTensor.
|
[
"Returns",
"the",
"specified",
"piece",
"of",
"this",
"IOTensor",
"."
] |
def __getitem__(self, key):
"""Returns the specified piece of this IOTensor."""
# always convert to tuple to process
if not isinstance(key, tuple):
key = tuple([key])
# get the start and stop of each element
indices = [
(k.start, k.stop) if isinstance(k, slice) else (k, k + 1) for k in key
]
# get the start and stop, and use 0 (start) and -1 (stop) if needed
indices = list(zip(*indices))
start = [0 if e is None else e for e in indices[0]]
stop = [-1 if e is None else e for e in indices[1]]
item = core_ops.io_audio_readable_read(
self._resource, start=start, stop=stop, dtype=self._dtype
)
# in case certain dimension is not slice, then this dimension will need to
# collapse as `0`, otherwise `:` or `slice(None, None, None)`
indices = [slice(None) if isinstance(k, slice) else 0 for k in key]
return item.__getitem__(indices)
|
[
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"# always convert to tuple to process",
"if",
"not",
"isinstance",
"(",
"key",
",",
"tuple",
")",
":",
"key",
"=",
"tuple",
"(",
"[",
"key",
"]",
")",
"# get the start and stop of each element",
"indices",
"=",
"[",
"(",
"k",
".",
"start",
",",
"k",
".",
"stop",
")",
"if",
"isinstance",
"(",
"k",
",",
"slice",
")",
"else",
"(",
"k",
",",
"k",
"+",
"1",
")",
"for",
"k",
"in",
"key",
"]",
"# get the start and stop, and use 0 (start) and -1 (stop) if needed",
"indices",
"=",
"list",
"(",
"zip",
"(",
"*",
"indices",
")",
")",
"start",
"=",
"[",
"0",
"if",
"e",
"is",
"None",
"else",
"e",
"for",
"e",
"in",
"indices",
"[",
"0",
"]",
"]",
"stop",
"=",
"[",
"-",
"1",
"if",
"e",
"is",
"None",
"else",
"e",
"for",
"e",
"in",
"indices",
"[",
"1",
"]",
"]",
"item",
"=",
"core_ops",
".",
"io_audio_readable_read",
"(",
"self",
".",
"_resource",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"dtype",
"=",
"self",
".",
"_dtype",
")",
"# in case certain dimension is not slice, then this dimension will need to",
"# collapse as `0`, otherwise `:` or `slice(None, None, None)`",
"indices",
"=",
"[",
"slice",
"(",
"None",
")",
"if",
"isinstance",
"(",
"k",
",",
"slice",
")",
"else",
"0",
"for",
"k",
"in",
"key",
"]",
"return",
"item",
".",
"__getitem__",
"(",
"indices",
")"
] |
https://github.com/tensorflow/io/blob/92b44e180674a8af0e12e405530f7343e3e693e4/tensorflow_io/python/ops/audio_ops.py#L721-L743
|
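The key-normalization steps above, traced in plain Python for key = (slice(2, 8), 0), i.e. rows 2:8 of channel 0:

key = (slice(2, 8), 0)
indices = [(k.start, k.stop) if isinstance(k, slice) else (k, k + 1) for k in key]
indices = list(zip(*indices))
start = [0 if e is None else e for e in indices[0]]
stop = [-1 if e is None else e for e in indices[1]]
print(start, stop)  # [2, 0] [8, 1] -- per-dimension read window
print([slice(None) if isinstance(k, slice) else 0 for k in key])
# [slice(None, None, None), 0] -- integer dims collapse after the read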
|
google/syzygy
|
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
|
third_party/numpy/files/numpy/polynomial/hermite_e.py
|
python
|
hermefromroots
|
(roots)
|
Generate a Hermite series with the given roots.
Return the array of coefficients for the P-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the Hermite series coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots, chebfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Hermite
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the Hermite basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
|
Generate a Hermite series with the given roots.
|
[
"Generate",
"a",
"Hermite",
"series",
"with",
"the",
"given",
"roots",
"."
] |
def hermefromroots(roots) :
"""
Generate a Hermite series with the given roots.
Return the array of coefficients for the P-series whose roots (a.k.a.
"zeros") are given by *roots*. The returned array of coefficients is
ordered from lowest order "term" to highest, and zeros of multiplicity
greater than one must be included in *roots* a number of times equal
to their multiplicity (e.g., if `2` is a root of multiplicity three,
then [2,2,2] must be in *roots*).
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-d array of the Hermite series coefficients, ordered from low to
high. If all roots are real, ``out.dtype`` is a float type;
otherwise, ``out.dtype`` is a complex type, even if all the
coefficients in the result are real (see Examples below).
See Also
--------
polyfromroots, chebfromroots
Notes
-----
What is returned are the :math:`c_i` such that:
.. math::
\\sum_{i=0}^{n} c_i*P_i(x) = \\prod_{i=0}^{n} (x - roots[i])
where ``n == len(roots)`` and :math:`P_i(x)` is the `i`-th Hermite
(basis) polynomial over the domain `[-1,1]`. Note that, unlike
`polyfromroots`, due to the nature of the Hermite basis set, the
above identity *does not* imply :math:`c_n = 1` identically (see
Examples).
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
prd = np.array([1], dtype=roots.dtype)
for r in roots:
prd = hermesub(hermemulx(prd), r*prd)
return prd
|
[
"def",
"hermefromroots",
"(",
"roots",
")",
":",
"if",
"len",
"(",
"roots",
")",
"==",
"0",
":",
"return",
"np",
".",
"ones",
"(",
"1",
")",
"else",
":",
"[",
"roots",
"]",
"=",
"pu",
".",
"as_series",
"(",
"[",
"roots",
"]",
",",
"trim",
"=",
"False",
")",
"prd",
"=",
"np",
".",
"array",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"roots",
".",
"dtype",
")",
"for",
"r",
"in",
"roots",
":",
"prd",
"=",
"hermesub",
"(",
"hermemulx",
"(",
"prd",
")",
",",
"r",
"*",
"prd",
")",
"return",
"prd"
] |
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/polynomial/hermite_e.py#L223-L283
|
||
KratosMultiphysics/Kratos
|
0000833054ed0503424eb28205d6508d9ca6cbbc
|
kratos/mpi/python_scripts/distributed_gid_output_process.py
|
python
|
DistributedGiDOutputProcess._SetCurrentTimeParameters
|
(self, additional_list_files)
|
doing nothing here in MPI
|
doing nothing here in MPI
|
[
"doing",
"nothing",
"here",
"in",
"MPI"
] |
def _SetCurrentTimeParameters(self, additional_list_files):
''' doing nothing here in MPI'''
pass
|
[
"def",
"_SetCurrentTimeParameters",
"(",
"self",
",",
"additional_list_files",
")",
":",
"pass"
] |
https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/kratos/mpi/python_scripts/distributed_gid_output_process.py#L74-L76
|
||
tensorflow/tensorflow
|
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
|
tensorflow/python/distribute/multi_worker_util.py
|
python
|
id_in_cluster
|
(cluster_spec, task_type, task_id)
|
Returns a unique id for the task in the `task_type`'s cluster.
It returns an id ranging from [0, `worker_count(task_type, task_id)`).
Note: this function assumes that the "evaluate" job is in its own cluster or its
own partition of a cluster.
Args:
cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.
task_type: string indicating the type of the task.
task_id: the id of the `task_type` in this cluster.
Returns:
an int indicating the unique id.
Throws:
ValueError: if `task_type` is not "chief", "worker" or "evaluator".
|
Returns a unique id for the task in the `task_type`'s cluster.
|
[
"Returns",
"a",
"unique",
"id",
"for",
"the",
"task",
"in",
"the",
"task_type",
"s",
"cluster",
"."
] |
def id_in_cluster(cluster_spec, task_type, task_id):
"""Returns a unique id for the task in the `task_type`'s cluster.
It returns an id ranging from [0, `worker_count(task_type, task_id)`).
Note: this function assumes that the "evaluate" job is in its own cluster or its
own partition of a cluster.
Args:
cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.
task_type: string indicating the type of the task.
task_id: the id of the `task_type` in this cluster.
Returns:
an int indicating the unique id.
Throws:
ValueError: if `task_type` is not "chief", "worker" or "evaluator".
"""
_validate_cluster_spec(cluster_spec, task_type, task_id)
cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()
# The "chief" job has always id 0 and there is at most one and "worker" jobs
# come after it.
if task_type == "chief":
return 0
if task_type == "worker":
return task_id + len(cluster_spec.get("chief", []))
# The "evaluator" is in its own cluster or its own partition of a cluster.
if task_type == "evaluator":
return task_id
# We currently don't assign ids to other tasks.
raise ValueError("There is no id for task_type %r" % task_type)
|
[
"def",
"id_in_cluster",
"(",
"cluster_spec",
",",
"task_type",
",",
"task_id",
")",
":",
"_validate_cluster_spec",
"(",
"cluster_spec",
",",
"task_type",
",",
"task_id",
")",
"cluster_spec",
"=",
"normalize_cluster_spec",
"(",
"cluster_spec",
")",
".",
"as_dict",
"(",
")",
"# The \"chief\" job has always id 0 and there is at most one and \"worker\" jobs",
"# come after it.",
"if",
"task_type",
"==",
"\"chief\"",
":",
"return",
"0",
"if",
"task_type",
"==",
"\"worker\"",
":",
"return",
"task_id",
"+",
"len",
"(",
"cluster_spec",
".",
"get",
"(",
"\"chief\"",
",",
"[",
"]",
")",
")",
"# The \"evaluator\" is in its own cluster or its own partition of a cluster.",
"if",
"task_type",
"==",
"\"evaluator\"",
":",
"return",
"task_id",
"# We currently don't assign ids to other tasks.",
"raise",
"ValueError",
"(",
"\"There is no id for task_type %r\"",
"%",
"task_type",
")"
] |
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/distribute/multi_worker_util.py#L228-L263
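The id arithmetic above is easy to verify by hand: with one chief and two workers, the chief gets 0 and the workers get 1 and 2, because worker ids are offset by the number of chief tasks. A minimal standalone sketch of the same offsets (validation and spec normalization omitted):

def id_in_cluster_sketch(cluster, task_type, task_id):
    # Mirrors the branch logic of id_in_cluster above.
    if task_type == "chief":
        return 0
    if task_type == "worker":
        return task_id + len(cluster.get("chief", []))
    if task_type == "evaluator":
        return task_id
    raise ValueError("There is no id for task_type %r" % task_type)

cluster = {"chief": ["host0:2222"], "worker": ["host1:2222", "host2:2222"]}
assert id_in_cluster_sketch(cluster, "chief", 0) == 0
assert id_in_cluster_sketch(cluster, "worker", 1) == 2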
|
||
klzgrad/naiveproxy
|
ed2c513637c77b18721fe428d7ed395b4d284c83
|
src/build/android/gyp/util/zipalign.py
|
python
|
AddToZipHermetic
|
(zip_file,
zip_path,
src_path=None,
data=None,
compress=None,
alignment=None)
|
Same as build_utils.AddToZipHermetic(), but with alignment.
Args:
alignment: If set, align the data of the entry to this many bytes.
|
Same as build_utils.AddToZipHermetic(), but with alignment.
|
[
"Same",
"as",
"build_utils",
".",
"AddToZipHermetic",
"()",
"but",
"with",
"alignment",
"."
] |
def AddToZipHermetic(zip_file,
zip_path,
src_path=None,
data=None,
compress=None,
alignment=None):
"""Same as build_utils.AddToZipHermetic(), but with alignment.
Args:
alignment: If set, align the data of the entry to this many bytes.
"""
zipinfo = build_utils.HermeticZipInfo(filename=zip_path)
if alignment:
_SetAlignment(zip_file, zipinfo, alignment)
build_utils.AddToZipHermetic(
zip_file, zipinfo, src_path=src_path, data=data, compress=compress)
|
[
"def",
"AddToZipHermetic",
"(",
"zip_file",
",",
"zip_path",
",",
"src_path",
"=",
"None",
",",
"data",
"=",
"None",
",",
"compress",
"=",
"None",
",",
"alignment",
"=",
"None",
")",
":",
"zipinfo",
"=",
"build_utils",
".",
"HermeticZipInfo",
"(",
"filename",
"=",
"zip_path",
")",
"if",
"alignment",
":",
"_SetAlignment",
"(",
"zip_file",
",",
"zipinfo",
",",
"alignment",
")",
"build_utils",
".",
"AddToZipHermetic",
"(",
"zip_file",
",",
"zipinfo",
",",
"src_path",
"=",
"src_path",
",",
"data",
"=",
"data",
",",
"compress",
"=",
"compress",
")"
] |
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/gyp/util/zipalign.py#L82-L97
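The point of the alignment option is that uncompressed entry data should start at a multiple of `alignment` bytes so it can be memory-mapped directly. The padding arithmetic that _SetAlignment presumably applies (via the zip extra field) reduces to rounding the data offset up; a hedged sketch:

def padding_for(data_offset, alignment):
    # Bytes of padding so data_offset is rounded up to a multiple
    # of alignment; 0 when it is already aligned.
    return (alignment - (data_offset % alignment)) % alignment

assert padding_for(13, 4) == 3
assert padding_for(16, 4) == 0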
|
||
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/data/ops/dataset_ops.py
|
python
|
TensorDataset.__init__
|
(self, element)
|
See `Dataset.from_tensors()` for details.
|
See `Dataset.from_tensors()` for details.
|
[
"See",
"Dataset",
".",
"from_tensors",
"()",
"for",
"details",
"."
] |
def __init__(self, element):
"""See `Dataset.from_tensors()` for details."""
element = structure.normalize_element(element)
self._structure = structure.type_spec_from_value(element)
self._tensors = structure.to_tensor_list(self._structure, element)
variant_tensor = gen_dataset_ops.tensor_dataset(
self._tensors,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(TensorDataset, self).__init__(variant_tensor)
|
[
"def",
"__init__",
"(",
"self",
",",
"element",
")",
":",
"element",
"=",
"structure",
".",
"normalize_element",
"(",
"element",
")",
"self",
".",
"_structure",
"=",
"structure",
".",
"type_spec_from_value",
"(",
"element",
")",
"self",
".",
"_tensors",
"=",
"structure",
".",
"to_tensor_list",
"(",
"self",
".",
"_structure",
",",
"element",
")",
"variant_tensor",
"=",
"gen_dataset_ops",
".",
"tensor_dataset",
"(",
"self",
".",
"_tensors",
",",
"output_shapes",
"=",
"structure",
".",
"get_flat_tensor_shapes",
"(",
"self",
".",
"_structure",
")",
")",
"super",
"(",
"TensorDataset",
",",
"self",
")",
".",
"__init__",
"(",
"variant_tensor",
")"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/data/ops/dataset_ops.py#L2351-L2360
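From user code this constructor is reached through the public factory; a short usage sketch, assuming TensorFlow 2.x in eager mode:

import tensorflow as tf

# from_tensors wraps its argument as a *single* dataset element,
# which is exactly what TensorDataset implements.
ds = tf.data.Dataset.from_tensors({"x": [1, 2, 3], "y": 0})
for element in ds:               # iterates exactly once
    print(element["x"].numpy())  # [1 2 3]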
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/parso/py2/parso/__init__.py
|
python
|
parse
|
(code=None, **kwargs)
|
return grammar.parse(code, **kwargs)
|
A utility function to avoid loading grammars.
Params are documented in :py:meth:`parso.Grammar.parse`.
:param str version: The version used by :py:func:`parso.load_grammar`.
|
A utility function to avoid loading grammars.
Params are documented in :py:meth:`parso.Grammar.parse`.
|
[
"A",
"utility",
"function",
"to",
"avoid",
"loading",
"grammars",
".",
"Params",
"are",
"documented",
"in",
":",
"py",
":",
"meth",
":",
"parso",
".",
"Grammar",
".",
"parse",
"."
] |
def parse(code=None, **kwargs):
"""
A utility function to avoid loading grammars.
Params are documented in :py:meth:`parso.Grammar.parse`.
:param str version: The version used by :py:func:`parso.load_grammar`.
"""
version = kwargs.pop('version', None)
grammar = load_grammar(version=version)
return grammar.parse(code, **kwargs)
|
[
"def",
"parse",
"(",
"code",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"version",
"=",
"kwargs",
".",
"pop",
"(",
"'version'",
",",
"None",
")",
"grammar",
"=",
"load_grammar",
"(",
"version",
"=",
"version",
")",
"return",
"grammar",
".",
"parse",
"(",
"code",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/parso/py2/parso/__init__.py#L49-L58
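A short usage sketch: parse() is the convenience entry point that loads a default grammar and hands back the module node, which round-trips the source exactly.

import parso

module = parso.parse("x = 1\n")
print(module.children[0].type)        # a statement node, e.g. 'simple_stmt'
assert module.get_code() == "x = 1\n" # lossless round-trip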
|
|
wang-bin/QtAV
|
3b937991afce248648836ae811324d4051b31def
|
python/configure.py
|
python
|
_TargetConfiguration.update_from_configuration_file
|
(self, config_file)
|
Update the configuration with values from a file. config_file
is the name of the configuration file.
|
Update the configuration with values from a file. config_file
is the name of the configuration file.
|
[
"Update",
"the",
"configuration",
"with",
"values",
"from",
"a",
"file",
".",
"config_file",
"is",
"the",
"name",
"of",
"the",
"configuration",
"file",
"."
] |
def update_from_configuration_file(self, config_file):
""" Update the configuration with values from a file. config_file
is the name of the configuration file.
"""
inform("Reading configuration from %s..." % config_file)
parser = _ConfigurationFileParser(config_file)
# Populate some presets from the command line.
parser.preset('py_major', str(self.py_version >> 16))
parser.preset('py_minor', str((self.py_version >> 8) & 0xff))
parser.preset('sysroot', self.sysroot)
if self.pyqt_package is None:
section = ''
else:
# At the moment we only need to distinguish between PyQt4 and
# PyQt5. If that changes we may need a --target-pyqt-version
# command line option.
pyqt_version = 0x050000 if self.pyqt_package == 'PyQt5' else 0x040000
# Find the section corresponding to the version of PyQt.
section = None
latest_section = -1
for name in parser.sections():
parts = name.split()
if len(parts) != 2 or parts[0] != 'PyQt':
continue
section_pyqt_version = version_from_string(parts[1])
if section_pyqt_version is None:
continue
# Major versions must match.
if section_pyqt_version >> 16 != pyqt_version >> 16:
continue
# It must be no later than the version of PyQt.
if section_pyqt_version > pyqt_version:
continue
# Save it if it is the latest so far.
if section_pyqt_version > latest_section:
section = name
latest_section = section_pyqt_version
if section is None:
error(
"%s does not define a section that covers PyQt "
"v%s." % (config_file, self.pyqt_version_str))
self.py_platform = parser.get(section, 'py_platform', self.py_platform)
self.py_inc_dir = parser.get(section, 'py_inc_dir', self.py_inc_dir)
self.py_venv_inc_dir = self.py_inc_dir
self.py_pylib_dir = parser.get(section, 'py_pylib_dir',
self.py_pylib_dir)
self.sip_inc_dir = self.py_venv_inc_dir
self.module_dir = parser.get(section, 'module_dir', self.module_dir)
if self.pyqt_package is not None:
self.py_sip_dir = parser.get(section, 'py_sip_dir',
self.py_sip_dir)
# Construct the SIP flags.
flags = []
flags.append('-t')
flags.append(self._get_platform_tag())
if self.pyqt_package == 'PyQt5':
if self.qt_version < 0x050000:
error("PyQt5 requires Qt v5.0 or later.")
if self.qt_version > 0x060000:
self.qt_version = 0x060000
else:
if self.qt_version > 0x050000:
self.qt_version = 0x050000
major = (self.qt_version >> 16) & 0xff
minor = (self.qt_version >> 8) & 0xff
patch = self.qt_version & 0xff
flags.append('-t')
flags.append('Qt_%d_%d_%d' % (major, minor, patch))
for feat in parser.getlist(section, 'pyqt_disabled_features', []):
flags.append('-x')
flags.append(feat)
self.pyqt_sip_flags = ' '.join(flags)
|
[
"def",
"update_from_configuration_file",
"(",
"self",
",",
"config_file",
")",
":",
"inform",
"(",
"\"Reading configuration from %s...\"",
"%",
"config_file",
")",
"parser",
"=",
"_ConfigurationFileParser",
"(",
"config_file",
")",
"# Populate some presets from the command line.",
"parser",
".",
"preset",
"(",
"'py_major'",
",",
"str",
"(",
"self",
".",
"py_version",
">>",
"16",
")",
")",
"parser",
".",
"preset",
"(",
"'py_minor'",
",",
"str",
"(",
"(",
"self",
".",
"py_version",
">>",
"8",
")",
"&",
"0xff",
")",
")",
"parser",
".",
"preset",
"(",
"'sysroot'",
",",
"self",
".",
"sysroot",
")",
"if",
"self",
".",
"pyqt_package",
"is",
"None",
":",
"section",
"=",
"''",
"else",
":",
"# At the moment we only need to distinguish between PyQt4 and",
"# PyQt5. If that changes we may need a --target-pyqt-version",
"# command line option.",
"pyqt_version",
"=",
"0x050000",
"if",
"self",
".",
"pyqt_package",
"==",
"'PyQt5'",
"else",
"0x040000",
"# Find the section corresponding to the version of PyQt.",
"section",
"=",
"None",
"latest_section",
"=",
"-",
"1",
"for",
"name",
"in",
"parser",
".",
"sections",
"(",
")",
":",
"parts",
"=",
"name",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parts",
")",
"!=",
"2",
"or",
"parts",
"[",
"0",
"]",
"!=",
"'PyQt'",
":",
"continue",
"section_pyqt_version",
"=",
"version_from_string",
"(",
"parts",
"[",
"1",
"]",
")",
"if",
"section_pyqt_version",
"is",
"None",
":",
"continue",
"# Major versions must match.",
"if",
"section_pyqt_version",
">>",
"16",
"!=",
"pyqt_version",
">>",
"16",
":",
"continue",
"# It must be no later that the version of PyQt.",
"if",
"section_pyqt_version",
">",
"pyqt_version",
":",
"continue",
"# Save it if it is the latest so far.",
"if",
"section_pyqt_version",
">",
"latest_section",
":",
"section",
"=",
"name",
"latest_section",
"=",
"section_pyqt_version",
"if",
"section",
"is",
"None",
":",
"error",
"(",
"\"%s does not define a section that covers PyQt \"",
"\"v%s.\"",
"%",
"(",
"config_file",
",",
"self",
".",
"pyqt_version_str",
")",
")",
"self",
".",
"py_platform",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'py_platform'",
",",
"self",
".",
"py_platform",
")",
"self",
".",
"py_inc_dir",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'py_inc_dir'",
",",
"self",
".",
"py_inc_dir",
")",
"self",
".",
"py_venv_inc_dir",
"=",
"self",
".",
"py_inc_dir",
"self",
".",
"py_pylib_dir",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'py_pylib_dir'",
",",
"self",
".",
"py_pylib_dir",
")",
"self",
".",
"sip_inc_dir",
"=",
"self",
".",
"py_venv_inc_dir",
"self",
".",
"module_dir",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'module_dir'",
",",
"self",
".",
"module_dir",
")",
"if",
"self",
".",
"pyqt_package",
"is",
"not",
"None",
":",
"self",
".",
"py_sip_dir",
"=",
"parser",
".",
"get",
"(",
"section",
",",
"'py_sip_dir'",
",",
"self",
".",
"py_sip_dir",
")",
"# Construct the SIP flags.",
"flags",
"=",
"[",
"]",
"flags",
".",
"append",
"(",
"'-t'",
")",
"flags",
".",
"append",
"(",
"self",
".",
"_get_platform_tag",
"(",
")",
")",
"if",
"self",
".",
"pyqt_package",
"==",
"'PyQt5'",
":",
"if",
"self",
".",
"qt_version",
"<",
"0x050000",
":",
"error",
"(",
"\"PyQt5 requires Qt v5.0 or later.\"",
")",
"if",
"self",
".",
"qt_version",
">",
"0x060000",
":",
"self",
".",
"qt_version",
"=",
"0x060000",
"else",
":",
"if",
"self",
".",
"qt_version",
">",
"0x050000",
":",
"self",
".",
"qt_version",
"=",
"0x050000",
"major",
"=",
"(",
"self",
".",
"qt_version",
">>",
"16",
")",
"&",
"0xff",
"minor",
"=",
"(",
"self",
".",
"qt_version",
">>",
"8",
")",
"&",
"0xff",
"patch",
"=",
"self",
".",
"qt_version",
"&",
"0xff",
"flags",
".",
"append",
"(",
"'-t'",
")",
"flags",
".",
"append",
"(",
"'Qt_%d_%d_%d'",
"%",
"(",
"major",
",",
"minor",
",",
"patch",
")",
")",
"for",
"feat",
"in",
"parser",
".",
"getlist",
"(",
"section",
",",
"'pyqt_disabled_features'",
",",
"[",
"]",
")",
":",
"flags",
".",
"append",
"(",
"'-x'",
")",
"flags",
".",
"append",
"(",
"feat",
")",
"self",
".",
"pyqt_sip_flags",
"=",
"' '",
".",
"join",
"(",
"flags",
")"
] |
https://github.com/wang-bin/QtAV/blob/3b937991afce248648836ae811324d4051b31def/python/configure.py#L783-L877
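The version handling above relies on the 0xMMmmpp packing convention: 5.6.3 is stored as 0x050603, and the shift-and-mask lines recover the components. A small worked check of that arithmetic:

def decode_version(v):
    # Unpacks a version stored as 0xMMmmpp.
    return (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff

assert decode_version(0x050603) == (5, 6, 3)
# This is exactly what builds the '-t Qt_5_6_3' SIP tag above.
assert 'Qt_%d_%d_%d' % decode_version(0x050603) == 'Qt_5_6_3'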
|
||
NERSC/timemory
|
431912b360ff50d1a160d7826e2eea04fbd1037f
|
timemory/trace/tracer.py
|
python
|
Tracer.runcall
|
(self, func, *args, **kw)
|
Trace a single function call
|
Trace a single function call
|
[
"Trace",
"a",
"single",
"function",
"call"
] |
def runcall(self, func, *args, **kw):
"""Trace a single function call"""
try:
self.start()
return func(*args, **kw)
finally:
self.stop()
|
[
"def",
"runcall",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"try",
":",
"self",
".",
"start",
"(",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"finally",
":",
"self",
".",
"stop",
"(",
")"
] |
https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/timemory/trace/tracer.py#L302-L309
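Note the design choice: start() sits inside the try block, so stop() runs even if start() itself fails partway, and the traced function's return value passes straight through. A self-contained sketch of the same contract with a stand-in tracer:

class TracerSketch:
    # Minimal stand-in showing the start/stop bracketing of runcall.
    def start(self):
        print("tracing on")
    def stop(self):
        print("tracing off")
    def runcall(self, func, *args, **kw):
        try:
            self.start()
            return func(*args, **kw)
        finally:
            self.stop()

assert TracerSketch().runcall(sum, range(4)) == 6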
|
||
rdkit/rdkit
|
ede860ae316d12d8568daf5ee800921c3389c84e
|
rdkit/Chem/QED.py
|
python
|
properties
|
(mol)
|
return qedProperties
|
Calculates the properties that are required to calculate the QED descriptor.
|
Calculates the properties that are required to calculate the QED descriptor.
|
[
"Calculates",
"the",
"properties",
"that",
"are",
"required",
"to",
"calculate",
"the",
"QED",
"descriptor",
"."
] |
def properties(mol):
"""
Calculates the properties that are required to calculate the QED descriptor.
"""
if mol is None:
raise ValueError('You need to provide a mol argument.')
mol = Chem.RemoveHs(mol)
qedProperties = QEDproperties(
MW=rdmd._CalcMolWt(mol),
ALOGP=Crippen.MolLogP(mol),
HBA=sum(len(mol.GetSubstructMatches(pattern)) for pattern in Acceptors
if mol.HasSubstructMatch(pattern)),
HBD=rdmd.CalcNumHBD(mol),
PSA=MolSurf.TPSA(mol),
ROTB=rdmd.CalcNumRotatableBonds(mol, rdmd.NumRotatableBondsOptions.Strict),
AROM=Chem.GetSSSR(Chem.DeleteSubstructs(Chem.Mol(mol), AliphaticRings)),
ALERTS=sum(1 for alert in StructuralAlerts if mol.HasSubstructMatch(alert)),
)
# The replacement
# AROM=Lipinski.NumAromaticRings(mol),
# is not identical. The expression above tends to count more rings
# N1C2=CC=CC=C2SC3=C1C=CC4=C3C=CC=C4
# OC1=C(O)C=C2C(=C1)OC3=CC(=O)C(=CC3=C2C4=CC=CC=C4)O
# CC(C)C1=CC2=C(C)C=CC2=C(C)C=C1 uses 2, should be 0 ?
return qedProperties
|
[
"def",
"properties",
"(",
"mol",
")",
":",
"if",
"mol",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'You need to provide a mol argument.'",
")",
"mol",
"=",
"Chem",
".",
"RemoveHs",
"(",
"mol",
")",
"qedProperties",
"=",
"QEDproperties",
"(",
"MW",
"=",
"rdmd",
".",
"_CalcMolWt",
"(",
"mol",
")",
",",
"ALOGP",
"=",
"Crippen",
".",
"MolLogP",
"(",
"mol",
")",
",",
"HBA",
"=",
"sum",
"(",
"len",
"(",
"mol",
".",
"GetSubstructMatches",
"(",
"pattern",
")",
")",
"for",
"pattern",
"in",
"Acceptors",
"if",
"mol",
".",
"HasSubstructMatch",
"(",
"pattern",
")",
")",
",",
"HBD",
"=",
"rdmd",
".",
"CalcNumHBD",
"(",
"mol",
")",
",",
"PSA",
"=",
"MolSurf",
".",
"TPSA",
"(",
"mol",
")",
",",
"ROTB",
"=",
"rdmd",
".",
"CalcNumRotatableBonds",
"(",
"mol",
",",
"rdmd",
".",
"NumRotatableBondsOptions",
".",
"Strict",
")",
",",
"AROM",
"=",
"Chem",
".",
"GetSSSR",
"(",
"Chem",
".",
"DeleteSubstructs",
"(",
"Chem",
".",
"Mol",
"(",
"mol",
")",
",",
"AliphaticRings",
")",
")",
",",
"ALERTS",
"=",
"sum",
"(",
"1",
"for",
"alert",
"in",
"StructuralAlerts",
"if",
"mol",
".",
"HasSubstructMatch",
"(",
"alert",
")",
")",
",",
")",
"# The replacement",
"# AROM=Lipinski.NumAromaticRings(mol),",
"# is not identical. The expression above tends to count more rings",
"# N1C2=CC=CC=C2SC3=C1C=CC4=C3C=CC=C4",
"# OC1=C(O)C=C2C(=C1)OC3=CC(=O)C(=CC3=C2C4=CC=CC=C4)O",
"# CC(C)C1=CC2=C(C)C=CC2=C(C)C=C1 uses 2, should be 0 ?",
"return",
"qedProperties"
] |
https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/Chem/QED.py#L243-L267
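A short usage sketch, assuming RDKit is installed: properties() feeds the weighted-desirability aggregate exposed as QED.qed().

from rdkit import Chem
from rdkit.Chem import QED

mol = Chem.MolFromSmiles("CC(=O)Oc1ccccc1C(=O)O")  # aspirin
props = QED.properties(mol)   # named tuple of the eight descriptors
print(props.MW, props.HBD, props.ALERTS)
print(QED.qed(mol))           # aggregated QED score in [0, 1]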
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/pandas/py3/pandas/core/indexes/base.py
|
python
|
Index.get_slice_bound
|
(self, label, side: str_t, kind=None)
|
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
int
Index of label.
|
Calculate slice bound that corresponds to given label.
|
[
"Calculate",
"slice",
"bound",
"that",
"corresponds",
"to",
"given",
"label",
"."
] |
def get_slice_bound(self, label, side: str_t, kind=None) -> int:
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
int
Index of label.
"""
assert kind in ["loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg, must be either "
f"'left' or 'right': {side}"
)
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
else:
slc = lib.maybe_indices_to_slice(
slc.astype(np.intp, copy=False), len(self)
)
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
f"label: {repr(original_label)}"
)
if isinstance(slc, slice):
if side == "left":
return slc.start
else:
return slc.stop
else:
if side == "right":
return slc + 1
else:
return slc
|
[
"def",
"get_slice_bound",
"(",
"self",
",",
"label",
",",
"side",
":",
"str_t",
",",
"kind",
"=",
"None",
")",
"->",
"int",
":",
"assert",
"kind",
"in",
"[",
"\"loc\"",
",",
"\"getitem\"",
",",
"None",
"]",
"if",
"side",
"not",
"in",
"(",
"\"left\"",
",",
"\"right\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for side kwarg, must be either \"",
"f\"'left' or 'right': {side}\"",
")",
"original_label",
"=",
"label",
"# For datetime indices label may be a string that has to be converted",
"# to datetime boundary according to its resolution.",
"label",
"=",
"self",
".",
"_maybe_cast_slice_bound",
"(",
"label",
",",
"side",
")",
"# we need to look up the label",
"try",
":",
"slc",
"=",
"self",
".",
"get_loc",
"(",
"label",
")",
"except",
"KeyError",
"as",
"err",
":",
"try",
":",
"return",
"self",
".",
"_searchsorted_monotonic",
"(",
"label",
",",
"side",
")",
"except",
"ValueError",
":",
"# raise the original KeyError",
"raise",
"err",
"if",
"isinstance",
"(",
"slc",
",",
"np",
".",
"ndarray",
")",
":",
"# get_loc may return a boolean array or an array of indices, which",
"# is OK as long as they are representable by a slice.",
"if",
"is_bool_dtype",
"(",
"slc",
")",
":",
"slc",
"=",
"lib",
".",
"maybe_booleans_to_slice",
"(",
"slc",
".",
"view",
"(",
"\"u1\"",
")",
")",
"else",
":",
"slc",
"=",
"lib",
".",
"maybe_indices_to_slice",
"(",
"slc",
".",
"astype",
"(",
"np",
".",
"intp",
",",
"copy",
"=",
"False",
")",
",",
"len",
"(",
"self",
")",
")",
"if",
"isinstance",
"(",
"slc",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"KeyError",
"(",
"f\"Cannot get {side} slice bound for non-unique \"",
"f\"label: {repr(original_label)}\"",
")",
"if",
"isinstance",
"(",
"slc",
",",
"slice",
")",
":",
"if",
"side",
"==",
"\"left\"",
":",
"return",
"slc",
".",
"start",
"else",
":",
"return",
"slc",
".",
"stop",
"else",
":",
"if",
"side",
"==",
"\"right\"",
":",
"return",
"slc",
"+",
"1",
"else",
":",
"return",
"slc"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/indexes/base.py#L5767-L5833
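A worked example of the two sides (the kind parameter is optional here and has been removed in later pandas versions, so it is omitted):

import pandas as pd

idx = pd.Index(["a", "b", "c", "d"])
# Leftmost position of 'b' versus one-past-the-rightmost:
assert idx.get_slice_bound("b", side="left") == 1
assert idx.get_slice_bound("b", side="right") == 2
# These are the bounds label-based slicing uses: positions [1:2] cover 'b'.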
|
||
TGAC/KAT
|
e8870331de2b4bb0a1b3b91c6afb8fb9d59e9216
|
deps/boost/tools/build/src/build/property_set.py
|
python
|
create
|
(raw_properties = [])
|
return __cache [key]
|
Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
|
Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
|
[
"Creates",
"a",
"new",
"PropertySet",
"instance",
"for",
"the",
"given",
"raw",
"properties",
"or",
"returns",
"an",
"already",
"existing",
"one",
"."
] |
def create (raw_properties = []):
""" Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
"""
assert (is_iterable_typed(raw_properties, property.Property)
or is_iterable_typed(raw_properties, basestring))
# FIXME: propagate to callers.
if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
x = raw_properties
else:
x = [property.create_from_string(ps) for ps in raw_properties]
# These two lines of code are optimized to the current state
# of the Property class. Since this function acts as the caching
# frontend to the PropertySet class modifying these two lines
# could have a severe performance penalty. Be careful.
# It would be faster to sort by p.id, but some projects may rely
# on the fact that the properties are ordered alphabetically. So,
# we maintain alphabetical sorting so as to maintain backward compatibility.
x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
key = tuple(p.id for p in x)
if key not in __cache:
__cache [key] = PropertySet(x)
return __cache [key]
|
[
"def",
"create",
"(",
"raw_properties",
"=",
"[",
"]",
")",
":",
"assert",
"(",
"is_iterable_typed",
"(",
"raw_properties",
",",
"property",
".",
"Property",
")",
"or",
"is_iterable_typed",
"(",
"raw_properties",
",",
"basestring",
")",
")",
"# FIXME: propagate to callers.",
"if",
"len",
"(",
"raw_properties",
")",
">",
"0",
"and",
"isinstance",
"(",
"raw_properties",
"[",
"0",
"]",
",",
"property",
".",
"Property",
")",
":",
"x",
"=",
"raw_properties",
"else",
":",
"x",
"=",
"[",
"property",
".",
"create_from_string",
"(",
"ps",
")",
"for",
"ps",
"in",
"raw_properties",
"]",
"# These two lines of code are optimized to the current state",
"# of the Property class. Since this function acts as the caching",
"# frontend to the PropertySet class modifying these two lines",
"# could have a severe performance penalty. Be careful.",
"# It would be faster to sort by p.id, but some projects may rely",
"# on the fact that the properties are ordered alphabetically. So,",
"# we maintain alphabetical sorting so as to maintain backward compatibility.",
"x",
"=",
"sorted",
"(",
"set",
"(",
"x",
")",
",",
"key",
"=",
"lambda",
"p",
":",
"(",
"p",
".",
"feature",
".",
"name",
",",
"p",
".",
"value",
",",
"p",
".",
"condition",
")",
")",
"key",
"=",
"tuple",
"(",
"p",
".",
"id",
"for",
"p",
"in",
"x",
")",
"if",
"key",
"not",
"in",
"__cache",
":",
"__cache",
"[",
"key",
"]",
"=",
"PropertySet",
"(",
"x",
")",
"return",
"__cache",
"[",
"key",
"]"
] |
https://github.com/TGAC/KAT/blob/e8870331de2b4bb0a1b3b91c6afb8fb9d59e9216/deps/boost/tools/build/src/build/property_set.py#L36-L61
|
|
mindspore-ai/mindspore
|
fb8fd3338605bb34fa5cea054e535a8b1d753fab
|
mindspore/python/mindspore/train/callback/_landscape.py
|
python
|
SummaryLandscape._log_message
|
(self, create_landscape, index=None, interval=None, final_epochs=None)
|
Generate drawing information using log.
|
Generate drawing information using log.
|
[
"Generate",
"drawing",
"information",
"using",
"log",
"."
] |
def _log_message(self, create_landscape, index=None, interval=None, final_epochs=None):
"""Generate drawing information using log."""
if final_epochs is None:
if create_landscape['result']:
msg = f"Start to create the {index + 1}/{len(self._epoch_group) + 1} landscapes, " \
f"checkpoint is {interval}, decomposition is PCA."
else:
msg = f"Start to create the {index + 1}/{len(self._epoch_group)} landscapes, " \
f"checkpoint is {interval}, decomposition is PCA."
else:
if create_landscape['train']:
msg = f"Start to create the {len(self._epoch_group) + 1}/{len(self._epoch_group) + 1} landscapes, " \
f"checkpoint is {final_epochs}, decomposition is Random. "
else:
msg = f"Start to create the {1}/{1} landscapes, " \
f"checkpoint is {final_epochs}, decomposition is Random."
logger.info(msg)
|
[
"def",
"_log_message",
"(",
"self",
",",
"create_landscape",
",",
"index",
"=",
"None",
",",
"interval",
"=",
"None",
",",
"final_epochs",
"=",
"None",
")",
":",
"if",
"final_epochs",
"is",
"None",
":",
"if",
"create_landscape",
"[",
"'result'",
"]",
":",
"msg",
"=",
"f\"Start to create the {index + 1}/{len(self._epoch_group) + 1} landscapes, \"",
"f\"checkpoint is {interval}, decomposition is PCA.\"",
"else",
":",
"msg",
"=",
"f\"Start to create the {index + 1}/{len(self._epoch_group)} landscapes, \"",
"f\"checkpoint is {interval}, decomposition is PCA.\"",
"else",
":",
"if",
"create_landscape",
"[",
"'train'",
"]",
":",
"msg",
"=",
"f\"Start to create the {len(self._epoch_group) + 1}/{len(self._epoch_group) + 1} landscapes, \"",
"f\"checkpoint is {final_epochs}, decomposition is Random. \"",
"else",
":",
"msg",
"=",
"f\"Start to create the {1}/{1} landscapes, \"",
"f\"checkpoint is {final_epochs}, decomposition is Random.\"",
"logger",
".",
"info",
"(",
"msg",
")"
] |
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/train/callback/_landscape.py#L379-L395
|
||
pmq20/node-packer
|
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
|
current/tools/inspector_protocol/jinja2/nodes.py
|
python
|
Node.find
|
(self, node_type)
|
Find the first node of a given type. If no such node exists the
return value is `None`.
|
Find the first node of a given type. If no such node exists the
return value is `None`.
|
[
"Find",
"the",
"first",
"node",
"of",
"a",
"given",
"type",
".",
"If",
"no",
"such",
"node",
"exists",
"the",
"return",
"value",
"is",
"None",
"."
] |
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
|
[
"def",
"find",
"(",
"self",
",",
"node_type",
")",
":",
"for",
"result",
"in",
"self",
".",
"find_all",
"(",
"node_type",
")",
":",
"return",
"result"
] |
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/tools/inspector_protocol/jinja2/nodes.py#L177-L182
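A short usage sketch with the public Jinja2 API: find() is handy for probing a parsed template's AST.

from jinja2 import Environment, nodes

ast = Environment().parse("{{ user.name }}")
# First attribute access in the tree, or None if there is none.
getattr_node = ast.find(nodes.Getattr)
assert getattr_node is not None and getattr_node.attr == "name"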
|
||
rbgirshick/caffe-fast-rcnn
|
28a579eaf0668850705598b3075b8969f22226d9
|
scripts/cpp_lint.py
|
python
|
_CppLintState.SetCountingStyle
|
(self, counting_style)
|
Sets the module's counting options.
|
Sets the module's counting options.
|
[
"Sets",
"the",
"module",
"s",
"counting",
"options",
"."
] |
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
|
[
"def",
"SetCountingStyle",
"(",
"self",
",",
"counting_style",
")",
":",
"self",
".",
"counting",
"=",
"counting_style"
] |
https://github.com/rbgirshick/caffe-fast-rcnn/blob/28a579eaf0668850705598b3075b8969f22226d9/scripts/cpp_lint.py#L713-L715
|
||
microsoft/TSS.MSR
|
0f2516fca2cd9929c31d5450e39301c9bde43688
|
TSS.Py/src/TpmTypes.py
|
python
|
TPMS_TDES_SYM_DETAILS.fromTpm
|
(buf)
|
return buf.createObj(TPMS_TDES_SYM_DETAILS)
|
Returns new TPMS_TDES_SYM_DETAILS object constructed from its
marshaled representation in the given TpmBuffer buffer
|
Returns new TPMS_TDES_SYM_DETAILS object constructed from its
marshaled representation in the given TpmBuffer buffer
|
[
"Returns",
"new",
"TPMS_TDES_SYM_DETAILS",
"object",
"constructed",
"from",
"its",
"marshaled",
"representation",
"in",
"the",
"given",
"TpmBuffer",
"buffer"
] |
def fromTpm(buf):
""" Returns new TPMS_TDES_SYM_DETAILS object constructed from its
marshaled representation in the given TpmBuffer buffer
"""
return buf.createObj(TPMS_TDES_SYM_DETAILS)
|
[
"def",
"fromTpm",
"(",
"buf",
")",
":",
"return",
"buf",
".",
"createObj",
"(",
"TPMS_TDES_SYM_DETAILS",
")"
] |
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L5613-L5617
|
|
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/graph_editor/subgraph.py
|
python
|
SubGraphView.remap_outputs_make_unique
|
(self)
|
return res
|
Remap the outputs so that all the tensors appear only once.
|
Remap the outputs so that all the tensors appear only once.
|
[
"Remap",
"the",
"outputs",
"so",
"that",
"all",
"the",
"tensors",
"appears",
"only",
"once",
"."
] |
def remap_outputs_make_unique(self):
"""Remap the outputs so that all the tensors appears only once."""
res = copy.copy(self)
res._remap_outputs_make_unique() # pylint: disable=protected-access
return res
|
[
"def",
"remap_outputs_make_unique",
"(",
"self",
")",
":",
"res",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"res",
".",
"_remap_outputs_make_unique",
"(",
")",
"# pylint: disable=protected-access",
"return",
"res"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/graph_editor/subgraph.py#L324-L328
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/_gdi.py
|
python
|
GraphicsGradientStops.GetCount
|
(*args, **kwargs)
|
return _gdi_.GraphicsGradientStops_GetCount(*args, **kwargs)
|
GetCount(self) -> unsigned int
|
GetCount(self) -> unsigned int
|
[
"GetCount",
"(",
"self",
")",
"-",
">",
"unsigned",
"int"
] |
def GetCount(*args, **kwargs):
"""GetCount(self) -> unsigned int"""
return _gdi_.GraphicsGradientStops_GetCount(*args, **kwargs)
|
[
"def",
"GetCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"GraphicsGradientStops_GetCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L6090-L6092
|
|
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSDarkRunBackgroundCorrection.py
|
python
|
DarkRunMonitorAndDetectorRemover.set_pure_detector_dark_run
|
(self, dark_run)
|
return dark_run
|
Sets all monitors on the dark run workspace to 0.
@param dark_run: the dark run workspace
|
Sets all monitors on the dark run workspace to 0.
|
[
"Sets",
"all",
"monitors",
"on",
"the",
"dark",
"run",
"workspace",
"to",
"0",
"."
] |
def set_pure_detector_dark_run(self, dark_run):
'''
Sets all monitors on the dark run workspace to 0.
@param dark_run: the dark run workspace
'''
# Get the list of monitor workspace indices
monitor_list = self.find_monitor_workspace_indices(dark_run)
# Since we only have around 10 or so monitors
# we set them manually to 0
for ws_index, dummy_det_id in monitor_list:
data = dark_run.dataY(ws_index)
error = dark_run.dataE(ws_index)
data = data*0
error = error*0
dark_run.setY(ws_index,data)
dark_run.setE(ws_index,error)
return dark_run
|
[
"def",
"set_pure_detector_dark_run",
"(",
"self",
",",
"dark_run",
")",
":",
"# Get the list of monitor workspace indices",
"monitor_list",
"=",
"self",
".",
"find_monitor_workspace_indices",
"(",
"dark_run",
")",
"# Since we only have around 10 or so monitors",
"# we set them manually to 0",
"for",
"ws_index",
",",
"dummy_det_id",
"in",
"monitor_list",
":",
"data",
"=",
"dark_run",
".",
"dataY",
"(",
"ws_index",
")",
"error",
"=",
"dark_run",
".",
"dataE",
"(",
"ws_index",
")",
"data",
"=",
"data",
"*",
"0",
"error",
"=",
"error",
"*",
"0",
"dark_run",
".",
"setY",
"(",
"ws_index",
",",
"data",
")",
"dark_run",
".",
"setE",
"(",
"ws_index",
",",
"error",
")",
"return",
"dark_run"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSDarkRunBackgroundCorrection.py#L269-L287
|
|
arangodb/arangodb
|
0d658689c7d1b721b314fa3ca27d38303e1570c8
|
3rdParty/V8/v7.9.317/third_party/jinja2/filters.py
|
python
|
do_random
|
(context, seq)
|
Return a random item from the sequence.
|
Return a random item from the sequence.
|
[
"Return",
"a",
"random",
"item",
"from",
"the",
"sequence",
"."
] |
def do_random(context, seq):
"""Return a random item from the sequence."""
try:
return random.choice(seq)
except IndexError:
return context.environment.undefined('No random item, sequence was empty.')
|
[
"def",
"do_random",
"(",
"context",
",",
"seq",
")",
":",
"try",
":",
"return",
"random",
".",
"choice",
"(",
"seq",
")",
"except",
"IndexError",
":",
"return",
"context",
".",
"environment",
".",
"undefined",
"(",
"'No random item, sequence was empty.'",
")"
] |
https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/v7.9.317/third_party/jinja2/filters.py#L451-L456
|
||
cms-sw/cmssw
|
fd9de012d503d3405420bcbeec0ec879baa57cf2
|
Alignment/MuonAlignment/python/svgfig.py
|
python
|
Path.parse_number
|
(self, index, pathdata)
|
Part of Path's text-command parsing algorithm; used internally.
|
Part of Path's text-command parsing algorithm; used internally.
|
[
"Part",
"of",
"Path",
"s",
"text",
"-",
"command",
"parsing",
"algorithm",
";",
"used",
"internally",
"."
] |
def parse_number(self, index, pathdata):
"""Part of Path's text-command parsing algorithm; used internally."""
index, pathdata = self.parse_whitespace(index, pathdata)
if index >= len(pathdata): return None, index, pathdata
first_digit = pathdata[index]
if "0" <= first_digit <= "9" or first_digit in ("-", "+", "."):
start = index
while index < len(pathdata) and ("0" <= pathdata[index] <= "9" or pathdata[index] in ("-", "+", ".", "e", "E")):
index += 1
end = index
index = end
return float(pathdata[start:end]), index, pathdata
else:
return None, index, pathdata
|
[
"def",
"parse_number",
"(",
"self",
",",
"index",
",",
"pathdata",
")",
":",
"index",
",",
"pathdata",
"=",
"self",
".",
"parse_whitespace",
"(",
"index",
",",
"pathdata",
")",
"if",
"index",
">=",
"len",
"(",
"pathdata",
")",
":",
"return",
"None",
",",
"index",
",",
"pathdata",
"first_digit",
"=",
"pathdata",
"[",
"index",
"]",
"if",
"\"0\"",
"<=",
"first_digit",
"<=",
"\"9\"",
"or",
"first_digit",
"in",
"(",
"\"-\"",
",",
"\"+\"",
",",
"\".\"",
")",
":",
"start",
"=",
"index",
"while",
"index",
"<",
"len",
"(",
"pathdata",
")",
"and",
"(",
"\"0\"",
"<=",
"pathdata",
"[",
"index",
"]",
"<=",
"\"9\"",
"or",
"pathdata",
"[",
"index",
"]",
"in",
"(",
"\"-\"",
",",
"\"+\"",
",",
"\".\"",
",",
"\"e\"",
",",
"\"E\"",
")",
")",
":",
"index",
"+=",
"1",
"end",
"=",
"index",
"index",
"=",
"end",
"return",
"float",
"(",
"pathdata",
"[",
"start",
":",
"end",
"]",
")",
",",
"index",
",",
"pathdata",
"else",
":",
"return",
"None",
",",
"index",
",",
"pathdata"
] |
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/Alignment/MuonAlignment/python/svgfig.py#L1087-L1103
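For well-formed path data the scan above is equivalent to a greedy regex over the float character class; a simplified stand-in (like the original, a malformed run such as "1e+" would raise ValueError from float(), and the real parse_whitespace may treat separators differently):

import re

_NUM = re.compile(r"\s*([-+.0-9][-+.0-9eE]*)")

def scan_number(index, pathdata):
    # Skip whitespace, then greedily consume number characters.
    m = _NUM.match(pathdata, index)
    if m is None:
        return None, index, pathdata
    return float(m.group(1)), m.end(), pathdata

assert scan_number(0, "  -1.5e2,3")[:2] == (-150.0, 8)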
|
||
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/math_ops.py
|
python
|
abs
|
(x, name=None)
|
r"""Computes the absolute value of a tensor.
Given a tensor of integer or floating-point values, this operation returns a
tensor of the same type, where each element contains the absolute value of the
corresponding element in the input.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
```python
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
tf.abs(x) # [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
`int32`, `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size, type, and sparsity as `x` with
absolute values.
Note, for `complex64` or `complex128` input, the returned `Tensor` will be
of type `float32` or `float64`, respectively.
|
r"""Computes the absolute value of a tensor.
|
[
"r",
"Computes",
"the",
"absolute",
"value",
"of",
"a",
"tensor",
"."
] |
def abs(x, name=None): # pylint: disable=redefined-builtin
r"""Computes the absolute value of a tensor.
Given a tensor of integer or floating-point values, this operation returns a
tensor of the same type, where each element contains the absolute value of the
corresponding element in the input.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
```python
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
tf.abs(x) # [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
`int32`, `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size, type, and sparsity as `x` with
absolute values.
Note, for `complex64` or `complex128` input, the returned `Tensor` will be
of type `float32` or `float64`, respectively.
"""
with ops.name_scope(name, "Abs", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
|
[
"def",
"abs",
"(",
"x",
",",
"name",
"=",
"None",
")",
":",
"# pylint: disable=redefined-builtin",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"Abs\"",
",",
"[",
"x",
"]",
")",
"as",
"name",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"\"x\"",
")",
"if",
"x",
".",
"dtype",
".",
"is_complex",
":",
"return",
"gen_math_ops",
".",
"complex_abs",
"(",
"x",
",",
"Tout",
"=",
"x",
".",
"dtype",
".",
"real_dtype",
",",
"name",
"=",
"name",
")",
"return",
"gen_math_ops",
".",
"_abs",
"(",
"x",
",",
"name",
"=",
"name",
")"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/math_ops.py#L247-L278
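A quick worked check of the complex magnitude formula quoted in the docstring:

import math

a, b = -2.25, 4.75
print(math.sqrt(a**2 + b**2))  # 5.2559490... matches the 5.25594902 above
print(abs(complex(a, b)))      # Python's built-in abs agrees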
|
||
shader-slang/slang
|
b8982fcf43b86c1e39dcc3dd19bff2821633eda6
|
external/vulkan/registry/reg.py
|
python
|
Registry.getAlias
|
(self, elem, dict)
|
return alias
|
Check for an alias in the same require block.
- elem - Element to check for an alias
|
Check for an alias in the same require block.
|
[
"Check",
"for",
"an",
"alias",
"in",
"the",
"same",
"require",
"block",
"."
] |
def getAlias(self, elem, dict):
"""Check for an alias in the same require block.
- elem - Element to check for an alias"""
# Try to find an alias
alias = elem.get('alias')
if alias is None:
name = elem.get('name')
typeinfo = self.lookupElementInfo(name, dict)
alias = typeinfo.elem.get('alias')
return alias
|
[
"def",
"getAlias",
"(",
"self",
",",
"elem",
",",
"dict",
")",
":",
"# Try to find an alias",
"alias",
"=",
"elem",
".",
"get",
"(",
"'alias'",
")",
"if",
"alias",
"is",
"None",
":",
"name",
"=",
"elem",
".",
"get",
"(",
"'name'",
")",
"typeinfo",
"=",
"self",
".",
"lookupElementInfo",
"(",
"name",
",",
"dict",
")",
"alias",
"=",
"typeinfo",
".",
"elem",
".",
"get",
"(",
"'alias'",
")",
"return",
"alias"
] |
https://github.com/shader-slang/slang/blob/b8982fcf43b86c1e39dcc3dd19bff2821633eda6/external/vulkan/registry/reg.py#L796-L808
|
|
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Source/ThirdParty/CEF3/pristine/cef_source/tools/cef_parser.py
|
python
|
obj_analysis.is_result_vector
|
(self)
|
return (self.result_type == 'vector')
|
Returns true if this is a vector type.
|
Returns true if this is a vector type.
|
[
"Returns",
"true",
"if",
"this",
"is",
"a",
"vector",
"type",
"."
] |
def is_result_vector(self):
""" Returns true if this is a vector type. """
return (self.result_type == 'vector')
|
[
"def",
"is_result_vector",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"result_type",
"==",
"'vector'",
")"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/pristine/cef_source/tools/cef_parser.py#L1877-L1879
|
|
HackWebRTC/webrtc
|
7abfc990c00ab35090fff285fcf635d1d7892433
|
PRESUBMIT.py
|
python
|
CheckNewlineAtTheEndOfProtoFiles
|
(input_api, output_api, source_file_filter)
|
return results
|
Checks that all .proto files are terminated with a newline.
|
Checks that all .proto files are terminated with a newline.
|
[
"Checks",
"that",
"all",
".",
"proto",
"files",
"are",
"terminated",
"with",
"a",
"newline",
"."
] |
def CheckNewlineAtTheEndOfProtoFiles(input_api, output_api, source_file_filter):
"""Checks that all .proto files are terminated with a newline."""
error_msg = 'File {} must end with exactly one newline.'
results = []
file_filter = lambda x: input_api.FilterSourceFile(
x, white_list=(r'.+\.proto$',)) and source_file_filter(x)
for f in input_api.AffectedSourceFiles(file_filter):
file_path = f.LocalPath()
with open(file_path) as f:
lines = f.readlines()
if len(lines) > 0 and not lines[-1].endswith('\n'):
results.append(output_api.PresubmitError(error_msg.format(file_path)))
return results
|
[
"def",
"CheckNewlineAtTheEndOfProtoFiles",
"(",
"input_api",
",",
"output_api",
",",
"source_file_filter",
")",
":",
"error_msg",
"=",
"'File {} must end with exactly one newline.'",
"results",
"=",
"[",
"]",
"file_filter",
"=",
"lambda",
"x",
":",
"input_api",
".",
"FilterSourceFile",
"(",
"x",
",",
"white_list",
"=",
"(",
"r'.+\\.proto$'",
",",
")",
")",
"and",
"source_file_filter",
"(",
"x",
")",
"for",
"f",
"in",
"input_api",
".",
"AffectedSourceFiles",
"(",
"file_filter",
")",
":",
"file_path",
"=",
"f",
".",
"LocalPath",
"(",
")",
"with",
"open",
"(",
"file_path",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"if",
"len",
"(",
"lines",
")",
">",
"0",
"and",
"not",
"lines",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"'\\n'",
")",
":",
"results",
".",
"append",
"(",
"output_api",
".",
"PresubmitError",
"(",
"error_msg",
".",
"format",
"(",
"file_path",
")",
")",
")",
"return",
"results"
] |
https://github.com/HackWebRTC/webrtc/blob/7abfc990c00ab35090fff285fcf635d1d7892433/PRESUBMIT.py#L1059-L1071
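Two caveats worth noting: the check above only confirms the last line ends with a newline (so a file ending in several blank lines still passes, despite the "exactly one newline" message), and the loop variable f is rebound by the with statement. A stricter standalone predicate, as a hedged sketch:

def ends_with_exactly_one_newline(content):
    # True only when the text ends with '\n' but not with a blank line.
    return content.endswith("\n") and not content.endswith("\n\n")

assert ends_with_exactly_one_newline("message Foo {}\n")
assert not ends_with_exactly_one_newline("message Foo {}")
assert not ends_with_exactly_one_newline("message Foo {}\n\n")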
|
|
wlanjie/AndroidFFmpeg
|
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
|
tools/fdk-aac-build/x86/toolchain/lib/python2.7/nntplib.py
|
python
|
NNTP.newgroups
|
(self, date, time, file=None)
|
return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
|
Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names
|
Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names
|
[
"Process",
"a",
"NEWGROUPS",
"command",
".",
"Arguments",
":",
"-",
"date",
":",
"string",
"yymmdd",
"indicating",
"the",
"date",
"-",
"time",
":",
"string",
"hhmmss",
"indicating",
"the",
"time",
"Return",
":",
"-",
"resp",
":",
"server",
"response",
"if",
"successful",
"-",
"list",
":",
"list",
"of",
"newsgroup",
"names"
] |
def newgroups(self, date, time, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
|
[
"def",
"newgroups",
"(",
"self",
",",
"date",
",",
"time",
",",
"file",
"=",
"None",
")",
":",
"return",
"self",
".",
"longcmd",
"(",
"'NEWGROUPS '",
"+",
"date",
"+",
"' '",
"+",
"time",
",",
"file",
")"
] |
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/nntplib.py#L266-L274
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/setuptools/py2/setuptools/_vendor/pyparsing.py
|
python
|
ParserElement.transformString
|
( self, instring )
|
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
|
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
|
[
"Extension",
"to",
"C",
"{",
"L",
"{",
"scanString",
"}}",
"to",
"modify",
"matching",
"text",
"with",
"modified",
"tokens",
"that",
"may",
"be",
"returned",
"from",
"a",
"parse",
"action",
".",
"To",
"use",
"C",
"{",
"transformString",
"}",
"define",
"a",
"grammar",
"and",
"attach",
"a",
"parse",
"action",
"to",
"it",
"that",
"modifies",
"the",
"returned",
"token",
"list",
".",
"Invoking",
"C",
"{",
"transformString",
"()",
"}",
"on",
"a",
"target",
"string",
"will",
"then",
"scan",
"for",
"matches",
"and",
"replace",
"the",
"matched",
"text",
"patterns",
"according",
"to",
"the",
"logic",
"in",
"the",
"parse",
"action",
".",
"C",
"{",
"transformString",
"()",
"}",
"returns",
"the",
"resulting",
"transformed",
"string",
".",
"Example",
"::",
"wd",
"=",
"Word",
"(",
"alphas",
")",
"wd",
".",
"setParseAction",
"(",
"lambda",
"toks",
":",
"toks",
"[",
"0",
"]",
".",
"title",
"()",
")",
"print",
"(",
"wd",
".",
"transformString",
"(",
"now",
"is",
"the",
"winter",
"of",
"our",
"discontent",
"made",
"glorious",
"summer",
"by",
"this",
"sun",
"of",
"york",
".",
"))",
"Prints",
"::",
"Now",
"Is",
"The",
"Winter",
"Of",
"Our",
"Discontent",
"Made",
"Glorious",
"Summer",
"By",
"This",
"Sun",
"Of",
"York",
"."
] |
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
|
[
"def",
"transformString",
"(",
"self",
",",
"instring",
")",
":",
"out",
"=",
"[",
"]",
"lastE",
"=",
"0",
"# force preservation of <TAB>s, to minimize unwanted transformation of string, and to",
"# keep string locs straight between transformString and scanString",
"self",
".",
"keepTabs",
"=",
"True",
"try",
":",
"for",
"t",
",",
"s",
",",
"e",
"in",
"self",
".",
"scanString",
"(",
"instring",
")",
":",
"out",
".",
"append",
"(",
"instring",
"[",
"lastE",
":",
"s",
"]",
")",
"if",
"t",
":",
"if",
"isinstance",
"(",
"t",
",",
"ParseResults",
")",
":",
"out",
"+=",
"t",
".",
"asList",
"(",
")",
"elif",
"isinstance",
"(",
"t",
",",
"list",
")",
":",
"out",
"+=",
"t",
"else",
":",
"out",
".",
"append",
"(",
"t",
")",
"lastE",
"=",
"e",
"out",
".",
"append",
"(",
"instring",
"[",
"lastE",
":",
"]",
")",
"out",
"=",
"[",
"o",
"for",
"o",
"in",
"out",
"if",
"o",
"]",
"return",
"\"\"",
".",
"join",
"(",
"map",
"(",
"_ustr",
",",
"_flatten",
"(",
"out",
")",
")",
")",
"except",
"ParseBaseException",
"as",
"exc",
":",
"if",
"ParserElement",
".",
"verbose_stacktrace",
":",
"raise",
"else",
":",
"# catch and re-raise exception from here, clears out pyparsing internal stack trace",
"raise",
"exc"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/_vendor/pyparsing.py#L1729-L1770
|
||
microsoft/LightGBM
|
904b2d5158703c4900b68008617951dd2f9ff21b
|
python-package/lightgbm/basic.py
|
python
|
c_array
|
(ctype, values)
|
return (ctype * len(values))(*values)
|
Convert a Python array to C array.
|
Convert a Python array to C array.
|
[
"Convert",
"a",
"Python",
"array",
"to",
"C",
"array",
"."
] |
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
|
[
"def",
"c_array",
"(",
"ctype",
",",
"values",
")",
":",
"return",
"(",
"ctype",
"*",
"len",
"(",
"values",
")",
")",
"(",
"*",
"values",
")"
] |
https://github.com/microsoft/LightGBM/blob/904b2d5158703c4900b68008617951dd2f9ff21b/python-package/lightgbm/basic.py#L265-L267
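A short usage sketch: the helper just instantiates a ctypes array type sized to the input, which C code can then consume as a pointer.

import ctypes

def c_array(ctype, values):
    return (ctype * len(values))(*values)

arr = c_array(ctypes.c_double, [1.0, 2.0, 3.0])
assert list(arr) == [1.0, 2.0, 3.0]
# arr is usable wherever a C `double *` is expected via ctypes.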
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/ipython/py3/IPython/utils/sysinfo.py
|
python
|
get_sys_info
|
()
|
return pkg_info(path)
|
Return useful information about IPython and the system, as a dict.
|
Return useful information about IPython and the system, as a dict.
|
[
"Return",
"useful",
"information",
"about",
"IPython",
"and",
"the",
"system",
"as",
"a",
"dict",
"."
] |
def get_sys_info():
"""Return useful information about IPython and the system, as a dict."""
p = os.path
path = p.realpath(p.dirname(p.abspath(p.join(__file__, '..'))))
return pkg_info(path)
|
[
"def",
"get_sys_info",
"(",
")",
":",
"p",
"=",
"os",
".",
"path",
"path",
"=",
"p",
".",
"realpath",
"(",
"p",
".",
"dirname",
"(",
"p",
".",
"abspath",
"(",
"p",
".",
"join",
"(",
"__file__",
",",
"'..'",
")",
")",
")",
")",
"return",
"pkg_info",
"(",
"path",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/utils/sysinfo.py#L95-L99
|
|
panda3d/panda3d
|
833ad89ebad58395d0af0b7ec08538e5e4308265
|
direct/src/actor/Actor.py
|
python
|
Actor.__bindAnimToPart
|
(self, animName, partName, lodName,
allowAsyncBind = True)
|
return animControl
|
Binds the named animation to the named part/lod and returns
the associated animControl. The animation is loaded and bound
in a sub-thread, if allowAsyncBind is True,
self.allowAsyncBind is True, threading is enabled, and the
animation has a preload table generated for it (e.g. via
"egg-optchar -preload"). Even though the animation may or may
not be yet bound at the time this function returns, a usable
animControl is returned, or None if the animation could not be
bound.
|
Binds the named animation to the named part/lod and returns
the associated animControl. The animation is loaded and bound
in a sub-thread, if allowAsyncBind is True,
self.allowAsyncBind is True, threading is enabled, and the
animation has a preload table generated for it (e.g. via
"egg-optchar -preload"). Even though the animation may or may
not be yet bound at the time this function returns, a usable
animControl is returned, or None if the animation could not be
bound.
|
[
"Binds",
"the",
"named",
"animation",
"to",
"the",
"named",
"part",
"/",
"lod",
"and",
"returns",
"the",
"associated",
"animControl",
".",
"The",
"animation",
"is",
"loaded",
"and",
"bound",
"in",
"a",
"sub",
"-",
"thread",
"if",
"allowAsyncBind",
"is",
"True",
"self",
".",
"allowAsyncBind",
"is",
"True",
"threading",
"is",
"enabled",
"and",
"the",
"animation",
"has",
"a",
"preload",
"table",
"generated",
"for",
"it",
"(",
"e",
".",
"g",
".",
"via",
"egg",
"-",
"optchar",
"-",
"preload",
")",
".",
"Even",
"though",
"the",
"animation",
"may",
"or",
"may",
"not",
"be",
"yet",
"bound",
"at",
"the",
"time",
"this",
"function",
"returns",
"a",
"usable",
"animControl",
"is",
"returned",
"or",
"None",
"if",
"the",
"animation",
"could",
"not",
"be",
"bound",
"."
] |
def __bindAnimToPart(self, animName, partName, lodName,
allowAsyncBind = True):
"""
Binds the named animation to the named part/lod and returns
the associated animControl. The animation is loaded and bound
in a sub-thread, if allowAsyncBind is True,
self.allowAsyncBind is True, threading is enabled, and the
animation has a preload table generated for it (e.g. via
"egg-optchar -preload"). Even though the animation may or may
not be yet bound at the time this function returns, a usable
animControl is returned, or None if the animation could not be
bound.
"""
# make sure this anim is in the dict
subpartDef = self.__subpartDict.get(partName, Actor.SubpartDef(partName))
partDict = self.__animControlDict[lodName]
animDict = partDict.get(partName)
if animDict is None:
# It must be a subpart that hasn't been bound yet.
animDict = {}
partDict[partName] = animDict
anim = animDict.get(animName)
if anim is None:
# It must be a subpart that hasn't been bound yet.
anim = partDict[subpartDef.truePartName].get(animName)
anim = anim.makeCopy()
animDict[animName] = anim
if anim is None:
Actor.notify.error("actor has no animation %s", animName)
# only bind if not already bound!
if anim.animControl:
return anim.animControl
if self.mergeLODBundles:
bundle = self.__commonBundleHandles[subpartDef.truePartName].getBundle()
else:
bundle = self.__partBundleDict[lodName][subpartDef.truePartName].getBundle()
if anim.animBundle:
# We already have a bundle; just bind it.
animControl = bundle.bindAnim(anim.animBundle, -1, subpartDef.subset)
else:
# Load and bind the anim. This might be an asynchronous
# operation that will complete in the background, but if so it
# will still return a usable AnimControl.
animControl = bundle.loadBindAnim(
self.loader, Filename(anim.filename), -1,
subpartDef.subset, allowAsyncBind and self.allowAsyncBind)
if not animControl:
# Couldn't bind. (This implies the binding operation was
# not attempted asynchronously.)
return None
# store the animControl
anim.animControl = animControl
assert Actor.notify.debug("binding anim: %s to part: %s, lod: %s" %
(animName, partName, lodName))
return animControl
|
[
"def",
"__bindAnimToPart",
"(",
"self",
",",
"animName",
",",
"partName",
",",
"lodName",
",",
"allowAsyncBind",
"=",
"True",
")",
":",
"# make sure this anim is in the dict",
"subpartDef",
"=",
"self",
".",
"__subpartDict",
".",
"get",
"(",
"partName",
",",
"Actor",
".",
"SubpartDef",
"(",
"partName",
")",
")",
"partDict",
"=",
"self",
".",
"__animControlDict",
"[",
"lodName",
"]",
"animDict",
"=",
"partDict",
".",
"get",
"(",
"partName",
")",
"if",
"animDict",
"is",
"None",
":",
"# It must be a subpart that hasn't been bound yet.",
"animDict",
"=",
"{",
"}",
"partDict",
"[",
"partName",
"]",
"=",
"animDict",
"anim",
"=",
"animDict",
".",
"get",
"(",
"animName",
")",
"if",
"anim",
"is",
"None",
":",
"# It must be a subpart that hasn't been bound yet.",
"anim",
"=",
"partDict",
"[",
"subpartDef",
".",
"truePartName",
"]",
".",
"get",
"(",
"animName",
")",
"anim",
"=",
"anim",
".",
"makeCopy",
"(",
")",
"animDict",
"[",
"animName",
"]",
"=",
"anim",
"if",
"anim",
"is",
"None",
":",
"Actor",
".",
"notify",
".",
"error",
"(",
"\"actor has no animation %s\"",
",",
"animName",
")",
"# only bind if not already bound!",
"if",
"anim",
".",
"animControl",
":",
"return",
"anim",
".",
"animControl",
"if",
"self",
".",
"mergeLODBundles",
":",
"bundle",
"=",
"self",
".",
"__commonBundleHandles",
"[",
"subpartDef",
".",
"truePartName",
"]",
".",
"getBundle",
"(",
")",
"else",
":",
"bundle",
"=",
"self",
".",
"__partBundleDict",
"[",
"lodName",
"]",
"[",
"subpartDef",
".",
"truePartName",
"]",
".",
"getBundle",
"(",
")",
"if",
"anim",
".",
"animBundle",
":",
"# We already have a bundle; just bind it.",
"animControl",
"=",
"bundle",
".",
"bindAnim",
"(",
"anim",
".",
"animBundle",
",",
"-",
"1",
",",
"subpartDef",
".",
"subset",
")",
"else",
":",
"# Load and bind the anim. This might be an asynchronous",
"# operation that will complete in the background, but if so it",
"# will still return a usable AnimControl.",
"animControl",
"=",
"bundle",
".",
"loadBindAnim",
"(",
"self",
".",
"loader",
",",
"Filename",
"(",
"anim",
".",
"filename",
")",
",",
"-",
"1",
",",
"subpartDef",
".",
"subset",
",",
"allowAsyncBind",
"and",
"self",
".",
"allowAsyncBind",
")",
"if",
"not",
"animControl",
":",
"# Couldn't bind. (This implies the binding operation was",
"# not attempted asynchronously.)",
"return",
"None",
"# store the animControl",
"anim",
".",
"animControl",
"=",
"animControl",
"assert",
"Actor",
".",
"notify",
".",
"debug",
"(",
"\"binding anim: %s to part: %s, lod: %s\"",
"%",
"(",
"animName",
",",
"partName",
",",
"lodName",
")",
")",
"return",
"animControl"
] |
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/actor/Actor.py#L2328-L2391
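A minimal usage sketch of the lazy bind-and-cache behaviour this method implements: Actor binds an animation the first time it is needed and caches the resulting AnimControl. The model and animation paths below are made-up placeholders, and the snippet assumes a running ShowBase application so that a render graph exists; the private __bindAnimToPart is invoked internally by the public API.

from direct.showbase.ShowBase import ShowBase
from direct.actor.Actor import Actor

base = ShowBase()
# Hypothetical model/animation paths; nothing is bound at construction time.
actor = Actor("models/panda", {"walk": "models/panda-walk"})
actor.reparentTo(base.render)
actor.play("walk")                     # first use triggers the (possibly async) bind
ctrl = actor.getAnimControl("walk")    # the cached AnimControl, or None if binding failed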
|
|
notepadqq/notepadqq
|
d8000f256a516aa958d50bfcc19605dc296fbeb2
|
.travis/tools/macdeployqtfix/macdeployqtfix.py
|
python
|
normalize_qtlib_name
|
(filename)
|
return qtlib, abspath, rpath
|
input: a path to a qt library, as returned by otool, that can have this form:
- an absolute path /lib/xxx/yyy
- @executable_path/../Frameworks/QtSerialPort.framework/Versions/5/QtSerialPort
output:
a tuple (qtlib, abspath, rpath) where:
- qtlib is the name of the qtlib (QtCore, QtWidgets, etc.)
- abspath is the absolute path of the qt lib inside the app bundle of exepath
- rpath is the correct rpath to a qt lib inside the app bundle
|
input: a path to a qt library, as returned by otool, that can have this form:
- an absolute path /lib/xxx/yyy
-
|
[
"input",
":",
"a",
"path",
"to",
"a",
"qt",
"library",
"as",
"returned",
"by",
"otool",
"that",
"can",
"have",
"this",
"form",
":",
"-",
"an",
"absolute",
"path",
"/",
"lib",
"/",
"xxx",
"/",
"yyy",
"-"
] |
def normalize_qtlib_name(filename):
"""
    input: a path to a qt library, as returned by otool, that can have this form:
- an absolute path /lib/xxx/yyy
- @executable_path/../Frameworks/QtSerialPort.framework/Versions/5/QtSerialPort
output:
a tuple (qtlib, abspath, rpath) where:
- qtlib is the name of the qtlib (QtCore, QtWidgets, etc.)
- abspath is the absolute path of the qt lib inside the app bundle of exepath
    - rpath is the correct rpath to a qt lib inside the app bundle
"""
GlobalConfig.logger.debug('normalize_qtlib_name({0})'.format(filename))
qtlib_name_rgx = re.compile(QTLIB_NAME_REGEX)
rgxret = qtlib_name_rgx.match(filename)
if not rgxret:
msg = 'couldn\'t normalize a non-qt lib filename: {0}'.format(filename)
GlobalConfig.logger.critical(msg)
raise Exception(msg)
# qtlib normalization settings
qtlib = rgxret.groups()[0]
qtversion = 5
templ = Template(QTLIB_NORMALIZED)
    # from qtlib, forge 2 paths:
# - absolute path of qt lib in bundle,
abspath = os.path.normpath(templ.safe_substitute(
prefix=os.path.dirname(GlobalConfig.exepath) + '/..',
qtlib=qtlib,
qtversion=qtversion))
# - and rpath containing @executable_path, relative to exepath
rpath = templ.safe_substitute(
prefix='@executable_path/..',
qtlib=qtlib,
qtversion=qtversion)
GlobalConfig.logger.debug('\treturns({0})'.format((qtlib, abspath, rpath)))
return qtlib, abspath, rpath
|
[
"def",
"normalize_qtlib_name",
"(",
"filename",
")",
":",
"GlobalConfig",
".",
"logger",
".",
"debug",
"(",
"'normalize_qtlib_name({0})'",
".",
"format",
"(",
"filename",
")",
")",
"qtlib_name_rgx",
"=",
"re",
".",
"compile",
"(",
"QTLIB_NAME_REGEX",
")",
"rgxret",
"=",
"qtlib_name_rgx",
".",
"match",
"(",
"filename",
")",
"if",
"not",
"rgxret",
":",
"msg",
"=",
"'couldn\\'t normalize a non-qt lib filename: {0}'",
".",
"format",
"(",
"filename",
")",
"GlobalConfig",
".",
"logger",
".",
"critical",
"(",
"msg",
")",
"raise",
"Exception",
"(",
"msg",
")",
"# qtlib normalization settings",
"qtlib",
"=",
"rgxret",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"qtversion",
"=",
"5",
"templ",
"=",
"Template",
"(",
"QTLIB_NORMALIZED",
")",
"# from qtlib, forge 2 path :",
"# - absolute path of qt lib in bundle,",
"abspath",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"templ",
".",
"safe_substitute",
"(",
"prefix",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"GlobalConfig",
".",
"exepath",
")",
"+",
"'/..'",
",",
"qtlib",
"=",
"qtlib",
",",
"qtversion",
"=",
"qtversion",
")",
")",
"# - and rpath containing @executable_path, relative to exepath",
"rpath",
"=",
"templ",
".",
"safe_substitute",
"(",
"prefix",
"=",
"'@executable_path/..'",
",",
"qtlib",
"=",
"qtlib",
",",
"qtversion",
"=",
"qtversion",
")",
"GlobalConfig",
".",
"logger",
".",
"debug",
"(",
"'\\treturns({0})'",
".",
"format",
"(",
"(",
"qtlib",
",",
"abspath",
",",
"rpath",
")",
")",
")",
"return",
"qtlib",
",",
"abspath",
",",
"rpath"
] |
https://github.com/notepadqq/notepadqq/blob/d8000f256a516aa958d50bfcc19605dc296fbeb2/.travis/tools/macdeployqtfix/macdeployqtfix.py#L140-L180
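A self-contained sketch of the regex-plus-Template approach used above. The QTLIB_NAME_REGEX and QTLIB_NORMALIZED values below are illustrative stand-ins, not necessarily the project's actual module-level constants, and GlobalConfig is replaced by a plain exepath argument so the sketch runs on its own.

import os
import re
from string import Template

# Illustrative stand-ins for the module-level constants (assumptions):
QTLIB_NAME_REGEX = r'^(?:@executable_path/)?(?:.*/)?(Qt[a-zA-Z]+)\.framework/.*$'
QTLIB_NORMALIZED = '$prefix/Frameworks/$qtlib.framework/Versions/$qtversion/$qtlib'

def normalize_qtlib_name_sketch(filename, exepath):
    rgxret = re.match(QTLIB_NAME_REGEX, filename)
    if not rgxret:
        raise ValueError('not a qt lib filename: {0}'.format(filename))
    qtlib = rgxret.group(1)  # e.g. 'QtSerialPort'
    templ = Template(QTLIB_NORMALIZED)
    # absolute path of the qt lib inside the app bundle of exepath
    abspath = os.path.normpath(templ.safe_substitute(
        prefix=os.path.dirname(exepath) + '/..', qtlib=qtlib, qtversion=5))
    # rpath relative to the executable, using @executable_path
    rpath = templ.safe_substitute(
        prefix='@executable_path/..', qtlib=qtlib, qtversion=5)
    return qtlib, abspath, rpath

# e.g. normalize_qtlib_name_sketch(
#     '@executable_path/../Frameworks/QtSerialPort.framework/Versions/5/QtSerialPort',
#     '/Applications/Foo.app/Contents/MacOS/foo')
# -> ('QtSerialPort',
#     '/Applications/Foo.app/Contents/Frameworks/QtSerialPort.framework/Versions/5/QtSerialPort',
#     '@executable_path/../Frameworks/QtSerialPort.framework/Versions/5/QtSerialPort')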
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/msw/html.py
|
python
|
HtmlParser.GetSource
|
(*args, **kwargs)
|
return _html.HtmlParser_GetSource(*args, **kwargs)
|
GetSource(self) -> String
|
GetSource(self) -> String
|
[
"GetSource",
"(",
"self",
")",
"-",
">",
"String"
] |
def GetSource(*args, **kwargs):
"""GetSource(self) -> String"""
return _html.HtmlParser_GetSource(*args, **kwargs)
|
[
"def",
"GetSource",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_html",
".",
"HtmlParser_GetSource",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/html.py#L225-L227
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/python/ops/distributions/dirichlet.py
|
python
|
Dirichlet.__init__
|
(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet")
|
Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
|
Initialize a batch of Dirichlet distributions.
|
[
"Initialize",
"a",
"batch",
"of",
"Dirichlet",
"distributions",
"."
] |
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
name="Dirichlet"):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[concentration]):
self._concentration = self._maybe_assert_valid_concentration(
ops.convert_to_tensor(concentration, name="concentration"),
validate_args)
self._total_concentration = math_ops.reduce_sum(self._concentration, -1)
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
parameters=parameters,
graph_parents=[self._concentration,
self._total_concentration],
name=name)
|
[
"def",
"__init__",
"(",
"self",
",",
"concentration",
",",
"validate_args",
"=",
"False",
",",
"allow_nan_stats",
"=",
"True",
",",
"name",
"=",
"\"Dirichlet\"",
")",
":",
"parameters",
"=",
"locals",
"(",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"concentration",
"]",
")",
":",
"self",
".",
"_concentration",
"=",
"self",
".",
"_maybe_assert_valid_concentration",
"(",
"ops",
".",
"convert_to_tensor",
"(",
"concentration",
",",
"name",
"=",
"\"concentration\"",
")",
",",
"validate_args",
")",
"self",
".",
"_total_concentration",
"=",
"math_ops",
".",
"reduce_sum",
"(",
"self",
".",
"_concentration",
",",
"-",
"1",
")",
"super",
"(",
"Dirichlet",
",",
"self",
")",
".",
"__init__",
"(",
"dtype",
"=",
"self",
".",
"_concentration",
".",
"dtype",
",",
"validate_args",
"=",
"validate_args",
",",
"allow_nan_stats",
"=",
"allow_nan_stats",
",",
"reparameterization_type",
"=",
"distribution",
".",
"NOT_REPARAMETERIZED",
",",
"parameters",
"=",
"parameters",
",",
"graph_parents",
"=",
"[",
"self",
".",
"_concentration",
",",
"self",
".",
"_total_concentration",
"]",
",",
"name",
"=",
"name",
")"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/distributions/dirichlet.py#L131-L169
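A small, self-contained illustration of the shape convention spelled out in the docstring above, written with NumPy rather than TensorFlow so it runs standalone; the concentration values are arbitrary.

import numpy as np

# concentration.shape = [N1, ..., Nm, k] implies
# batch_shape = [N1, ..., Nm] and event_shape = [k].
concentration = np.ones([2, 3, 4])                 # a 2x3 batch of 4-class Dirichlets
batch_shape = concentration.shape[:-1]             # (2, 3)
event_shape = concentration.shape[-1:]             # (4,)
total_concentration = concentration.sum(axis=-1)   # analogue of reduce_sum(..., -1); shape (2, 3)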
|