nwo (string, 5-86 chars) | sha (string, 40 chars) | path (string, 4-189 chars) | language (string, 1 class) | identifier (string, 1-94 chars) | parameters (string, 2-4.03k chars) | argument_list (string, 1 class) | return_statement (string, 0-11.5k chars) | docstring (string, 1-33.2k chars) | docstring_summary (string, 0-5.15k chars) | docstring_tokens (list) | function (string, 34-151k chars) | function_tokens (list) | url (string, 90-278 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
klzgrad/naiveproxy
|
ed2c513637c77b18721fe428d7ed395b4d284c83
|
src/build/android/pylib/local/emulator/avd.py
|
python
|
_AvdManagerAgent.Create
|
(self, avd_name, system_image, force=False)
|
Call `avdmanager create`.
Args:
avd_name: name of the AVD to create.
system_image: system image to use for the AVD.
force: whether to force creation, overwriting any existing
AVD with the same name.
|
Call `avdmanager create`.
|
[
"Call",
"avdmanager",
"create",
"."
] |
def Create(self, avd_name, system_image, force=False):
"""Call `avdmanager create`.
Args:
avd_name: name of the AVD to create.
system_image: system image to use for the AVD.
force: whether to force creation, overwriting any existing
AVD with the same name.
"""
create_cmd = [
_DEFAULT_AVDMANAGER_PATH,
'-v',
'create',
'avd',
'-n',
avd_name,
'-k',
system_image,
]
if force:
create_cmd += ['--force']
create_proc = cmd_helper.Popen(
create_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self._env)
output, error = create_proc.communicate(input='\n')
if create_proc.returncode != 0:
raise AvdException(
'AVD creation failed',
command=create_cmd,
stdout=output,
stderr=error)
for line in output.splitlines():
logging.info(' %s', line)
|
[
"def",
"Create",
"(",
"self",
",",
"avd_name",
",",
"system_image",
",",
"force",
"=",
"False",
")",
":",
"create_cmd",
"=",
"[",
"_DEFAULT_AVDMANAGER_PATH",
",",
"'-v'",
",",
"'create'",
",",
"'avd'",
",",
"'-n'",
",",
"avd_name",
",",
"'-k'",
",",
"system_image",
",",
"]",
"if",
"force",
":",
"create_cmd",
"+=",
"[",
"'--force'",
"]",
"create_proc",
"=",
"cmd_helper",
".",
"Popen",
"(",
"create_cmd",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
",",
"env",
"=",
"self",
".",
"_env",
")",
"output",
",",
"error",
"=",
"create_proc",
".",
"communicate",
"(",
"input",
"=",
"'\\n'",
")",
"if",
"create_proc",
".",
"returncode",
"!=",
"0",
":",
"raise",
"AvdException",
"(",
"'AVD creation failed'",
",",
"command",
"=",
"create_cmd",
",",
"stdout",
"=",
"output",
",",
"stderr",
"=",
"error",
")",
"for",
"line",
"in",
"output",
".",
"splitlines",
"(",
")",
":",
"logging",
".",
"info",
"(",
"' %s'",
",",
"line",
")"
] |
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/pylib/local/emulator/avd.py#L102-L139
|
||
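A minimal standalone sketch of the same pattern using only the standard library; the newline fed to stdin (which accepts the tool's interactive prompt default, as in the wrapped code) is the key detail, while the avdmanager path and names are placeholders:

```python
import subprocess

AVDMANAGER = 'avdmanager'  # assumed to be on PATH

def create_avd(avd_name, system_image, force=False):
    cmd = [AVDMANAGER, '-v', 'create', 'avd', '-n', avd_name, '-k', system_image]
    if force:
        cmd.append('--force')
    # avdmanager asks whether to create a custom hardware profile; a bare
    # newline accepts the default, mirroring communicate(input='\n') above.
    proc = subprocess.run(cmd, input='\n', capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(f'AVD creation failed: {proc.stderr}')
    return proc.stdout
```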
cmu-db/noisepage
|
79276e68fe83322f1249e8a8be96bd63c583ae56
|
build-support/cpplint.py
|
python
|
RemoveMultiLineComments
|
(filename, lines, error)
|
Removes multiline (c-style) comments from lines.
|
Removes multiline (c-style) comments from lines.
|
[
"Removes",
"multiline",
"(",
"c",
"-",
"style",
")",
"comments",
"from",
"lines",
"."
] |
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
|
[
"def",
"RemoveMultiLineComments",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"lineix",
"=",
"0",
"while",
"lineix",
"<",
"len",
"(",
"lines",
")",
":",
"lineix_begin",
"=",
"FindNextMultiLineCommentStart",
"(",
"lines",
",",
"lineix",
")",
"if",
"lineix_begin",
">=",
"len",
"(",
"lines",
")",
":",
"return",
"lineix_end",
"=",
"FindNextMultiLineCommentEnd",
"(",
"lines",
",",
"lineix_begin",
")",
"if",
"lineix_end",
">=",
"len",
"(",
"lines",
")",
":",
"error",
"(",
"filename",
",",
"lineix_begin",
"+",
"1",
",",
"'readability/multiline_comment'",
",",
"5",
",",
"'Could not find end of multi-line comment'",
")",
"return",
"RemoveMultiLineCommentsFromRange",
"(",
"lines",
",",
"lineix_begin",
",",
"lineix_end",
"+",
"1",
")",
"lineix",
"=",
"lineix_end",
"+",
"1"
] |
https://github.com/cmu-db/noisepage/blob/79276e68fe83322f1249e8a8be96bd63c583ae56/build-support/cpplint.py#L1617-L1630
|
||
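A self-contained sketch of the same scan loop with simplified stand-ins for the helpers cpplint defines elsewhere (`FindNextMultiLineCommentStart`, `FindNextMultiLineCommentEnd`, `RemoveMultiLineCommentsFromRange`); the real helpers handle edge cases such as a comment opening and closing on one line:

```python
def find_comment_start(lines, ix):
    # Simplified stand-in: first line at or after ix containing '/*'.
    while ix < len(lines):
        if '/*' in lines[ix]:
            return ix
        ix += 1
    return len(lines)

def find_comment_end(lines, ix):
    # Simplified stand-in: first line at or after ix containing '*/'.
    while ix < len(lines):
        if '*/' in lines[ix]:
            return ix
        ix += 1
    return len(lines)

def remove_multi_line_comments(lines):
    ix = 0
    while ix < len(lines):
        begin = find_comment_start(lines, ix)
        if begin >= len(lines):
            return lines
        end = find_comment_end(lines, begin)
        if end >= len(lines):
            raise ValueError('unterminated /* comment at line %d' % (begin + 1))
        # cpplint blanks the range with '/**/' placeholders so line numbers
        # reported by later checks stay stable.
        for i in range(begin, end + 1):
            lines[i] = '/**/'
        ix = end + 1
    return lines

print(remove_multi_line_comments(['int a;', '/* note', 'spans lines */', 'int b;']))
# -> ['int a;', '/**/', '/**/', 'int b;']
```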
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_controls.py
|
python
|
TreeCtrl.ItemHasChildren
|
(*args, **kwargs)
|
return _controls_.TreeCtrl_ItemHasChildren(*args, **kwargs)
|
ItemHasChildren(self, TreeItemId item) -> bool
|
ItemHasChildren(self, TreeItemId item) -> bool
|
[
"ItemHasChildren",
"(",
"self",
"TreeItemId",
"item",
")",
"-",
">",
"bool"
] |
def ItemHasChildren(*args, **kwargs):
"""ItemHasChildren(self, TreeItemId item) -> bool"""
return _controls_.TreeCtrl_ItemHasChildren(*args, **kwargs)
|
[
"def",
"ItemHasChildren",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TreeCtrl_ItemHasChildren",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_controls.py#L5335-L5337
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/_core.py
|
python
|
Sizer.Clear
|
(*args, **kwargs)
|
return _core_.Sizer_Clear(*args, **kwargs)
|
Clear(self, bool deleteWindows=False)
Clear all items from the sizer, optionally destroying the window items
as well.
|
Clear(self, bool deleteWindows=False)
|
[
"Clear",
"(",
"self",
"bool",
"deleteWindows",
"=",
"False",
")"
] |
def Clear(*args, **kwargs):
"""
Clear(self, bool deleteWindows=False)
Clear all items from the sizer, optionally destroying the window items
as well.
"""
return _core_.Sizer_Clear(*args, **kwargs)
|
[
"def",
"Clear",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"Sizer_Clear",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L14930-L14937
|
|
hughperkins/tf-coriander
|
970d3df6c11400ad68405f22b0c42a52374e94ca
|
tensorflow/python/training/supervisor.py
|
python
|
Supervisor._init_summary_op
|
(self, summary_op=USE_DEFAULT)
|
Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
|
Initializes summary_op.
|
[
"Initializes",
"summary_op",
"."
] |
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = logging_ops.merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
|
[
"def",
"_init_summary_op",
"(",
"self",
",",
"summary_op",
"=",
"USE_DEFAULT",
")",
":",
"if",
"summary_op",
"is",
"Supervisor",
".",
"USE_DEFAULT",
":",
"summary_op",
"=",
"self",
".",
"_get_first_op_from_collection",
"(",
"ops",
".",
"GraphKeys",
".",
"SUMMARY_OP",
")",
"if",
"summary_op",
"is",
"None",
":",
"summary_op",
"=",
"logging_ops",
".",
"merge_all_summaries",
"(",
")",
"if",
"summary_op",
"is",
"not",
"None",
":",
"ops",
".",
"add_to_collection",
"(",
"ops",
".",
"GraphKeys",
".",
"SUMMARY_OP",
",",
"summary_op",
")",
"self",
".",
"_summary_op",
"=",
"summary_op"
] |
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/training/supervisor.py#L464-L477
|
||
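A framework-free sketch of the get-or-create pattern this method applies: look up an existing op in a named collection, fall back to building a default, and register the result so later lookups return it. All names here are illustrative; the real code uses TensorFlow's graph collections.

```python
_collections = {}  # stand-in for TensorFlow's per-graph collections

def get_first_from_collection(key):
    items = _collections.get(key, [])
    return items[0] if items else None

def init_summary_op(summary_op=None, build_default=lambda: 'merged_summaries'):
    # None plays the role of USE_DEFAULT in the wrapped method.
    if summary_op is None:
        summary_op = get_first_from_collection('SUMMARY_OP')
        if summary_op is None:
            summary_op = build_default()
        if summary_op is not None:
            _collections.setdefault('SUMMARY_OP', []).append(summary_op)
    return summary_op

print(init_summary_op())                        # builds and registers the default
print(get_first_from_collection('SUMMARY_OP'))  # found by subsequent lookups
```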
NVIDIA/TensorRT
|
42805f078052daad1a98bc5965974fcffaad0960
|
demo/BERT/inference.py
|
python
|
parse_args
|
()
|
return args
|
Parse command line arguments
|
Parse command line arguments
|
[
"Parse",
"command",
"line",
"arguments"
] |
def parse_args():
"""
Parse command line arguments
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-e', '--engine',
help='Path to BERT TensorRT engine')
parser.add_argument("-b", "--batch-size", default=1, help="Batch size for inference.", type=int)
parser.add_argument('-p', '--passage', nargs='*',
help='Text for paragraph/passage for BERT QA',
default='')
parser.add_argument('-pf', '--passage-file',
help='File containing input passage',
default='')
parser.add_argument('-q', '--question', nargs='*',
help='Text for query/question for BERT QA',
default='')
parser.add_argument('-qf', '--question-file',
help='File containing input question',
default='')
parser.add_argument('-sq', '--squad-json',
help='SQuAD json file',
default='')
parser.add_argument('-o', '--output-prediction-file',
help='Output prediction file for SQuAD evaluation',
default='./predictions.json')
parser.add_argument('-v', '--vocab-file',
help='Path to file containing entire understandable vocab')
parser.add_argument('-s', '--sequence-length',
help='The sequence length to use. Defaults to 128',
default=128, type=int)
parser.add_argument('--max-query-length',
help='The maximum length of a query in number of tokens. Queries longer than this will be truncated',
default=64, type=int)
parser.add_argument('--max-answer-length',
help='The maximum length of an answer that can be generated',
default=30, type=int)
parser.add_argument('--n-best-size',
help='Total number of n-best predictions to generate in the nbest_predictions.json output file',
default=20, type=int)
parser.add_argument('--doc-stride',
help='When splitting up a long document into chunks, what stride to take between chunks',
default=128, type=int)
args, _ = parser.parse_known_args()
return args
|
[
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"__doc__",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--engine'",
",",
"help",
"=",
"'Path to BERT TensorRT engine'",
")",
"parser",
".",
"add_argument",
"(",
"\"-b\"",
",",
"\"--batch-size\"",
",",
"default",
"=",
"1",
",",
"help",
"=",
"\"Batch size for inference.\"",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--passage'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"'Text for paragraph/passage for BERT QA'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-pf'",
",",
"'--passage-file'",
",",
"help",
"=",
"'File containing input passage'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-q'",
",",
"'--question'",
",",
"nargs",
"=",
"'*'",
",",
"help",
"=",
"'Text for query/question for BERT QA'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-qf'",
",",
"'--question-file'",
",",
"help",
"=",
"'File containing input question'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-sq'",
",",
"'--squad-json'",
",",
"help",
"=",
"'SQuAD json file'",
",",
"default",
"=",
"''",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output-prediction-file'",
",",
"help",
"=",
"'Output prediction file for SQuAD evaluation'",
",",
"default",
"=",
"'./predictions.json'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--vocab-file'",
",",
"help",
"=",
"'Path to file containing entire understandable vocab'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--sequence-length'",
",",
"help",
"=",
"'The sequence length to use. Defaults to 128'",
",",
"default",
"=",
"128",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--max-query-length'",
",",
"help",
"=",
"'The maximum length of a query in number of tokens. Queries longer than this will be truncated'",
",",
"default",
"=",
"64",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--max-answer-length'",
",",
"help",
"=",
"'The maximum length of an answer that can be generated'",
",",
"default",
"=",
"30",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--n-best-size'",
",",
"help",
"=",
"'Total number of n-best predictions to generate in the nbest_predictions.json output file'",
",",
"default",
"=",
"20",
",",
"type",
"=",
"int",
")",
"parser",
".",
"add_argument",
"(",
"'--doc-stride'",
",",
"help",
"=",
"'When splitting up a long document into chunks, what stride to take between chunks'",
",",
"default",
"=",
"128",
",",
"type",
"=",
"int",
")",
"args",
",",
"_",
"=",
"parser",
".",
"parse_known_args",
"(",
")",
"return",
"args"
] |
https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/demo/BERT/inference.py#L39-L83
|
|
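Note that the function ends with `parse_known_args()` rather than `parse_args()`, so unrecognized flags are returned separately instead of aborting. A minimal sketch of the difference, using one of the flags above:

```python
# Unknown flags come back in a second list instead of raising an error.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-s', '--sequence-length', default=128, type=int)
args, unknown = parser.parse_known_args(['-s', '256', '--not-a-real-flag'])
print(args.sequence_length)  # 256
print(unknown)               # ['--not-a-real-flag']
```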
gnina/gnina
|
b9ae032f52fc7a8153987bde09c0efa3620d8bb6
|
caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py
|
python
|
PascalMultilabelDataLayerSync.backward
|
(self, top, propagate_down, bottom)
|
This layer does not back propagate
|
This layer does not back propagate
|
[
"This",
"layer",
"does",
"not",
"back",
"propagate"
] |
def backward(self, top, propagate_down, bottom):
"""
This layer does not back propagate
"""
pass
|
[
"def",
"backward",
"(",
"self",
",",
"top",
",",
"propagate_down",
",",
"bottom",
")",
":",
"pass"
] |
https://github.com/gnina/gnina/blob/b9ae032f52fc7a8153987bde09c0efa3620d8bb6/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py#L74-L78
|
||
yue/yue
|
619d62c191b13c51c01be451dc48917c34a5aefc
|
building/tools/cpplint.py
|
python
|
_CppLintState.IncrementErrorCount
|
(self, category)
|
Bumps the module's error statistic.
|
Bumps the module's error statistic.
|
[
"Bumps",
"the",
"module",
"s",
"error",
"statistic",
"."
] |
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
|
[
"def",
"IncrementErrorCount",
"(",
"self",
",",
"category",
")",
":",
"self",
".",
"error_count",
"+=",
"1",
"if",
"self",
".",
"counting",
"in",
"(",
"'toplevel'",
",",
"'detailed'",
")",
":",
"if",
"self",
".",
"counting",
"!=",
"'detailed'",
":",
"category",
"=",
"category",
".",
"split",
"(",
"'/'",
")",
"[",
"0",
"]",
"if",
"category",
"not",
"in",
"self",
".",
"errors_by_category",
":",
"self",
".",
"errors_by_category",
"[",
"category",
"]",
"=",
"0",
"self",
".",
"errors_by_category",
"[",
"category",
"]",
"+=",
"1"
] |
https://github.com/yue/yue/blob/619d62c191b13c51c01be451dc48917c34a5aefc/building/tools/cpplint.py#L937-L945
|
||
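A minimal sketch of the two counting modes this method supports, using a plain dict: 'toplevel' folds a category such as 'readability/braces' into its prefix, while 'detailed' keeps full categories distinct.

```python
from collections import defaultdict

def tally(categories, counting):
    counts = defaultdict(int)
    for category in categories:
        if counting != 'detailed':
            # Keep only the top-level prefix, as the wrapped code does.
            category = category.split('/')[0]
        counts[category] += 1
    return dict(counts)

hits = ['readability/braces', 'readability/casting', 'whitespace/indent']
print(tally(hits, 'toplevel'))  # {'readability': 2, 'whitespace': 1}
print(tally(hits, 'detailed'))  # each full category counted once
```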
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_carbon/propgrid.py
|
python
|
PGMultiButton.GetPrimarySize
|
(*args, **kwargs)
|
return _propgrid.PGMultiButton_GetPrimarySize(*args, **kwargs)
|
GetPrimarySize(self) -> Size
|
GetPrimarySize(self) -> Size
|
[
"GetPrimarySize",
"(",
"self",
")",
"-",
">",
"Size"
] |
def GetPrimarySize(*args, **kwargs):
"""GetPrimarySize(self) -> Size"""
return _propgrid.PGMultiButton_GetPrimarySize(*args, **kwargs)
|
[
"def",
"GetPrimarySize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PGMultiButton_GetPrimarySize",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/propgrid.py#L2843-L2845
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py3/scipy/spatial/distance.py
|
python
|
euclidean
|
(u, v, w=None)
|
return minkowski(u, v, p=2, w=w)
|
Computes the Euclidean distance between two 1-D arrays.
The Euclidean distance between 1-D arrays `u` and `v`, is defined as
.. math::
   {||u-v||}_2 = \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
euclidean : double
The Euclidean distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.euclidean([1, 0, 0], [0, 1, 0])
1.4142135623730951
>>> distance.euclidean([1, 1, 0], [0, 1, 0])
1.0
|
Computes the Euclidean distance between two 1-D arrays.
|
[
"Computes",
"the",
"Euclidean",
"distance",
"between",
"two",
"1",
"-",
"D",
"arrays",
"."
] |
def euclidean(u, v, w=None):
"""
Computes the Euclidean distance between two 1-D arrays.
The Euclidean distance between 1-D arrays `u` and `v`, is defined as
.. math::
   {||u-v||}_2 = \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
w : (N,) array_like, optional
The weights for each value in `u` and `v`. Default is None,
which gives each value a weight of 1.0
Returns
-------
euclidean : double
The Euclidean distance between vectors `u` and `v`.
Examples
--------
>>> from scipy.spatial import distance
>>> distance.euclidean([1, 0, 0], [0, 1, 0])
1.4142135623730951
>>> distance.euclidean([1, 1, 0], [0, 1, 0])
1.0
"""
return minkowski(u, v, p=2, w=w)
|
[
"def",
"euclidean",
"(",
"u",
",",
"v",
",",
"w",
"=",
"None",
")",
":",
"return",
"minkowski",
"(",
"u",
",",
"v",
",",
"p",
"=",
"2",
",",
"w",
"=",
"w",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/spatial/distance.py#L566-L602
|
|
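The docstring's weighted formula is easy to check directly with NumPy; with the default `w=None`, every weight is 1 and the plain 2-norm falls out. A quick verification sketch:

```python
import numpy as np

u = np.array([1.0, 0.0, 0.0])
v = np.array([0.0, 1.0, 0.0])
w = np.array([1.0, 1.0, 1.0])  # unit weights, matching the default

print(np.sqrt(np.sum(w * np.abs(u - v) ** 2)))  # 1.4142135623730951
print(np.linalg.norm(u - v))                    # same value
```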
kushview/Element
|
1cc16380caa2ab79461246ba758b9de1f46db2a5
|
waflib/extras/fc_xlf.py
|
python
|
get_xlf_version
|
(conf, fc)
|
Get the compiler version
|
Get the compiler version
|
[
"Get",
"the",
"compiler",
"version"
] |
def get_xlf_version(conf, fc):
"""Get the compiler version"""
cmd = fc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlf %r' % cmd)
for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLF version.')
|
[
"def",
"get_xlf_version",
"(",
"conf",
",",
"fc",
")",
":",
"cmd",
"=",
"fc",
"+",
"[",
"'-qversion'",
"]",
"try",
":",
"out",
",",
"err",
"=",
"conf",
".",
"cmd_and_log",
"(",
"cmd",
",",
"output",
"=",
"0",
")",
"except",
"Errors",
".",
"WafError",
":",
"conf",
".",
"fatal",
"(",
"'Could not find xlf %r'",
"%",
"cmd",
")",
"for",
"v",
"in",
"(",
"r\"IBM XL Fortran.* V(?P<major>\\d*)\\.(?P<minor>\\d*)\"",
",",
")",
":",
"version_re",
"=",
"re",
".",
"compile",
"(",
"v",
",",
"re",
".",
"I",
")",
".",
"search",
"match",
"=",
"version_re",
"(",
"out",
"or",
"err",
")",
"if",
"match",
":",
"k",
"=",
"match",
".",
"groupdict",
"(",
")",
"conf",
".",
"env",
"[",
"'FC_VERSION'",
"]",
"=",
"(",
"k",
"[",
"'major'",
"]",
",",
"k",
"[",
"'minor'",
"]",
")",
"break",
"else",
":",
"conf",
".",
"fatal",
"(",
"'Could not determine the XLF version.'",
")"
] |
https://github.com/kushview/Element/blob/1cc16380caa2ab79461246ba758b9de1f46db2a5/waflib/extras/fc_xlf.py#L37-L54
|
||
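The version probe above boils down to a single regex over the compiler banner. The sample banner below is a guess at the `-qversion` output format, not captured from a real compiler:

```python
import re

banner = 'IBM XL Fortran for Linux, V15.1'  # hypothetical sample output
version_re = re.compile(r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)", re.I)
match = version_re.search(banner)
if match:
    # Mirrors how the wrapped code fills FC_VERSION from the named groups.
    print((match.group('major'), match.group('minor')))  # ('15', '1')
```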
BVLC/caffe
|
9b891540183ddc834a02b2bd81b31afae71b2153
|
python/caffe/draw.py
|
python
|
get_edge_label
|
(layer)
|
return edge_label
|
Define edge label based on layer type.
|
Define edge label based on layer type.
|
[
"Define",
"edge",
"label",
"based",
"on",
"layer",
"type",
"."
] |
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
|
[
"def",
"get_edge_label",
"(",
"layer",
")",
":",
"if",
"layer",
".",
"type",
"==",
"'Data'",
":",
"edge_label",
"=",
"'Batch '",
"+",
"str",
"(",
"layer",
".",
"data_param",
".",
"batch_size",
")",
"elif",
"layer",
".",
"type",
"==",
"'Convolution'",
"or",
"layer",
".",
"type",
"==",
"'Deconvolution'",
":",
"edge_label",
"=",
"str",
"(",
"layer",
".",
"convolution_param",
".",
"num_output",
")",
"elif",
"layer",
".",
"type",
"==",
"'InnerProduct'",
":",
"edge_label",
"=",
"str",
"(",
"layer",
".",
"inner_product_param",
".",
"num_output",
")",
"else",
":",
"edge_label",
"=",
"'\"\"'",
"return",
"edge_label"
] |
https://github.com/BVLC/caffe/blob/9b891540183ddc834a02b2bd81b31afae71b2153/python/caffe/draw.py#L46-L59
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/contrib/losses/python/losses/loss_ops.py
|
python
|
_scale_losses
|
(losses, weights)
|
return math_ops.reduce_sum(reduced_losses)
|
Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
The `losses` are reduced (tf.reduce_sum) until its dimension matches
that of `weights` at which point the reduced `losses` are element-wise
multiplied by `weights` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weights` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
|
Computes the scaled loss.
|
[
"Computes",
"the",
"scaled",
"loss",
"."
] |
def _scale_losses(losses, weights):
"""Computes the scaled loss.
Args:
losses: A `Tensor` of size [batch_size, d1, ... dN].
weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
The `losses` are reduced (tf.reduce_sum) until its dimension matches
that of `weights` at which point the reduced `losses` are element-wise
multiplied by `weights` and a final reduce_sum is computed on the result.
Conceptually, this operation is equivalent to broadcasting (tiling)
`weights` to be the same size as `losses`, performing an element-wise
multiplication, and summing the result.
Returns:
A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
start_index = max(0, weights.get_shape().ndims)
reduction_indices = list(range(start_index, losses.get_shape().ndims))
reduced_losses = math_ops.reduce_sum(losses,
reduction_indices=reduction_indices)
reduced_losses = math_ops.multiply(reduced_losses, weights)
return math_ops.reduce_sum(reduced_losses)
|
[
"def",
"_scale_losses",
"(",
"losses",
",",
"weights",
")",
":",
"# First, compute the sum of the losses over all elements:",
"start_index",
"=",
"max",
"(",
"0",
",",
"weights",
".",
"get_shape",
"(",
")",
".",
"ndims",
")",
"reduction_indices",
"=",
"list",
"(",
"range",
"(",
"start_index",
",",
"losses",
".",
"get_shape",
"(",
")",
".",
"ndims",
")",
")",
"reduced_losses",
"=",
"math_ops",
".",
"reduce_sum",
"(",
"losses",
",",
"reduction_indices",
"=",
"reduction_indices",
")",
"reduced_losses",
"=",
"math_ops",
".",
"multiply",
"(",
"reduced_losses",
",",
"weights",
")",
"return",
"math_ops",
".",
"reduce_sum",
"(",
"reduced_losses",
")"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/losses/python/losses/loss_ops.py#L49-L72
|
|
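The docstring's claim, that reducing `losses` down to the rank of `weights` before multiplying is equivalent to broadcasting `weights` over `losses` and summing everything, can be checked with a small NumPy sketch:

```python
import numpy as np

losses = np.arange(24, dtype=float).reshape(2, 3, 4)  # [batch, d1, d2]
weights = np.array([0.5, 2.0])                        # [batch]

# Reduce the trailing dims of `losses` so its rank matches `weights`,
# as the wrapped function does, then multiply and sum.
start = max(0, weights.ndim)
axes = tuple(range(start, losses.ndim))
reduced = losses.sum(axis=axes) * weights
print(reduced.sum())                            # scaled loss

# Equivalent broadcast form described in the docstring.
print((losses * weights[:, None, None]).sum())  # same value
```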
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Source/ThirdParty/CEF3/pristine/cef_source/tools/cef_parser.py
|
python
|
obj_argument.remove_name
|
(self)
|
return name
|
Remove and return the name value.
|
Remove and return the name value.
|
[
"Remove",
"and",
"return",
"the",
"name",
"value",
"."
] |
def remove_name(self):
""" Remove and return the name value. """
name = self.type.get_name()
self.type.name = None
return name
|
[
"def",
"remove_name",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"type",
".",
"get_name",
"(",
")",
"self",
".",
"type",
".",
"name",
"=",
"None",
"return",
"name"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/pristine/cef_source/tools/cef_parser.py#L1315-L1319
|
|
ApolloAuto/apollo-platform
|
86d9dc6743b496ead18d597748ebabd34a513289
|
ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/distutils/system_info.py
|
python
|
fftw_info.calc_ver_info
|
(self, ver_param)
|
Returns True on successful version detection, else False
|
Returns True on successful version detection, else False
|
[
"Returns",
"True",
"on",
"successful",
"version",
"detection",
"else",
"False"
] |
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
incl_dir = None
libs = self.get_libs(self.section + '_libs', ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
incl_dirs = [d]
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
|
[
"def",
"calc_ver_info",
"(",
"self",
",",
"ver_param",
")",
":",
"lib_dirs",
"=",
"self",
".",
"get_lib_dirs",
"(",
")",
"incl_dirs",
"=",
"self",
".",
"get_include_dirs",
"(",
")",
"incl_dir",
"=",
"None",
"libs",
"=",
"self",
".",
"get_libs",
"(",
"self",
".",
"section",
"+",
"'_libs'",
",",
"ver_param",
"[",
"'libs'",
"]",
")",
"info",
"=",
"self",
".",
"check_libs",
"(",
"lib_dirs",
",",
"libs",
")",
"if",
"info",
"is",
"not",
"None",
":",
"flag",
"=",
"0",
"for",
"d",
"in",
"incl_dirs",
":",
"if",
"len",
"(",
"self",
".",
"combine_paths",
"(",
"d",
",",
"ver_param",
"[",
"'includes'",
"]",
")",
")",
"==",
"len",
"(",
"ver_param",
"[",
"'includes'",
"]",
")",
":",
"dict_append",
"(",
"info",
",",
"include_dirs",
"=",
"[",
"d",
"]",
")",
"flag",
"=",
"1",
"incl_dirs",
"=",
"[",
"d",
"]",
"break",
"if",
"flag",
":",
"dict_append",
"(",
"info",
",",
"define_macros",
"=",
"ver_param",
"[",
"'macros'",
"]",
")",
"else",
":",
"info",
"=",
"None",
"if",
"info",
"is",
"not",
"None",
":",
"self",
".",
"set_info",
"(",
"*",
"*",
"info",
")",
"return",
"True",
"else",
":",
"log",
".",
"info",
"(",
"' %s not found'",
"%",
"(",
"ver_param",
"[",
"'name'",
"]",
")",
")",
"return",
"False"
] |
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/distutils/system_info.py#L759-L784
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/_windows.py
|
python
|
StatusBar.GetBorderY
|
(*args, **kwargs)
|
return _windows_.StatusBar_GetBorderY(*args, **kwargs)
|
GetBorderY(self) -> int
|
GetBorderY(self) -> int
|
[
"GetBorderY",
"(",
"self",
")",
"-",
">",
"int"
] |
def GetBorderY(*args, **kwargs):
"""GetBorderY(self) -> int"""
return _windows_.StatusBar_GetBorderY(*args, **kwargs)
|
[
"def",
"GetBorderY",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"StatusBar_GetBorderY",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L1291-L1293
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/dtypes/common.py
|
python
|
is_object_dtype
|
(arr_or_dtype)
|
return _is_dtype_type(arr_or_dtype, classes(np.object_))
|
Check whether an array-like or dtype is of the object dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the object dtype.
Examples
--------
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
|
Check whether an array-like or dtype is of the object dtype.
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"or",
"dtype",
"is",
"of",
"the",
"object",
"dtype",
"."
] |
def is_object_dtype(arr_or_dtype) -> bool:
"""
Check whether an array-like or dtype is of the object dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean
Whether or not the array-like or dtype is of the object dtype.
Examples
--------
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
"""
return _is_dtype_type(arr_or_dtype, classes(np.object_))
|
[
"def",
"is_object_dtype",
"(",
"arr_or_dtype",
")",
"->",
"bool",
":",
"return",
"_is_dtype_type",
"(",
"arr_or_dtype",
",",
"classes",
"(",
"np",
".",
"object_",
")",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/dtypes/common.py#L222-L249
|
|
mongodb/mongo
|
d8ff665343ad29cf286ee2cf4a1960d29371937b
|
src/third_party/scons-3.1.2/scons-time.py
|
python
|
SConsTimer.profile_name
|
(self, invocation)
|
return os.path.join(self.outdir, name)
|
Returns the absolute path of a profile file for the specified
invocation number.
|
Returns the absolute path of a profile file for the specified
invocation number.
|
[
"Returns",
"the",
"absolute",
"path",
"of",
"a",
"profile",
"file",
"for",
"the",
"specified",
"invocation",
"number",
"."
] |
def profile_name(self, invocation):
"""
Returns the absolute path of a profile file for the specified
invocation number.
"""
name = self.prefix_run + '-%d.prof' % invocation
return os.path.join(self.outdir, name)
|
[
"def",
"profile_name",
"(",
"self",
",",
"invocation",
")",
":",
"name",
"=",
"self",
".",
"prefix_run",
"+",
"'-%d.prof'",
"%",
"invocation",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"outdir",
",",
"name",
")"
] |
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/src/third_party/scons-3.1.2/scons-time.py#L602-L608
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python3/src/Lib/nntplib.py
|
python
|
NNTP._putcmd
|
(self, line)
|
Internal: send one command to the server (through _putline()).
The `line` must be a unicode string.
|
Internal: send one command to the server (through _putline()).
The `line` must be a unicode string.
|
[
"Internal",
":",
"send",
"one",
"command",
"to",
"the",
"server",
"(",
"through",
"_putline",
"()",
")",
".",
"The",
"line",
"must",
"be",
"a",
"unicode",
"string",
"."
] |
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be a unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
|
[
"def",
"_putcmd",
"(",
"self",
",",
"line",
")",
":",
"if",
"self",
".",
"debugging",
":",
"print",
"(",
"'*cmd*'",
",",
"repr",
"(",
"line",
")",
")",
"line",
"=",
"line",
".",
"encode",
"(",
"self",
".",
"encoding",
",",
"self",
".",
"errors",
")",
"self",
".",
"_putline",
"(",
"line",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/nntplib.py#L450-L455
|
||
psi4/psi4
|
be533f7f426b6ccc263904e55122899b16663395
|
psi4/driver/qcdb/libmintsmolecule.py
|
python
|
compute_atom_map
|
(mol, tol=0.05)
|
return atom_map
|
Computes atom mappings during symmetry operations. Useful in
generating SO information and Cartesian displacement SALCs.
param mol Molecule to form mapping matrix from.
returns Integer matrix of dimension natoms X nirreps.
|
Computes atom mappings during symmetry operations. Useful in
generating SO information and Cartesian displacement SALCs.
param mol Molecule to form mapping matrix from.
returns Integer matrix of dimension natoms X nirreps.
|
[
"Computes",
"atom",
"mappings",
"during",
"symmetry",
"operations",
".",
"Useful",
"in",
"generating",
"SO",
"information",
"and",
"Cartesian",
"displacement",
"SALCs",
".",
"param",
"mol",
"Molecule",
"to",
"form",
"mapping",
"matrix",
"from",
".",
"returns",
"Integer",
"matrix",
"of",
"dimension",
"natoms",
"X",
"nirreps",
"."
] |
def compute_atom_map(mol, tol=0.05):
"""Computes atom mappings during symmetry operations. Useful in
generating SO information and Cartesian displacement SALCs.
param mol Molecule to form mapping matrix from.
returns Integer matrix of dimension natoms X nirreps.
"""
# create the character table for the point group
ct = mol.point_group().char_table()
natom = mol.natom()
ng = ct.order()
atom_map = [0] * natom
for i in range(natom):
atom_map[i] = [0] * ng
np3 = [0.0, 0.0, 0.0]
so = SymmetryOperation()
# loop over all centers
for i in range(natom):
ac = mol.xyz(i)
# then for each symop in the pointgroup, transform the coordinates of
# center "i" and see which atom it maps into
for g in range(ng):
so = ct.symm_operation(g)
for ii in range(3):
np3[ii] = 0
for jj in range(3):
np3[ii] += so[ii][jj] * ac[jj]
atom_map[i][g] = mol.atom_at_position(np3, tol)
if atom_map[i][g] < 0:
print(""" Molecule:\n""")
mol.print_out()
print(""" attempted to find atom at\n""")
print(""" %lf %lf %lf\n""" % (np3[0], np3[1], np3[2]))
raise ValidationError("ERROR: Symmetry operation %d did not map atom %d to another atom:\n" % (g, i + 1))
return atom_map
|
[
"def",
"compute_atom_map",
"(",
"mol",
",",
"tol",
"=",
"0.05",
")",
":",
"# create the character table for the point group",
"ct",
"=",
"mol",
".",
"point_group",
"(",
")",
".",
"char_table",
"(",
")",
"natom",
"=",
"mol",
".",
"natom",
"(",
")",
"ng",
"=",
"ct",
".",
"order",
"(",
")",
"atom_map",
"=",
"[",
"0",
"]",
"*",
"natom",
"for",
"i",
"in",
"range",
"(",
"natom",
")",
":",
"atom_map",
"[",
"i",
"]",
"=",
"[",
"0",
"]",
"*",
"ng",
"np3",
"=",
"[",
"0.0",
",",
"0.0",
",",
"0.0",
"]",
"so",
"=",
"SymmetryOperation",
"(",
")",
"# loop over all centers",
"for",
"i",
"in",
"range",
"(",
"natom",
")",
":",
"ac",
"=",
"mol",
".",
"xyz",
"(",
"i",
")",
"# then for each symop in the pointgroup, transform the coordinates of",
"# center \"i\" and see which atom it maps into",
"for",
"g",
"in",
"range",
"(",
"ng",
")",
":",
"so",
"=",
"ct",
".",
"symm_operation",
"(",
"g",
")",
"for",
"ii",
"in",
"range",
"(",
"3",
")",
":",
"np3",
"[",
"ii",
"]",
"=",
"0",
"for",
"jj",
"in",
"range",
"(",
"3",
")",
":",
"np3",
"[",
"ii",
"]",
"+=",
"so",
"[",
"ii",
"]",
"[",
"jj",
"]",
"*",
"ac",
"[",
"jj",
"]",
"atom_map",
"[",
"i",
"]",
"[",
"g",
"]",
"=",
"mol",
".",
"atom_at_position",
"(",
"np3",
",",
"tol",
")",
"if",
"atom_map",
"[",
"i",
"]",
"[",
"g",
"]",
"<",
"0",
":",
"print",
"(",
"\"\"\" Molecule:\\n\"\"\"",
")",
"mol",
".",
"print_out",
"(",
")",
"print",
"(",
"\"\"\" attempted to find atom at\\n\"\"\"",
")",
"print",
"(",
"\"\"\" %lf %lf %lf\\n\"\"\"",
"%",
"(",
"np3",
"[",
"0",
"]",
",",
"np3",
"[",
"1",
"]",
",",
"np3",
"[",
"2",
"]",
")",
")",
"raise",
"ValidationError",
"(",
"\"ERROR: Symmetry operation %d did not map atom %d to another atom:\\n\"",
"%",
"(",
"g",
",",
"i",
"+",
"1",
")",
")",
"return",
"atom_map"
] |
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/libmintsmolecule.py#L3249-L3289
|
|
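A tiny NumPy illustration of the inner loop: apply one 3x3 symmetry operation to an atom's coordinates and look up which atom sits at the image position, within a tolerance. The rotation and geometry here are made up for the demo:

```python
import numpy as np

atoms = np.array([[1.0, 0.0, 0.0],    # atom 0
                  [-1.0, 0.0, 0.0]])  # atom 1
c2z = np.array([[-1.0, 0.0, 0.0],     # 180-degree rotation about z
                [0.0, -1.0, 0.0],
                [0.0, 0.0, 1.0]])

def atom_at_position(pos, tol=0.05):
    # Simplified stand-in for mol.atom_at_position: -1 means no atom found.
    for i, xyz in enumerate(atoms):
        if np.linalg.norm(xyz - pos) < tol:
            return i
    return -1

image = c2z @ atoms[0]
print(atom_at_position(image))  # 1: the rotation maps atom 0 onto atom 1
```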
mavlink/mavros
|
a32232d57a5e91abf6737e454d4199cae29b369c
|
mavros/mavros/cmd/ftp.py
|
python
|
reset
|
(client)
|
Reset ftp server.
|
Reset ftp server.
|
[
"Reset",
"ftp",
"server",
"."
] |
def reset(client):
"""Reset ftp server."""
client.ftp.reset_server()
|
[
"def",
"reset",
"(",
"client",
")",
":",
"client",
".",
"ftp",
".",
"reset_server",
"(",
")"
] |
https://github.com/mavlink/mavros/blob/a32232d57a5e91abf6737e454d4199cae29b369c/mavros/mavros/cmd/ftp.py#L147-L149
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python/src/Lib/lib-tk/Tix.py
|
python
|
Grid.anchor_get
|
(self)
|
return self._getints(self.tk.call(self, 'anchor', 'get'))
|
Get the (x,y) coordinate of the current anchor cell
|
Get the (x,y) coordinate of the current anchor cell
|
[
"Get",
"the",
"(",
"x",
"y",
")",
"coordinate",
"of",
"the",
"current",
"anchor",
"cell"
] |
def anchor_get(self):
"Get the (x,y) coordinate of the current anchor cell"
return self._getints(self.tk.call(self, 'anchor', 'get'))
|
[
"def",
"anchor_get",
"(",
"self",
")",
":",
"return",
"self",
".",
"_getints",
"(",
"self",
".",
"tk",
".",
"call",
"(",
"self",
",",
"'anchor'",
",",
"'get'",
")",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/Tix.py#L1805-L1807
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/contrib/bayesflow/python/ops/hmc_impl.py
|
python
|
chain
|
(n_iterations, step_size, n_leapfrog_steps, initial_x,
target_log_prob_fn, event_dims=(), name=None)
|
Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)
algorithm that takes a series of gradient-informed steps to produce
a Metropolis proposal. This function samples from an HMC Markov
chain whose initial state is `initial_x` and whose stationary
distribution has log-density `target_log_prob_fn()`.
This function can update multiple chains in parallel. It assumes
that all dimensions of `initial_x` not specified in `event_dims` are
independent, and should therefore be updated independently. The
output of `target_log_prob_fn()` should sum log-probabilities across
all event dimensions. Slices along dimensions not in `event_dims`
may have different target distributions; this is up to
`target_log_prob_fn()`.
This function basically just wraps `hmc.kernel()` in a tf.scan() loop.
Args:
n_iterations: Integer number of Markov chain updates to run.
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`initial_x`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely.
When possible, it's often helpful to match per-variable step
sizes to the standard deviations of the target distribution in
each variable.
n_leapfrog_steps: Integer number of steps to run the leapfrog
integrator for. Total progress per HMC step is roughly
proportional to step_size * n_leapfrog_steps.
initial_x: Tensor of initial state(s) of the Markov chain(s).
target_log_prob_fn: Python callable which takes an argument like `initial_x`
and returns its (possibly unnormalized) log-density under the target
distribution.
event_dims: List of dimensions that should not be treated as
independent. This allows for multiple chains to be run independently
in parallel. Default is (), i.e., all dimensions are independent.
name: Python `str` name prefixed to Ops created by this function.
Returns:
acceptance_probs: Tensor with the acceptance probabilities for each
iteration. Has shape matching `target_log_prob_fn(initial_x)`.
chain_states: Tensor with the state of the Markov chain at each iteration.
Has shape `[n_iterations, initial_x.shape[0],...,initial_x.shape[-1]]`.
#### Examples:
```python
# Sampling from a standard normal (note `log_joint()` is unnormalized):
def log_joint(x):
return tf.reduce_sum(-0.5 * tf.square(x))
chain, acceptance_probs = hmc.chain(1000, 0.5, 2, tf.zeros(10), log_joint,
event_dims=[0])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from a diagonal-variance Gaussian:
variances = tf.linspace(1., 3., 10)
def log_joint(x):
return tf.reduce_sum(-0.5 / variances * tf.square(x))
chain, acceptance_probs = hmc.chain(1000, 0.5, 2, tf.zeros(10), log_joint,
event_dims=[0])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from factor-analysis posteriors with known factors W:
# mu[i, j] ~ Normal(0, 1)
# x[i] ~ Normal(matmul(mu[i], W), I)
def log_joint(mu, x, W):
prior = -0.5 * tf.reduce_sum(tf.square(mu), 1)
x_mean = tf.matmul(mu, W)
likelihood = -0.5 * tf.reduce_sum(tf.square(x - x_mean), 1)
return prior + likelihood
chain, acceptance_probs = hmc.chain(1000, 0.1, 2,
tf.zeros([x.shape[0], W.shape[0]]),
lambda mu: log_joint(mu, x, W),
event_dims=[1])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from the posterior of a Bayesian regression model:
# Run 100 chains in parallel, each with a different initialization.
initial_beta = tf.random_normal([100, x.shape[1]])
chain, acceptance_probs = hmc.chain(1000, 0.1, 10, initial_beta,
log_joint_partial, event_dims=[1])
# Discard first halves of chains as warmup/burn-in
warmed_up = chain[500:]
# Averaging across samples within a chain and across chains
mean_est = tf.reduce_mean(warmed_up, [0, 1])
var_est = tf.reduce_mean(tf.square(warmed_up), [0, 1]) - tf.square(mean_est)
```
|
Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.
|
[
"Runs",
"multiple",
"iterations",
"of",
"one",
"or",
"more",
"Hamiltonian",
"Monte",
"Carlo",
"chains",
"."
] |
def chain(n_iterations, step_size, n_leapfrog_steps, initial_x,
target_log_prob_fn, event_dims=(), name=None):
"""Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)
algorithm that takes a series of gradient-informed steps to produce
a Metropolis proposal. This function samples from an HMC Markov
chain whose initial state is `initial_x` and whose stationary
distribution has log-density `target_log_prob_fn()`.
This function can update multiple chains in parallel. It assumes
that all dimensions of `initial_x` not specified in `event_dims` are
independent, and should therefore be updated independently. The
output of `target_log_prob_fn()` should sum log-probabilities across
all event dimensions. Slices along dimensions not in `event_dims`
may have different target distributions; this is up to
`target_log_prob_fn()`.
This function basically just wraps `hmc.kernel()` in a tf.scan() loop.
Args:
n_iterations: Integer number of Markov chain updates to run.
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`initial_x`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely.
When possible, it's often helpful to match per-variable step
sizes to the standard deviations of the target distribution in
each variable.
n_leapfrog_steps: Integer number of steps to run the leapfrog
integrator for. Total progress per HMC step is roughly
proportional to step_size * n_leapfrog_steps.
initial_x: Tensor of initial state(s) of the Markov chain(s).
target_log_prob_fn: Python callable which takes an argument like `initial_x`
and returns its (possibly unnormalized) log-density under the target
distribution.
event_dims: List of dimensions that should not be treated as
independent. This allows for multiple chains to be run independently
in parallel. Default is (), i.e., all dimensions are independent.
name: Python `str` name prefixed to Ops created by this function.
Returns:
acceptance_probs: Tensor with the acceptance probabilities for each
iteration. Has shape matching `target_log_prob_fn(initial_x)`.
chain_states: Tensor with the state of the Markov chain at each iteration.
Has shape `[n_iterations, initial_x.shape[0],...,initial_x.shape[-1]]`.
#### Examples:
```python
# Sampling from a standard normal (note `log_joint()` is unnormalized):
def log_joint(x):
return tf.reduce_sum(-0.5 * tf.square(x))
chain, acceptance_probs = hmc.chain(1000, 0.5, 2, tf.zeros(10), log_joint,
event_dims=[0])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from a diagonal-variance Gaussian:
variances = tf.linspace(1., 3., 10)
def log_joint(x):
return tf.reduce_sum(-0.5 / variances * tf.square(x))
chain, acceptance_probs = hmc.chain(1000, 0.5, 2, tf.zeros(10), log_joint,
event_dims=[0])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from factor-analysis posteriors with known factors W:
# mu[i, j] ~ Normal(0, 1)
# x[i] ~ Normal(matmul(mu[i], W), I)
def log_joint(mu, x, W):
prior = -0.5 * tf.reduce_sum(tf.square(mu), 1)
x_mean = tf.matmul(mu, W)
likelihood = -0.5 * tf.reduce_sum(tf.square(x - x_mean), 1)
return prior + likelihood
chain, acceptance_probs = hmc.chain(1000, 0.1, 2,
tf.zeros([x.shape[0], W.shape[0]]),
lambda mu: log_joint(mu, x, W),
event_dims=[1])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from the posterior of a Bayesian regression model:
# Run 100 chains in parallel, each with a different initialization.
initial_beta = tf.random_normal([100, x.shape[1]])
chain, acceptance_probs = hmc.chain(1000, 0.1, 10, initial_beta,
log_joint_partial, event_dims=[1])
# Discard first halves of chains as warmup/burn-in
warmed_up = chain[500:]
# Averaging across samples within a chain and across chains
mean_est = tf.reduce_mean(warmed_up, [0, 1])
var_est = tf.reduce_mean(tf.square(warmed_up), [0, 1]) - tf.square(mean_est)
```
"""
with ops.name_scope(name, 'hmc_chain', [n_iterations, step_size,
n_leapfrog_steps, initial_x]):
initial_x = ops.convert_to_tensor(initial_x, name='initial_x')
non_event_shape = array_ops.shape(target_log_prob_fn(initial_x))
def body(a, _):
updated_x, acceptance_probs, log_prob, grad = kernel(
step_size, n_leapfrog_steps, a[0], target_log_prob_fn, event_dims,
a[2], a[3])
return updated_x, acceptance_probs, log_prob, grad
potential_and_grad = _make_potential_and_grad(target_log_prob_fn)
potential, grad = potential_and_grad(initial_x)
return functional_ops.scan(body, array_ops.zeros(n_iterations),
(initial_x, array_ops.zeros(non_event_shape),
-potential, -grad))[:2]
|
[
"def",
"chain",
"(",
"n_iterations",
",",
"step_size",
",",
"n_leapfrog_steps",
",",
"initial_x",
",",
"target_log_prob_fn",
",",
"event_dims",
"=",
"(",
")",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"'hmc_chain'",
",",
"[",
"n_iterations",
",",
"step_size",
",",
"n_leapfrog_steps",
",",
"initial_x",
"]",
")",
":",
"initial_x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"initial_x",
",",
"name",
"=",
"'initial_x'",
")",
"non_event_shape",
"=",
"array_ops",
".",
"shape",
"(",
"target_log_prob_fn",
"(",
"initial_x",
")",
")",
"def",
"body",
"(",
"a",
",",
"_",
")",
":",
"updated_x",
",",
"acceptance_probs",
",",
"log_prob",
",",
"grad",
"=",
"kernel",
"(",
"step_size",
",",
"n_leapfrog_steps",
",",
"a",
"[",
"0",
"]",
",",
"target_log_prob_fn",
",",
"event_dims",
",",
"a",
"[",
"2",
"]",
",",
"a",
"[",
"3",
"]",
")",
"return",
"updated_x",
",",
"acceptance_probs",
",",
"log_prob",
",",
"grad",
"potential_and_grad",
"=",
"_make_potential_and_grad",
"(",
"target_log_prob_fn",
")",
"potential",
",",
"grad",
"=",
"potential_and_grad",
"(",
"initial_x",
")",
"return",
"functional_ops",
".",
"scan",
"(",
"body",
",",
"array_ops",
".",
"zeros",
"(",
"n_iterations",
")",
",",
"(",
"initial_x",
",",
"array_ops",
".",
"zeros",
"(",
"non_event_shape",
")",
",",
"-",
"potential",
",",
"-",
"grad",
")",
")",
"[",
":",
"2",
"]"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/bayesflow/python/ops/hmc_impl.py#L57-L179
|
||
msftguy/ssh-rd
|
a5f3a79daeac5844edebf01916c9613563f1c390
|
_3rd/boost_1_48_0/tools/build/v2/build/generators.py
|
python
|
Generator.convert_to_consumable_types
|
(self, project, name, prop_set, sources, only_one=False)
|
return (consumed, bypassed)
|
Attempts to convert 'source' to the types that this generator can
handle. The intention is to produce the set of targets that should be
used when the generator is run.
only_one: convert 'source' to only one of source types
if there's more than one possibility, report an
error.
Returns a pair:
consumed: all targets that can be consumed.
bypassed: all targets that cannot be consumed.
|
Attempts to convert 'source' to the types that this generator can
handle. The intention is to produce the set of targets that should be
used when the generator is run.
only_one: convert 'source' to only one of source types
if there's more than one possibility, report an
error.
Returns a pair:
consumed: all targets that can be consumed.
bypassed: all targets that cannot be consumed.
|
[
"Attempts",
"to",
"convert",
"source",
"to",
"the",
"types",
"that",
"this",
"generator",
"can",
"handle",
".",
"The",
"intention",
"is",
"to",
"produce",
"the",
"set",
"of",
"targets",
"can",
"should",
"be",
"used",
"when",
"generator",
"is",
"run",
".",
"only_one",
":",
"convert",
"source",
"to",
"only",
"one",
"of",
"source",
"types",
"if",
"there",
"s",
"more",
"that",
"one",
"possibility",
"report",
"an",
"error",
".",
"Returns",
"a",
"pair",
":",
"consumed",
":",
"all",
"targets",
"that",
"can",
"be",
"consumed",
".",
"bypassed",
":",
"all",
"targets",
"that",
"cannot",
"be",
"consumed",
"."
] |
def convert_to_consumable_types (self, project, name, prop_set, sources, only_one=False):
""" Attempts to convert 'source' to the types that this generator can
handle. The intention is to produce the set of targets that should be
used when the generator is run.
only_one: convert 'source' to only one of source types
if there's more than one possibility, report an
error.
Returns a pair:
consumed: all targets that can be consumed.
bypassed: all targets that cannot be consumed.
"""
consumed = []
bypassed = []
missing_types = []
if len (sources) > 1:
# Don't know how to handle several sources yet. Just try
# to pass the request to other generator
missing_types = self.source_types_
else:
(c, m) = self.consume_directly (sources [0])
consumed += c
missing_types += m
# No need to search for transformation if
# some source type has consumed source and
# no more source types are needed.
if only_one and consumed:
missing_types = []
# TODO: we should check that only one source type is
# created if 'only_one' is true.
# TODO: consider if consumed/bypassed separation should
# be done by 'construct_types'.
if missing_types:
transformed = construct_types (project, name, missing_types, prop_set, sources)
# Add targets of right type to 'consumed'. Add others to
# 'bypassed'. The 'generators.construct' rule has done
# its best to convert everything to the required type.
# There's no need to rerun it on targets of different types.
# NOTE: ignoring usage requirements
for t in transformed[1]:
if t.type() in missing_types:
consumed.append(t)
else:
bypassed.append(t)
consumed = unique(consumed)
bypassed = unique(bypassed)
# remove elements of 'bypassed' that are in 'consumed'
# Suppose the target type of current generator, X is produced from
# X_1 and X_2, which are produced from Y by one generator.
# When creating X_1 from Y, X_2 will be added to 'bypassed'
# Likewise, when creating X_2 from Y, X_1 will be added to 'bypassed'
# But they are also in 'consumed'. We have to remove them from
# bypassed, so that generators up the call stack don't try to convert
# them.
# In this particular case, X_1 instance in 'consumed' and X_1 instance
# in 'bypassed' will be the same: because they have the same source and
# action name, and 'virtual-target.register' won't allow two different
# instances. Therefore, it's OK to use 'set.difference'.
bypassed = set.difference(bypassed, consumed)
return (consumed, bypassed)
|
[
"def",
"convert_to_consumable_types",
"(",
"self",
",",
"project",
",",
"name",
",",
"prop_set",
",",
"sources",
",",
"only_one",
"=",
"False",
")",
":",
"consumed",
"=",
"[",
"]",
"bypassed",
"=",
"[",
"]",
"missing_types",
"=",
"[",
"]",
"if",
"len",
"(",
"sources",
")",
">",
"1",
":",
"# Don't know how to handle several sources yet. Just try ",
"# to pass the request to other generator",
"missing_types",
"=",
"self",
".",
"source_types_",
"else",
":",
"(",
"c",
",",
"m",
")",
"=",
"self",
".",
"consume_directly",
"(",
"sources",
"[",
"0",
"]",
")",
"consumed",
"+=",
"c",
"missing_types",
"+=",
"m",
"# No need to search for transformation if",
"# some source type has consumed source and",
"# no more source types are needed.",
"if",
"only_one",
"and",
"consumed",
":",
"missing_types",
"=",
"[",
"]",
"#TODO: we should check that only one source type",
"#if create of 'only_one' is true.",
"# TODO: consider if consuned/bypassed separation should",
"# be done by 'construct_types'.",
"if",
"missing_types",
":",
"transformed",
"=",
"construct_types",
"(",
"project",
",",
"name",
",",
"missing_types",
",",
"prop_set",
",",
"sources",
")",
"# Add targets of right type to 'consumed'. Add others to",
"# 'bypassed'. The 'generators.construct' rule has done",
"# its best to convert everything to the required type.",
"# There's no need to rerun it on targets of different types.",
"# NOTE: ignoring usage requirements",
"for",
"t",
"in",
"transformed",
"[",
"1",
"]",
":",
"if",
"t",
".",
"type",
"(",
")",
"in",
"missing_types",
":",
"consumed",
".",
"append",
"(",
"t",
")",
"else",
":",
"bypassed",
".",
"append",
"(",
"t",
")",
"consumed",
"=",
"unique",
"(",
"consumed",
")",
"bypassed",
"=",
"unique",
"(",
"bypassed",
")",
"# remove elements of 'bypassed' that are in 'consumed'",
"# Suppose the target type of current generator, X is produced from ",
"# X_1 and X_2, which are produced from Y by one generator.",
"# When creating X_1 from Y, X_2 will be added to 'bypassed'",
"# Likewise, when creating X_2 from Y, X_1 will be added to 'bypassed'",
"# But they are also in 'consumed'. We have to remove them from",
"# bypassed, so that generators up the call stack don't try to convert",
"# them. ",
"# In this particular case, X_1 instance in 'consumed' and X_1 instance",
"# in 'bypassed' will be the same: because they have the same source and",
"# action name, and 'virtual-target.register' won't allow two different",
"# instances. Therefore, it's OK to use 'set.difference'.",
"bypassed",
"=",
"set",
".",
"difference",
"(",
"bypassed",
",",
"consumed",
")",
"return",
"(",
"consumed",
",",
"bypassed",
")"
] |
https://github.com/msftguy/ssh-rd/blob/a5f3a79daeac5844edebf01916c9613563f1c390/_3rd/boost_1_48_0/tools/build/v2/build/generators.py#L485-L558
|
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/richtext.py
|
python
|
RichTextParagraphLayoutBox.PromoteList
|
(*args)
|
return _richtext.RichTextParagraphLayoutBox_PromoteList(*args)
|
PromoteList(self, int promoteBy, RichTextRange range, wxRichTextListStyleDefinition def=None,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int specifiedLevel=-1) -> bool
PromoteList(self, int promoteBy, RichTextRange range, String defName,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool
|
PromoteList(self, int promoteBy, RichTextRange range, wxRichTextListStyleDefinition def=None,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int specifiedLevel=-1) -> bool
PromoteList(self, int promoteBy, RichTextRange range, String defName,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool
|
[
"PromoteList",
"(",
"self",
"int",
"promoteBy",
"RichTextRange",
"range",
"wxRichTextListStyleDefinition",
"def",
"=",
"None",
"int",
"flags",
"=",
"RICHTEXT_SETSTYLE_WITH_UNDO",
"int",
"specifiedLevel",
"=",
"-",
"1",
")",
"-",
">",
"bool",
"PromoteList",
"(",
"self",
"int",
"promoteBy",
"RichTextRange",
"range",
"String",
"defName",
"int",
"flags",
"=",
"RICHTEXT_SETSTYLE_WITH_UNDO",
"int",
"specifiedLevel",
"=",
"-",
"1",
")",
"-",
">",
"bool"
] |
def PromoteList(*args):
"""
PromoteList(self, int promoteBy, RichTextRange range, wxRichTextListStyleDefinition def=None,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO,
int specifiedLevel=-1) -> bool
PromoteList(self, int promoteBy, RichTextRange range, String defName,
int flags=RICHTEXT_SETSTYLE_WITH_UNDO, int specifiedLevel=-1) -> bool
"""
return _richtext.RichTextParagraphLayoutBox_PromoteList(*args)
|
[
"def",
"PromoteList",
"(",
"*",
"args",
")",
":",
"return",
"_richtext",
".",
"RichTextParagraphLayoutBox_PromoteList",
"(",
"*",
"args",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/richtext.py#L1779-L1787
|
|
tuttleofx/TuttleOFX
|
36fc4cae15092a84ea8c29b9c6658c7cabfadb6e
|
applications/example/pythonBinding/demo_progress_handle.py
|
python
|
ProgressHandle.beginSequence
|
(self)
|
Called before the beginning of the process
|
Called before the beginning of the process
|
[
"Called",
"before",
"the",
"beginning",
"of",
"the",
"process"
] |
def beginSequence(self):
"""
Called before the beginning of the process
"""
if self.callback:
self.callback()
print "---> beginSequence"
|
[
"def",
"beginSequence",
"(",
"self",
")",
":",
"if",
"self",
".",
"callback",
":",
"self",
".",
"callback",
"(",
")",
"print",
"\"---> beginSequence\""
] |
https://github.com/tuttleofx/TuttleOFX/blob/36fc4cae15092a84ea8c29b9c6658c7cabfadb6e/applications/example/pythonBinding/demo_progress_handle.py#L34-L40
|
||
windystrife/UnrealEngine_NVIDIAGameWorks
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/calendar.py
|
python
|
TextCalendar.formatweekday
|
(self, day, width)
|
return names[day][:width].center(width)
|
Returns a formatted week day name.
|
Returns a formatted week day name.
|
[
"Returns",
"a",
"formatted",
"week",
"day",
"name",
"."
] |
def formatweekday(self, day, width):
"""
Returns a formatted week day name.
"""
if width >= 9:
names = day_name
else:
names = day_abbr
return names[day][:width].center(width)
|
[
"def",
"formatweekday",
"(",
"self",
",",
"day",
",",
"width",
")",
":",
"if",
"width",
">=",
"9",
":",
"names",
"=",
"day_name",
"else",
":",
"names",
"=",
"day_abbr",
"return",
"names",
"[",
"day",
"]",
"[",
":",
"width",
"]",
".",
"center",
"(",
"width",
")"
] |
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/calendar.py#L287-L295
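A quick usage sketch for the record above (standard library only; the widths are arbitrary):

import calendar

cal = calendar.TextCalendar()
# width >= 9 selects the full day name, anything narrower the abbreviation;
# the result is centered in the given width.
print(repr(cal.formatweekday(0, 10)))  # '  Monday  '
print(repr(cal.formatweekday(0, 3)))   # 'Mon'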
|
|
facebook/ThreatExchange
|
31914a51820c73c8a0daffe62ccca29a6e3d359e
|
api-reference-examples/python/pytx/pytx/rtu.py
|
python
|
ListenerView.dispatch_request
|
(self)
|
return self.get_response
|
This must be here for the Flask View to work. We verify that we got POST
data and send it to the callback function, otherwise we assume it was a
GET and respond with the configured GET response.
|
This must be here for the Flask View to work. We verify that we got POST
data and send it to the callback function, otherwise we assume it was a
GET and respond with the configured GET response.
|
[
"This",
"must",
"be",
"here",
"for",
"the",
"Flask",
"View",
"to",
"work",
".",
"We",
"verify",
"that",
"we",
"got",
"POST",
"data",
"and",
"send",
"it",
"to",
"the",
"callback",
"function",
"otherwise",
"we",
"assume",
"it",
"was",
"a",
"GET",
"and",
"respond",
"with",
"the",
"configured",
"GET",
"response",
"."
] |
def dispatch_request(self):
"""
This must be here for the Flask View to work. We verify that we got POST
data and send it to the callback function, otherwise we assume it was a
GET and respond with the configured GET response.
"""
if request.method == 'POST':
return self.callback(request=request.get_json(force=True))
return self.get_response
|
[
"def",
"dispatch_request",
"(",
"self",
")",
":",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"return",
"self",
".",
"callback",
"(",
"request",
"=",
"request",
".",
"get_json",
"(",
"force",
"=",
"True",
")",
")",
"return",
"self",
".",
"get_response"
] |
https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/api-reference-examples/python/pytx/pytx/rtu.py#L98-L107
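A hedged, self-contained re-creation of the same dispatch pattern in plain Flask; this is not the pytx API, and the route, view name, and canned GET reply are invented for illustration:

from flask import Flask, jsonify, request
from flask.views import View

app = Flask(__name__)

class EchoListener(View):
    """Standalone re-creation of the POST-callback / canned-GET pattern above."""
    methods = ['GET', 'POST']
    get_response = 'listener is up'  # hypothetical canned GET reply

    def dispatch_request(self):
        if request.method == 'POST':
            # hand the parsed JSON body to whatever processing is wanted
            return jsonify(received=request.get_json(force=True))
        return self.get_response

app.add_url_rule('/listener', view_func=EchoListener.as_view('listener'))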
|
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py2/scipy/special/orthogonal.py
|
python
|
_initial_nodes_b
|
(n, k)
|
return xksq
|
r"""Gatteschi initial guesses
Computes an initial approximation to the square of the `k`-th
(positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The formula is the one from lemma 3.2 in the
original paper. The guesses are accurate in the region just
below :math:`\sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots to compute
Returns
-------
xksq : ndarray
Square of the approximate root
See Also
--------
initial_nodes
roots_hermite_asy
|
r"""Gatteschi initial guesses
|
[
"r",
"Gatteschi",
"initial",
"guesses"
] |
def _initial_nodes_b(n, k):
r"""Gatteschi initial guesses
Computes an initial approximation to the square of the `k`-th
(positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
of order :math:`n`. The formula is the one from lemma 3.2 in the
original paper. The guesses are accurate in the region just
below :math:`\sqrt{2n + 1}`.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots to compute
Returns
-------
xksq : ndarray
Square of the approximate root
See Also
--------
initial_nodes
roots_hermite_asy
"""
a = n % 2 - 0.5
nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
# Airy roots by approximation
ak = specfun.airyzo(k.max(), 1)[0][::-1]
# Initial approximation of Hermite roots (square)
xksq = (nu +
2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +
1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +
(9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +
(16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -
(15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))
return xksq
|
[
"def",
"_initial_nodes_b",
"(",
"n",
",",
"k",
")",
":",
"a",
"=",
"n",
"%",
"2",
"-",
"0.5",
"nu",
"=",
"4.0",
"*",
"floor",
"(",
"n",
"/",
"2.0",
")",
"+",
"2.0",
"*",
"a",
"+",
"2.0",
"# Airy roots by approximation",
"ak",
"=",
"specfun",
".",
"airyzo",
"(",
"k",
".",
"max",
"(",
")",
",",
"1",
")",
"[",
"0",
"]",
"[",
":",
":",
"-",
"1",
"]",
"# Initial approximation of Hermite roots (square)",
"xksq",
"=",
"(",
"nu",
"+",
"2.0",
"**",
"(",
"2.0",
"/",
"3.0",
")",
"*",
"ak",
"*",
"nu",
"**",
"(",
"1.0",
"/",
"3.0",
")",
"+",
"1.0",
"/",
"5.0",
"*",
"2.0",
"**",
"(",
"4.0",
"/",
"3.0",
")",
"*",
"ak",
"**",
"2",
"*",
"nu",
"**",
"(",
"-",
"1.0",
"/",
"3.0",
")",
"+",
"(",
"9.0",
"/",
"140.0",
"-",
"12.0",
"/",
"175.0",
"*",
"ak",
"**",
"3",
")",
"*",
"nu",
"**",
"(",
"-",
"1.0",
")",
"+",
"(",
"16.0",
"/",
"1575.0",
"*",
"ak",
"+",
"92.0",
"/",
"7875.0",
"*",
"ak",
"**",
"4",
")",
"*",
"2.0",
"**",
"(",
"2.0",
"/",
"3.0",
")",
"*",
"nu",
"**",
"(",
"-",
"5.0",
"/",
"3.0",
")",
"-",
"(",
"15152.0",
"/",
"3031875.0",
"*",
"ak",
"**",
"5",
"+",
"1088.0",
"/",
"121275.0",
"*",
"ak",
"**",
"2",
")",
"*",
"2.0",
"**",
"(",
"1.0",
"/",
"3.0",
")",
"*",
"nu",
"**",
"(",
"-",
"7.0",
"/",
"3.0",
")",
")",
"return",
"xksq"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/special/orthogonal.py#L797-L834
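Restated as math (directly transcribing the expression above, with $a = (n \bmod 2) - \tfrac{1}{2}$, $\nu = 4\lfloor n/2 \rfloor + 2a + 2$, and $a_k$ the $k$-th Airy-function root):

$$
x_k^2 \approx \nu
+ 2^{2/3} a_k \nu^{1/3}
+ \tfrac{1}{5}\, 2^{4/3} a_k^2 \nu^{-1/3}
+ \left(\tfrac{9}{140} - \tfrac{12}{175} a_k^3\right) \nu^{-1}
+ \left(\tfrac{16}{1575} a_k + \tfrac{92}{7875} a_k^4\right) 2^{2/3} \nu^{-5/3}
- \left(\tfrac{15152}{3031875} a_k^5 + \tfrac{1088}{121275} a_k^2\right) 2^{1/3} \nu^{-7/3}
$$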
|
|
Polidea/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
|
python
|
SBTypeMember.GetBitfieldSizeInBits
|
(self)
|
return _lldb.SBTypeMember_GetBitfieldSizeInBits(self)
|
GetBitfieldSizeInBits(self) -> uint32_t
|
GetBitfieldSizeInBits(self) -> uint32_t
|
[
"GetBitfieldSizeInBits",
"(",
"self",
")",
"-",
">",
"uint32_t"
] |
def GetBitfieldSizeInBits(self):
"""GetBitfieldSizeInBits(self) -> uint32_t"""
return _lldb.SBTypeMember_GetBitfieldSizeInBits(self)
|
[
"def",
"GetBitfieldSizeInBits",
"(",
"self",
")",
":",
"return",
"_lldb",
".",
"SBTypeMember_GetBitfieldSizeInBits",
"(",
"self",
")"
] |
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L10160-L10162
|
|
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/python/training/input.py
|
python
|
_batch_join
|
(tensors_list, batch_size, keep_input, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None)
|
Helper function for `batch_join` and `maybe_batch_join`.
|
Helper function for `batch_join` and `maybe_batch_join`.
|
[
"Helper",
"function",
"for",
"batch_join",
"and",
"maybe_batch_join",
"."
] |
def _batch_join(tensors_list, batch_size, keep_input, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None):
"""Helper function for `batch_join` and `maybe_batch_join`."""
if context.in_eager_mode():
raise ValueError(
"Queue-using input pipelines are not supported when eager execution is"
" enabled. Please use tf.data to ingest data into your model instead.")
tensor_list_list = _as_tensor_list_list(tensors_list)
with ops.name_scope(name, "batch_join",
_flatten(tensor_list_list) + [keep_input]) as name:
tensor_list_list = _validate_join(tensor_list_list)
keep_input = _validate_keep_input(keep_input, enqueue_many)
tensor_list_list, sparse_info = _store_sparse_tensors_join(
tensor_list_list, enqueue_many, keep_input)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = _which_queue(dynamic_pad)(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
summary.scalar("fraction_of_%d_full" % capacity,
math_ops.to_float(queue.size()) * (1. / capacity))
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
else:
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
# tensors_list was validated to not be empty.
return _as_original_type(tensors_list[0], dequeued)
|
[
"def",
"_batch_join",
"(",
"tensors_list",
",",
"batch_size",
",",
"keep_input",
",",
"capacity",
"=",
"32",
",",
"enqueue_many",
"=",
"False",
",",
"shapes",
"=",
"None",
",",
"dynamic_pad",
"=",
"False",
",",
"allow_smaller_final_batch",
"=",
"False",
",",
"shared_name",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"context",
".",
"in_eager_mode",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Queue-using input pipelines are not supported when eager execution is\"",
"\" enabled. Please use tf.data to ingest data into your model instead.\"",
")",
"tensor_list_list",
"=",
"_as_tensor_list_list",
"(",
"tensors_list",
")",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"\"batch_join\"",
",",
"_flatten",
"(",
"tensor_list_list",
")",
"+",
"[",
"keep_input",
"]",
")",
"as",
"name",
":",
"tensor_list_list",
"=",
"_validate_join",
"(",
"tensor_list_list",
")",
"keep_input",
"=",
"_validate_keep_input",
"(",
"keep_input",
",",
"enqueue_many",
")",
"tensor_list_list",
",",
"sparse_info",
"=",
"_store_sparse_tensors_join",
"(",
"tensor_list_list",
",",
"enqueue_many",
",",
"keep_input",
")",
"types",
"=",
"_dtypes",
"(",
"tensor_list_list",
")",
"shapes",
"=",
"_shapes",
"(",
"tensor_list_list",
",",
"shapes",
",",
"enqueue_many",
")",
"# TODO(josh11b,mrry): Switch to BatchQueue once it is written.",
"queue",
"=",
"_which_queue",
"(",
"dynamic_pad",
")",
"(",
"capacity",
"=",
"capacity",
",",
"dtypes",
"=",
"types",
",",
"shapes",
"=",
"shapes",
",",
"shared_name",
"=",
"shared_name",
")",
"_enqueue_join",
"(",
"queue",
",",
"tensor_list_list",
",",
"enqueue_many",
",",
"keep_input",
")",
"summary",
".",
"scalar",
"(",
"\"fraction_of_%d_full\"",
"%",
"capacity",
",",
"math_ops",
".",
"to_float",
"(",
"queue",
".",
"size",
"(",
")",
")",
"*",
"(",
"1.",
"/",
"capacity",
")",
")",
"if",
"allow_smaller_final_batch",
":",
"dequeued",
"=",
"queue",
".",
"dequeue_up_to",
"(",
"batch_size",
",",
"name",
"=",
"name",
")",
"else",
":",
"dequeued",
"=",
"queue",
".",
"dequeue_many",
"(",
"batch_size",
",",
"name",
"=",
"name",
")",
"dequeued",
"=",
"_restore_sparse_tensors",
"(",
"dequeued",
",",
"sparse_info",
")",
"# tensors_list was validated to not be empty.",
"return",
"_as_original_type",
"(",
"tensors_list",
"[",
"0",
"]",
",",
"dequeued",
")"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/training/input.py#L726-L756
|
||
rbgirshick/caffe-fast-rcnn
|
28a579eaf0668850705598b3075b8969f22226d9
|
python/caffe/io.py
|
python
|
Transformer.set_mean
|
(self, in_, mean)
|
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
|
Set the mean to subtract for centering the data.
|
[
"Set",
"the",
"mean",
"to",
"subtract",
"for",
"centering",
"the",
"data",
"."
] |
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
|
[
"def",
"set_mean",
"(",
"self",
",",
"in_",
",",
"mean",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"ms",
"=",
"mean",
".",
"shape",
"if",
"mean",
".",
"ndim",
"==",
"1",
":",
"# broadcast channels",
"if",
"ms",
"[",
"0",
"]",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Mean channels incompatible with input.'",
")",
"mean",
"=",
"mean",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"# elementwise mean",
"if",
"len",
"(",
"ms",
")",
"==",
"2",
":",
"ms",
"=",
"(",
"1",
",",
")",
"+",
"ms",
"if",
"len",
"(",
"ms",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Mean shape invalid'",
")",
"if",
"ms",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
":",
"]",
":",
"raise",
"ValueError",
"(",
"'Mean shape incompatible with input shape.'",
")",
"self",
".",
"mean",
"[",
"in_",
"]",
"=",
"mean"
] |
https://github.com/rbgirshick/caffe-fast-rcnn/blob/28a579eaf0668850705598b3075b8969f22226d9/python/caffe/io.py#L232-L256
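A usage sketch for the record above; the prototxt/caffemodel paths and the BGR mean values are placeholders, and the input blob is assumed to be named 'data':

import numpy as np
import caffe  # assumes pycaffe is on the path

# placeholder model files
net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# a per-channel (BGR) mean; set_mean broadcasts it over the spatial dims
transformer.set_mean('data', np.array([104.0, 117.0, 123.0]))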
|
||
GJDuck/LowFat
|
ecf6a0f0fa1b73a27a626cf493cc39e477b6faea
|
llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/__init__.py
|
python
|
tempdir
|
()
|
return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp')))
|
Return the default temporary directory.
 |
Return the default temporary directory.
|
[
"Return",
"the",
"default",
"temorary",
"directory",
"."
] |
def tempdir():
""" Return the default temorary directory. """
from os import getenv
return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp')))
|
[
"def",
"tempdir",
"(",
")",
":",
"from",
"os",
"import",
"getenv",
"return",
"getenv",
"(",
"'TMPDIR'",
",",
"getenv",
"(",
"'TEMP'",
",",
"getenv",
"(",
"'TMP'",
",",
"'/tmp'",
")",
")",
")"
] |
https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/__init__.py#L33-L37
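The same lookup order as a standalone sketch:

from os import getenv

def tempdir():
    # TMPDIR wins, then TEMP, then TMP, with /tmp as the final fallback
    return getenv('TMPDIR', getenv('TEMP', getenv('TMP', '/tmp')))

print(tempdir())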
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/ftplib.py
|
python
|
FTP.acct
|
(self, password)
|
return self.voidcmd(cmd)
|
Send new account name.
|
Send new account name.
|
[
"Send",
"new",
"account",
"name",
"."
] |
def acct(self, password):
'''Send new account name.'''
cmd = 'ACCT ' + password
return self.voidcmd(cmd)
|
[
"def",
"acct",
"(",
"self",
",",
"password",
")",
":",
"cmd",
"=",
"'ACCT '",
"+",
"password",
"return",
"self",
".",
"voidcmd",
"(",
"cmd",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/ftplib.py#L548-L551
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/requests/sessions.py
|
python
|
Session.merge_environment_settings
|
(self, url, proxies, stream, verify, cert)
|
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
|
Check the environment and merge it with some settings.
:rtype: dict
|
Check the environment and merge it with some settings.
|
[
"Check",
"the",
"environment",
"and",
"merge",
"it",
"with",
"some",
"settings",
"."
] |
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
|
[
"def",
"merge_environment_settings",
"(",
"self",
",",
"url",
",",
"proxies",
",",
"stream",
",",
"verify",
",",
"cert",
")",
":",
"# Gather clues from the surrounding environment.",
"if",
"self",
".",
"trust_env",
":",
"# Set environment's proxies.",
"no_proxy",
"=",
"proxies",
".",
"get",
"(",
"'no_proxy'",
")",
"if",
"proxies",
"is",
"not",
"None",
"else",
"None",
"env_proxies",
"=",
"get_environ_proxies",
"(",
"url",
",",
"no_proxy",
"=",
"no_proxy",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"env_proxies",
".",
"items",
"(",
")",
":",
"proxies",
".",
"setdefault",
"(",
"k",
",",
"v",
")",
"# Look for requests environment configuration and be compatible",
"# with cURL.",
"if",
"verify",
"is",
"True",
"or",
"verify",
"is",
"None",
":",
"verify",
"=",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'REQUESTS_CA_BUNDLE'",
")",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"'CURL_CA_BUNDLE'",
")",
")",
"# Merge all the kwargs.",
"proxies",
"=",
"merge_setting",
"(",
"proxies",
",",
"self",
".",
"proxies",
")",
"stream",
"=",
"merge_setting",
"(",
"stream",
",",
"self",
".",
"stream",
")",
"verify",
"=",
"merge_setting",
"(",
"verify",
",",
"self",
".",
"verify",
")",
"cert",
"=",
"merge_setting",
"(",
"cert",
",",
"self",
".",
"cert",
")",
"return",
"{",
"'verify'",
":",
"verify",
",",
"'proxies'",
":",
"proxies",
",",
"'stream'",
":",
"stream",
",",
"'cert'",
":",
"cert",
"}"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/common-code/lib/requests/sessions.py#L687-L714
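An illustrative call against the public API above; the proxy URL is a placeholder:

import requests

session = requests.Session()
session.proxies = {'https': 'http://proxy.example:3128'}  # hypothetical proxy
# per-call values are merged with the session defaults (and, with trust_env,
# with environment proxies and the REQUESTS_CA_BUNDLE/CURL_CA_BUNDLE vars)
settings = session.merge_environment_settings(
    url='https://example.com', proxies={}, stream=None, verify=None, cert=None)
print(settings['proxies'], settings['verify'])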
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/configloader.py
|
python
|
build_profile_map
|
(parsed_ini_config)
|
return final_config
|
Convert the parsed INI config into a profile map.
The config file format requires that every profile except the
default be prepended with "profile", e.g.::
[profile test]
aws_... = foo
aws_... = bar
[profile bar]
aws_... = foo
aws_... = bar
# This is *not* a profile
[preview]
otherstuff = 1
# Neither is this
[foobar]
morestuff = 2
The build_profile_map will take a parsed INI config file where each top
level key represents a section name, and convert into a format where all
the profiles are under a single top level "profiles" key, and each key in
the sub dictionary is a profile name. For example, the above config file
would be converted from::
{"profile test": {"aws_...": "foo", "aws...": "bar"},
"profile bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
into::
{"profiles": {"test": {"aws_...": "foo", "aws...": "bar"},
"bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
If there are no profiles in the provided parsed INI contents, then
an empty dict will be the value associated with the ``profiles`` key.
.. note::
This will not mutate the passed in parsed_ini_config. Instead it will
make a deepcopy and return that value.
|
Convert the parsed INI config into a profile map.
|
[
"Convert",
"the",
"parsed",
"INI",
"config",
"into",
"a",
"profile",
"map",
"."
] |
def build_profile_map(parsed_ini_config):
"""Convert the parsed INI config into a profile map.
The config file format requires that every profile except the
default be prepended with "profile", e.g.::
[profile test]
aws_... = foo
aws_... = bar
[profile bar]
aws_... = foo
aws_... = bar
# This is *not* a profile
[preview]
otherstuff = 1
# Neither is this
[foobar]
morestuff = 2
The build_profile_map will take a parsed INI config file where each top
level key represents a section name, and convert into a format where all
the profiles are under a single top level "profiles" key, and each key in
the sub dictionary is a profile name. For example, the above config file
would be converted from::
{"profile test": {"aws_...": "foo", "aws...": "bar"},
"profile bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
into::
{"profiles": {"test": {"aws_...": "foo", "aws...": "bar"},
"bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
If there are no profiles in the provided parsed INI contents, then
an empty dict will be the value associated with the ``profiles`` key.
.. note::
This will not mutate the passed in parsed_ini_config. Instead it will
make a deepcopy and return that value.
"""
parsed_config = copy.deepcopy(parsed_ini_config)
profiles = {}
final_config = {}
for key, values in parsed_config.items():
if key.startswith("profile"):
try:
parts = shlex.split(key)
except ValueError:
continue
if len(parts) == 2:
profiles[parts[1]] = values
elif key == 'default':
# default section is special and is considered a profile
# name but we don't require you use 'profile "default"'
# as a section.
profiles[key] = values
else:
final_config[key] = values
final_config['profiles'] = profiles
return final_config
|
[
"def",
"build_profile_map",
"(",
"parsed_ini_config",
")",
":",
"parsed_config",
"=",
"copy",
".",
"deepcopy",
"(",
"parsed_ini_config",
")",
"profiles",
"=",
"{",
"}",
"final_config",
"=",
"{",
"}",
"for",
"key",
",",
"values",
"in",
"parsed_config",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"\"profile\"",
")",
":",
"try",
":",
"parts",
"=",
"shlex",
".",
"split",
"(",
"key",
")",
"except",
"ValueError",
":",
"continue",
"if",
"len",
"(",
"parts",
")",
"==",
"2",
":",
"profiles",
"[",
"parts",
"[",
"1",
"]",
"]",
"=",
"values",
"elif",
"key",
"==",
"'default'",
":",
"# default section is special and is considered a profile",
"# name but we don't require you use 'profile \"default\"'",
"# as a section.",
"profiles",
"[",
"key",
"]",
"=",
"values",
"else",
":",
"final_config",
"[",
"key",
"]",
"=",
"values",
"final_config",
"[",
"'profiles'",
"]",
"=",
"profiles",
"return",
"final_config"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/botocore/configloader.py#L202-L272
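A small illustration of the documented transformation, feeding an inline dict in place of a parsed INI file:

from botocore.configloader import build_profile_map

parsed = {
    'profile dev': {'region': 'us-west-2'},
    'default': {'region': 'us-east-1'},
    'preview': {'otherstuff': '1'},  # not a profile, passed through untouched
}
print(build_profile_map(parsed))
# {'preview': {'otherstuff': '1'},
#  'profiles': {'dev': {'region': 'us-west-2'},
#               'default': {'region': 'us-east-1'}}}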
|
|
pmq20/node-packer
|
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
|
lts/tools/gyp/pylib/gyp/generator/android.py
|
python
|
AndroidMkWriter.WriteCopies
|
(self, copies, extra_outputs)
|
Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
|
Write Makefile code for any 'copies' from the gyp input.
|
[
"Write",
"Makefile",
"code",
"for",
"any",
"copies",
"from",
"the",
"gyp",
"input",
"."
] |
def WriteCopies(self, copies, extra_outputs):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
"""
self.WriteLn('### Generated for copy rule.')
variable = make.StringToMakefileVariable(self.relative_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# The Android build system does not allow generation of files into the
# source tree. The destination should start with a variable, which will
# typically be $(gyp_intermediate_dir) or
# $(gyp_shared_intermediate_dir). Note that we can't use an assertion
# because some of the gyp tests depend on this.
if not copy['destination'].startswith('$'):
print('WARNING: Copy rule for target %s writes output to '
'local path %s' % (self.target, copy['destination']))
# LocalPathify() calls normpath, stripping trailing slashes.
path = Sourceify(self.LocalPathify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
filename)))
self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
(output, path))
self.WriteLn('\t@echo Copying: $@')
self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
self.WriteLn()
outputs.append(output)
self.WriteLn('%s = %s' % (variable,
' '.join(map(make.QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
|
[
"def",
"WriteCopies",
"(",
"self",
",",
"copies",
",",
"extra_outputs",
")",
":",
"self",
".",
"WriteLn",
"(",
"'### Generated for copy rule.'",
")",
"variable",
"=",
"make",
".",
"StringToMakefileVariable",
"(",
"self",
".",
"relative_target",
"+",
"'_copies'",
")",
"outputs",
"=",
"[",
"]",
"for",
"copy",
"in",
"copies",
":",
"for",
"path",
"in",
"copy",
"[",
"'files'",
"]",
":",
"# The Android build system does not allow generation of files into the",
"# source tree. The destination should start with a variable, which will",
"# typically be $(gyp_intermediate_dir) or",
"# $(gyp_shared_intermediate_dir). Note that we can't use an assertion",
"# because some of the gyp tests depend on this.",
"if",
"not",
"copy",
"[",
"'destination'",
"]",
".",
"startswith",
"(",
"'$'",
")",
":",
"print",
"(",
"'WARNING: Copy rule for target %s writes output to '",
"'local path %s'",
"%",
"(",
"self",
".",
"target",
",",
"copy",
"[",
"'destination'",
"]",
")",
")",
"# LocalPathify() calls normpath, stripping trailing slashes.",
"path",
"=",
"Sourceify",
"(",
"self",
".",
"LocalPathify",
"(",
"path",
")",
")",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"[",
"1",
"]",
"output",
"=",
"Sourceify",
"(",
"self",
".",
"LocalPathify",
"(",
"os",
".",
"path",
".",
"join",
"(",
"copy",
"[",
"'destination'",
"]",
",",
"filename",
")",
")",
")",
"self",
".",
"WriteLn",
"(",
"'%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)'",
"%",
"(",
"output",
",",
"path",
")",
")",
"self",
".",
"WriteLn",
"(",
"'\\t@echo Copying: $@'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) mkdir -p $(dir $@)'",
")",
"self",
".",
"WriteLn",
"(",
"'\\t$(hide) $(ACP) -rpf $< $@'",
")",
"self",
".",
"WriteLn",
"(",
")",
"outputs",
".",
"append",
"(",
"output",
")",
"self",
".",
"WriteLn",
"(",
"'%s = %s'",
"%",
"(",
"variable",
",",
"' '",
".",
"join",
"(",
"map",
"(",
"make",
".",
"QuoteSpaces",
",",
"outputs",
")",
")",
")",
")",
"extra_outputs",
".",
"append",
"(",
"'$(%s)'",
"%",
"variable",
")",
"self",
".",
"WriteLn",
"(",
")"
] |
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/gyp/pylib/gyp/generator/android.py#L416-L453
|
||
moderngl/moderngl
|
32fe79927e02b0fa893b3603d677bdae39771e14
|
moderngl/context.py
|
python
|
Context.depth_func
|
(self)
|
int: Set the default depth func.
The depth function is set using a string.
Example::
ctx.depth_func = '<=' # GL_LEQUAL
ctx.depth_func = '<' # GL_LESS
ctx.depth_func = '>=' # GL_GEQUAL
ctx.depth_func = '>' # GL_GREATER
ctx.depth_func = '==' # GL_EQUAL
ctx.depth_func = '!=' # GL_NOTEQUAL
ctx.depth_func = '0' # GL_NEVER
ctx.depth_func = '1' # GL_ALWAYS
|
int: Set the default depth func.
The depth function is set using a string.
|
[
"int",
":",
"Set",
"the",
"default",
"depth",
"func",
".",
"The",
"depth",
"function",
"is",
"set",
"using",
"a",
"string",
"."
] |
def depth_func(self) -> str:
'''
int: Set the default depth func.
The depth function is set using a string.
Example::
ctx.depth_func = '<=' # GL_LEQUAL
ctx.depth_func = '<' # GL_LESS
ctx.depth_func = '>=' # GL_GEQUAL
ctx.depth_func = '>' # GL_GREATER
ctx.depth_func = '==' # GL_EQUAL
ctx.depth_func = '!=' # GL_NOTEQUAL
ctx.depth_func = '0' # GL_NEVER
ctx.depth_func = '1' # GL_ALWAYS
'''
raise NotImplementedError()
|
[
"def",
"depth_func",
"(",
"self",
")",
"->",
"str",
":",
"raise",
"NotImplementedError",
"(",
")"
] |
https://github.com/moderngl/moderngl/blob/32fe79927e02b0fa893b3603d677bdae39771e14/moderngl/context.py#L353-L370
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/gtk/richtext.py
|
python
|
TextAttrBorder.HasColour
|
(*args, **kwargs)
|
return _richtext.TextAttrBorder_HasColour(*args, **kwargs)
|
HasColour(self) -> bool
|
HasColour(self) -> bool
|
[
"HasColour",
"(",
"self",
")",
"-",
">",
"bool"
] |
def HasColour(*args, **kwargs):
"""HasColour(self) -> bool"""
return _richtext.TextAttrBorder_HasColour(*args, **kwargs)
|
[
"def",
"HasColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"TextAttrBorder_HasColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L390-L392
|
|
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
third_party/catapult/third_party/Paste/paste/util/intset.py
|
python
|
IntSet.len
|
(self)
|
return rlen
|
Returns the length of this integer set as an integer. In case the
length is infinite, returns -1. This function exists because of a
limitation of the builtin len() function which expects values in
the range 0 <= len < 2**31. Use this function in case your integer
set might be larger.
|
Returns the length of this integer set as an integer. In case the
length is infinite, returns -1. This function exists because of a
limitation of the builtin len() function which expects values in
the range 0 <= len < 2**31. Use this function in case your integer
set might be larger.
|
[
"Returns",
"the",
"length",
"of",
"this",
"integer",
"set",
"as",
"an",
"integer",
".",
"In",
"case",
"the",
"length",
"is",
"infinite",
"returns",
"-",
"1",
".",
"This",
"function",
"exists",
"because",
"of",
"a",
"limitation",
"of",
"the",
"builtin",
"len",
"()",
"function",
"which",
"expects",
"values",
"in",
"the",
"range",
"0",
"<",
"=",
"len",
"<",
"2",
"**",
"31",
".",
"Use",
"this",
"function",
"in",
"case",
"your",
"integer",
"set",
"might",
"be",
"larger",
"."
] |
def len(self):
"""Returns the length of this integer set as an integer. In case the
length is infinite, returns -1. This function exists because of a
limitation of the builtin len() function which expects values in
the range 0 <= len < 2**31. Use this function in case your integer
set might be larger."""
if not self._ranges:
return 0
if self._ranges[0][0] is _MININF or self._ranges[-1][1] is _MAXINF:
return -1
rlen = 0
for r in self._ranges:
rlen += r[1]-r[0]
return rlen
|
[
"def",
"len",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_ranges",
":",
"return",
"0",
"if",
"self",
".",
"_ranges",
"[",
"0",
"]",
"[",
"0",
"]",
"is",
"_MININF",
"or",
"self",
".",
"_ranges",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"is",
"_MAXINF",
":",
"return",
"-",
"1",
"rlen",
"=",
"0",
"for",
"r",
"in",
"self",
".",
"_ranges",
":",
"rlen",
"+=",
"r",
"[",
"1",
"]",
"-",
"r",
"[",
"0",
"]",
"return",
"rlen"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/Paste/paste/util/intset.py#L424-L438
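The same length computation restated as a standalone sketch (plain tuples and local sentinels stand in for the module's ranges and _MININF/_MAXINF):

# half-open (lo, hi) ranges are summed; an unbounded endpoint makes the
# length "infinite", which is reported as -1
_MININF, _MAXINF = object(), object()  # sentinels, as in the module

def ranges_len(ranges):
    if not ranges:
        return 0
    if ranges[0][0] is _MININF or ranges[-1][1] is _MAXINF:
        return -1
    return sum(hi - lo for lo, hi in ranges)

print(ranges_len([(0, 5), (10, 12)]))  # 7
print(ranges_len([(_MININF, 0)]))      # -1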
|
|
SpaceNetChallenge/BuildingDetectors
|
3def3c44b5847c744cd2f3356182892d92496579
|
qinhaifang/src/caffe-mnc/python/caffe/pycaffe.py
|
python
|
_Net_blobs
|
(self)
|
return OrderedDict(zip(self._blob_names, self._blobs))
|
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
|
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
|
[
"An",
"OrderedDict",
"(",
"bottom",
"to",
"top",
"i",
".",
"e",
".",
"input",
"to",
"output",
")",
"of",
"network",
"blobs",
"indexed",
"by",
"name"
] |
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict(zip(self._blob_names, self._blobs))
|
[
"def",
"_Net_blobs",
"(",
"self",
")",
":",
"return",
"OrderedDict",
"(",
"zip",
"(",
"self",
".",
"_blob_names",
",",
"self",
".",
"_blobs",
")",
")"
] |
https://github.com/SpaceNetChallenge/BuildingDetectors/blob/3def3c44b5847c744cd2f3356182892d92496579/qinhaifang/src/caffe-mnc/python/caffe/pycaffe.py#L23-L28
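Typical read access through the property above, assuming a loaded pycaffe net; the model file names are placeholders:

import caffe

net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
for name, blob in net.blobs.items():
    # each value is a Blob whose .data is a numpy array, in input-to-output order
    print(name, blob.data.shape)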
|
|
gnuradio/gnuradio
|
09c3c4fa4bfb1a02caac74cb5334dfe065391e3b
|
gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py
|
python
|
qa_ofdm_frame_equalizer_vcvc.test_001c_carrier_offset_no_cp
|
(self)
|
Same as before, but put a carrier offset in there
|
Same as before, but put a carrier offset in there
|
[
"Same",
"as",
"before",
"but",
"put",
"a",
"carrier",
"offset",
"in",
"there"
] |
def test_001c_carrier_offset_no_cp(self):
"""
Same as before, but put a carrier offset in there
"""
fft_len = 8
cp_len = 0
n_syms = 1
carr_offset = 1
occupied_carriers = ((-2, -1, 1, 2),)
tx_data = (
0, 0, 0, -1j, -1j, 0, -1j, -1j,
)
# The rx'd signal is shifted
rx_expected = (0, 0, 1, 1, 0, 1, 1, 0) * n_syms
equalizer = digital.ofdm_equalizer_static(fft_len, occupied_carriers)
chan_tag = gr.tag_t()
chan_tag.offset = 0
chan_tag.key = pmt.string_to_symbol("ofdm_sync_chan_taps")
# Note: this is shifted to the correct position!
chan_tag.value = pmt.init_c32vector(
fft_len, (0, 0, -1j, -1j, 0, -1j, -1j, 0))
offset_tag = gr.tag_t()
offset_tag.offset = 0
offset_tag.key = pmt.string_to_symbol("ofdm_sync_carr_offset")
offset_tag.value = pmt.from_long(carr_offset)
src = blocks.vector_source_c(
tx_data, False, fft_len, (chan_tag, offset_tag))
eq = digital.ofdm_frame_equalizer_vcvc(
equalizer.base(), cp_len, self.tsb_key)
sink = blocks.tsb_vector_sink_c(fft_len, tsb_key=self.tsb_key)
self.tb.connect(
src,
blocks.stream_to_tagged_stream(
gr.sizeof_gr_complex,
fft_len,
n_syms,
self.tsb_key),
eq,
sink)
self.tb.run()
# Check data
self.assertComplexTuplesAlmostEqual(
rx_expected, sink.data()[0], places=4)
|
[
"def",
"test_001c_carrier_offset_no_cp",
"(",
"self",
")",
":",
"fft_len",
"=",
"8",
"cp_len",
"=",
"0",
"n_syms",
"=",
"1",
"carr_offset",
"=",
"1",
"occupied_carriers",
"=",
"(",
"(",
"-",
"2",
",",
"-",
"1",
",",
"1",
",",
"2",
")",
",",
")",
"tx_data",
"=",
"(",
"0",
",",
"0",
",",
"0",
",",
"-",
"1j",
",",
"-",
"1j",
",",
"0",
",",
"-",
"1j",
",",
"-",
"1j",
",",
")",
"# The rx'd signal is shifted",
"rx_expected",
"=",
"(",
"0",
",",
"0",
",",
"1",
",",
"1",
",",
"0",
",",
"1",
",",
"1",
",",
"0",
")",
"*",
"n_syms",
"equalizer",
"=",
"digital",
".",
"ofdm_equalizer_static",
"(",
"fft_len",
",",
"occupied_carriers",
")",
"chan_tag",
"=",
"gr",
".",
"tag_t",
"(",
")",
"chan_tag",
".",
"offset",
"=",
"0",
"chan_tag",
".",
"key",
"=",
"pmt",
".",
"string_to_symbol",
"(",
"\"ofdm_sync_chan_taps\"",
")",
"# Note: this is shifted to the correct position!",
"chan_tag",
".",
"value",
"=",
"pmt",
".",
"init_c32vector",
"(",
"fft_len",
",",
"(",
"0",
",",
"0",
",",
"-",
"1j",
",",
"-",
"1j",
",",
"0",
",",
"-",
"1j",
",",
"-",
"1j",
",",
"0",
")",
")",
"offset_tag",
"=",
"gr",
".",
"tag_t",
"(",
")",
"offset_tag",
".",
"offset",
"=",
"0",
"offset_tag",
".",
"key",
"=",
"pmt",
".",
"string_to_symbol",
"(",
"\"ofdm_sync_carr_offset\"",
")",
"offset_tag",
".",
"value",
"=",
"pmt",
".",
"from_long",
"(",
"carr_offset",
")",
"src",
"=",
"blocks",
".",
"vector_source_c",
"(",
"tx_data",
",",
"False",
",",
"fft_len",
",",
"(",
"chan_tag",
",",
"offset_tag",
")",
")",
"eq",
"=",
"digital",
".",
"ofdm_frame_equalizer_vcvc",
"(",
"equalizer",
".",
"base",
"(",
")",
",",
"cp_len",
",",
"self",
".",
"tsb_key",
")",
"sink",
"=",
"blocks",
".",
"tsb_vector_sink_c",
"(",
"fft_len",
",",
"tsb_key",
"=",
"self",
".",
"tsb_key",
")",
"self",
".",
"tb",
".",
"connect",
"(",
"src",
",",
"blocks",
".",
"stream_to_tagged_stream",
"(",
"gr",
".",
"sizeof_gr_complex",
",",
"fft_len",
",",
"n_syms",
",",
"self",
".",
"tsb_key",
")",
",",
"eq",
",",
"sink",
")",
"self",
".",
"tb",
".",
"run",
"(",
")",
"# Check data",
"self",
".",
"assertComplexTuplesAlmostEqual",
"(",
"rx_expected",
",",
"sink",
".",
"data",
"(",
")",
"[",
"0",
"]",
",",
"places",
"=",
"4",
")"
] |
https://github.com/gnuradio/gnuradio/blob/09c3c4fa4bfb1a02caac74cb5334dfe065391e3b/gr-digital/python/digital/qa_ofdm_frame_equalizer_vcvc.py#L103-L145
|
||
natanielruiz/android-yolo
|
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
|
jni-build/jni/include/tensorflow/python/training/saver.py
|
python
|
Saver.from_proto
|
(saver_def)
|
return Saver(saver_def=saver_def)
|
Returns a `Saver` object created from `saver_def`.
|
Returns a `Saver` object created from `saver_def`.
|
[
"Returns",
"a",
"Saver",
"object",
"created",
"from",
"saver_def",
"."
] |
def from_proto(saver_def):
"""Returns a `Saver` object created from `saver_def`."""
return Saver(saver_def=saver_def)
|
[
"def",
"from_proto",
"(",
"saver_def",
")",
":",
"return",
"Saver",
"(",
"saver_def",
"=",
"saver_def",
")"
] |
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/training/saver.py#L968-L970
|
|
openthread/openthread
|
9fcdbed9c526c70f1556d1ed84099c1535c7cd32
|
third_party/mbedtls/repo/scripts/assemble_changelog.py
|
python
|
ChangelogFormat.format_category
|
(cls, title, body)
|
Construct the text of a category section from its title and body.
|
Construct the text of a category section from its title and body.
|
[
"Construct",
"the",
"text",
"of",
"a",
"category",
"section",
"from",
"its",
"title",
"and",
"body",
"."
] |
def format_category(cls, title, body):
"""Construct the text of a category section from its title and body."""
raise NotImplementedError
|
[
"def",
"format_category",
"(",
"cls",
",",
"title",
",",
"body",
")",
":",
"raise",
"NotImplementedError"
] |
https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/third_party/mbedtls/repo/scripts/assemble_changelog.py#L115-L117
|
||
miyosuda/TensorFlowAndroidDemo
|
35903e0221aa5f109ea2dbef27f20b52e317f42d
|
jni-build/jni/include/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py
|
python
|
SdcaModel._l2_loss
|
(self, l2)
|
Computes the (un-normalized) l2 loss of the model.
|
Computes the (un-normalized) l2 loss of the model.
|
[
"Computes",
"the",
"(",
"un",
"-",
"normalized",
")",
"l2",
"loss",
"of",
"the",
"model",
"."
] |
def _l2_loss(self, l2):
"""Computes the (un-normalized) l2 loss of the model."""
with name_scope('l2_loss'):
sum = 0.0
for name in ['sparse_features_weights', 'dense_features_weights']:
for weights in self._convert_n_to_tensor(self._variables[name]):
sum += math_ops.reduce_sum(math_ops.square(weights))
# SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
return l2 * sum / 2.0
|
[
"def",
"_l2_loss",
"(",
"self",
",",
"l2",
")",
":",
"with",
"name_scope",
"(",
"'l2_loss'",
")",
":",
"sum",
"=",
"0.0",
"for",
"name",
"in",
"[",
"'sparse_features_weights'",
",",
"'dense_features_weights'",
"]",
":",
"for",
"weights",
"in",
"self",
".",
"_convert_n_to_tensor",
"(",
"self",
".",
"_variables",
"[",
"name",
"]",
")",
":",
"sum",
"+=",
"math_ops",
".",
"reduce_sum",
"(",
"math_ops",
".",
"square",
"(",
"weights",
")",
")",
"# SDCA L2 regularization cost is: l2 * sum(weights^2) / 2",
"return",
"l2",
"*",
"sum",
"/",
"2.0"
] |
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/linear_optimizer/python/ops/sdca_ops.py#L408-L416
|
||
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/variables.py
|
python
|
RefVariable.assign_sub
|
(self, delta, use_locking=False, name=None, read_value=True)
|
return assign.op
|
Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
|
Subtracts a value from this variable.
|
[
"Subtracts",
"a",
"value",
"from",
"this",
"variable",
"."
] |
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the new
value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
assign = state_ops.assign_sub(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
|
[
"def",
"assign_sub",
"(",
"self",
",",
"delta",
",",
"use_locking",
"=",
"False",
",",
"name",
"=",
"None",
",",
"read_value",
"=",
"True",
")",
":",
"assign",
"=",
"state_ops",
".",
"assign_sub",
"(",
"self",
".",
"_variable",
",",
"delta",
",",
"use_locking",
"=",
"use_locking",
",",
"name",
"=",
"name",
")",
"if",
"read_value",
":",
"return",
"assign",
"return",
"assign",
".",
"op"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/variables.py#L2094-L2114
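A TF1-style graph-mode sketch of the call; values are arbitrary:

import tensorflow.compat.v1 as tf  # TF1-style graph mode
tf.disable_eager_execution()

v = tf.Variable(10.0)
dec = v.assign_sub(3.0)  # read_value=True (default): a tensor holding the new value
dec_op = v.assign_sub(1.0, read_value=False)  # read_value=False: the assign op itself

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(dec))  # 7.0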
|
|
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/command/build_src.py
|
python
|
build_src.pyrex_sources
|
(self, sources, extension)
|
return new_sources
|
Pyrex not supported; this remains for Cython support (see below)
|
Pyrex not supported; this remains for Cython support (see below)
|
[
"Pyrex",
"not",
"supported",
";",
"this",
"remains",
"for",
"Cython",
"support",
"(",
"see",
"below",
")"
] |
def pyrex_sources(self, sources, extension):
"""Pyrex not supported; this remains for Cython support (see below)"""
new_sources = []
ext_name = extension.name.split('.')[-1]
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == '.pyx':
target_file = self.generate_a_pyrex_source(base, ext_name,
source,
extension)
new_sources.append(target_file)
else:
new_sources.append(source)
return new_sources
|
[
"def",
"pyrex_sources",
"(",
"self",
",",
"sources",
",",
"extension",
")",
":",
"new_sources",
"=",
"[",
"]",
"ext_name",
"=",
"extension",
".",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"for",
"source",
"in",
"sources",
":",
"(",
"base",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"source",
")",
"if",
"ext",
"==",
"'.pyx'",
":",
"target_file",
"=",
"self",
".",
"generate_a_pyrex_source",
"(",
"base",
",",
"ext_name",
",",
"source",
",",
"extension",
")",
"new_sources",
".",
"append",
"(",
"target_file",
")",
"else",
":",
"new_sources",
".",
"append",
"(",
"source",
")",
"return",
"new_sources"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/command/build_src.py#L445-L458
|
|
pytorch/pytorch
|
7176c92687d3cc847cc046bf002269c6949a21c2
|
caffe2/python/workspace.py
|
python
|
ApplyTransform
|
(transform_key, net)
|
return transformed_net
|
Apply a Transform to a NetDef protobuf object, and return the new
transformed NetDef.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
Returns:
Transformed NetDef protobuf object.
|
Apply a Transform to a NetDef protobuf object, and return the new
transformed NetDef.
|
[
"Apply",
"a",
"Transform",
"to",
"a",
"NetDef",
"protobuf",
"object",
"and",
"returns",
"the",
"new",
"transformed",
"NetDef",
"."
] |
def ApplyTransform(transform_key, net):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
Returns:
Transformed NetDef protobuf object.
"""
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
|
[
"def",
"ApplyTransform",
"(",
"transform_key",
",",
"net",
")",
":",
"transformed_net",
"=",
"caffe2_pb2",
".",
"NetDef",
"(",
")",
"transformed_str",
"=",
"C",
".",
"apply_transform",
"(",
"str",
"(",
"transform_key",
")",
".",
"encode",
"(",
"'utf-8'",
")",
",",
"net",
".",
"SerializeToString",
"(",
")",
",",
")",
"transformed_net",
".",
"ParseFromString",
"(",
"transformed_str",
")",
"return",
"transformed_net"
] |
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/workspace.py#L462-L478
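A hedged usage sketch; 'SomeTransform' is a placeholder and must name a transform actually present in the C++ registry:

from caffe2.python import core, workspace

net = core.Net('example')
net.Relu(['X'], ['Y'])
# apply a registered transform to the serialized NetDef and get a new NetDef back
transformed = workspace.ApplyTransform('SomeTransform', net.Proto())
print(type(transformed))  # caffe2_pb2.NetDef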
|
|
tum-vision/fusenet
|
a1451be2971b348a01b0f525c2a3a7a0e215a591
|
scripts/cpp_lint.py
|
python
|
CheckInvalidIncrement
|
(filename, clean_lines, linenum, error)
|
Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
Checks for invalid increment *count++.
|
[
"Checks",
"for",
"invalid",
"increment",
"*",
"count",
"++",
"."
] |
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
|
[
"def",
"CheckInvalidIncrement",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"if",
"_RE_PATTERN_INVALID_INCREMENT",
".",
"match",
"(",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/invalid_increment'",
",",
"5",
",",
"'Changing pointer instead of value (or unused value of operator*).'",
")"
] |
https://github.com/tum-vision/fusenet/blob/a1451be2971b348a01b0f525c2a3a7a0e215a591/scripts/cpp_lint.py#L1733-L1752
|
||
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scikit-learn/py2/sklearn/feature_selection/mutual_info_.py
|
python
|
_compute_mi_cc
|
(x, y, n_neighbors)
|
return max(0, mi)
|
Compute mutual information between two continuous variables.
Parameters
----------
x, y : ndarray, shape (n_samples,)
Samples of two continuous random variables, must have an identical
shape.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information. If it turned out to be negative it is
replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
|
Compute mutual information between two continuous variables.
|
[
"Compute",
"mutual",
"information",
"between",
"two",
"continuous",
"variables",
"."
] |
def _compute_mi_cc(x, y, n_neighbors):
"""Compute mutual information between two continuous variables.
Parameters
----------
x, y : ndarray, shape (n_samples,)
Samples of two continuous random variables, must have an identical
shape.
n_neighbors : int
Number of nearest neighbors to search for each point, see [1]_.
Returns
-------
mi : float
Estimated mutual information. If it turned out to be negative it is
replaced by 0.
Notes
-----
True mutual information can't be negative. If its estimate by a numerical
method is negative, it means (providing the method is adequate) that the
mutual information is close to 0 and replacing it by 0 is a reasonable
strategy.
References
----------
.. [1] A. Kraskov, H. Stogbauer and P. Grassberger, "Estimating mutual
information". Phys. Rev. E 69, 2004.
"""
n_samples = x.size
x = x.reshape((-1, 1))
y = y.reshape((-1, 1))
xy = np.hstack((x, y))
# Here we rely on NearestNeighbors to select the fastest algorithm.
nn = NearestNeighbors(metric='chebyshev', n_neighbors=n_neighbors)
nn.fit(xy)
radius = nn.kneighbors()[0]
radius = np.nextafter(radius[:, -1], 0)
# Algorithm is selected explicitly to allow passing an array as radius
# later (not all algorithms support this).
nn.set_params(algorithm='kd_tree')
nn.fit(x)
ind = nn.radius_neighbors(radius=radius, return_distance=False)
nx = np.array([i.size for i in ind])
nn.fit(y)
ind = nn.radius_neighbors(radius=radius, return_distance=False)
ny = np.array([i.size for i in ind])
mi = (digamma(n_samples) + digamma(n_neighbors) -
np.mean(digamma(nx + 1)) - np.mean(digamma(ny + 1)))
return max(0, mi)
|
[
"def",
"_compute_mi_cc",
"(",
"x",
",",
"y",
",",
"n_neighbors",
")",
":",
"n_samples",
"=",
"x",
".",
"size",
"x",
"=",
"x",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"y",
"=",
"y",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"1",
")",
")",
"xy",
"=",
"np",
".",
"hstack",
"(",
"(",
"x",
",",
"y",
")",
")",
"# Here we rely on NearestNeighbors to select the fastest algorithm.",
"nn",
"=",
"NearestNeighbors",
"(",
"metric",
"=",
"'chebyshev'",
",",
"n_neighbors",
"=",
"n_neighbors",
")",
"nn",
".",
"fit",
"(",
"xy",
")",
"radius",
"=",
"nn",
".",
"kneighbors",
"(",
")",
"[",
"0",
"]",
"radius",
"=",
"np",
".",
"nextafter",
"(",
"radius",
"[",
":",
",",
"-",
"1",
"]",
",",
"0",
")",
"# Algorithm is selected explicitly to allow passing an array as radius",
"# later (not all algorithms support this).",
"nn",
".",
"set_params",
"(",
"algorithm",
"=",
"'kd_tree'",
")",
"nn",
".",
"fit",
"(",
"x",
")",
"ind",
"=",
"nn",
".",
"radius_neighbors",
"(",
"radius",
"=",
"radius",
",",
"return_distance",
"=",
"False",
")",
"nx",
"=",
"np",
".",
"array",
"(",
"[",
"i",
".",
"size",
"for",
"i",
"in",
"ind",
"]",
")",
"nn",
".",
"fit",
"(",
"y",
")",
"ind",
"=",
"nn",
".",
"radius_neighbors",
"(",
"radius",
"=",
"radius",
",",
"return_distance",
"=",
"False",
")",
"ny",
"=",
"np",
".",
"array",
"(",
"[",
"i",
".",
"size",
"for",
"i",
"in",
"ind",
"]",
")",
"mi",
"=",
"(",
"digamma",
"(",
"n_samples",
")",
"+",
"digamma",
"(",
"n_neighbors",
")",
"-",
"np",
".",
"mean",
"(",
"digamma",
"(",
"nx",
"+",
"1",
")",
")",
"-",
"np",
".",
"mean",
"(",
"digamma",
"(",
"ny",
"+",
"1",
")",
")",
")",
"return",
"max",
"(",
"0",
",",
"mi",
")"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/feature_selection/mutual_info_.py#L18-L76
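Since _compute_mi_cc is private, a sketch through the public estimator that wraps it; strongly correlated Gaussians should give a clearly positive estimate:

import numpy as np
from sklearn.feature_selection import mutual_info_regression

rng = np.random.RandomState(0)
x = rng.normal(size=1000)
y = x + 0.1 * rng.normal(size=1000)  # y depends strongly on x
# for continuous feature/target pairs this routes through the k-NN estimator above
mi = mutual_info_regression(x.reshape(-1, 1), y, n_neighbors=3, random_state=0)
print(mi)  # one clearly positive value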
|
|
google/llvm-propeller
|
45c226984fe8377ebfb2ad7713c680d652ba678d
|
llvm/utils/benchmark/mingw.py
|
python
|
root
|
(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger())
|
return root_dir
|
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
|
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
|
[
"Returns",
"the",
"root",
"folder",
"of",
"a",
"specific",
"version",
"of",
"the",
"mingw",
"-",
"builds",
"variant",
"of",
"gcc",
".",
"Will",
"download",
"the",
"compiler",
"if",
"needed"
] |
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = versions[version][arch].keys()
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = versions[version][arch][threading].keys()
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision == None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Tell the user whatzzup
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
|
[
"def",
"root",
"(",
"location",
"=",
"None",
",",
"arch",
"=",
"None",
",",
"version",
"=",
"None",
",",
"threading",
"=",
"None",
",",
"exceptions",
"=",
"None",
",",
"revision",
"=",
"None",
",",
"log",
"=",
"EmptyLogger",
"(",
")",
")",
":",
"# Get the repository if we don't have all the information",
"if",
"not",
"(",
"arch",
"and",
"version",
"and",
"threading",
"and",
"exceptions",
"and",
"revision",
")",
":",
"versions",
"=",
"repository",
"(",
"log",
"=",
"log",
")",
"# Determine some defaults",
"version",
"=",
"version",
"or",
"max",
"(",
"versions",
".",
"keys",
"(",
")",
")",
"if",
"not",
"arch",
":",
"arch",
"=",
"platform",
".",
"machine",
"(",
")",
".",
"lower",
"(",
")",
"if",
"arch",
"==",
"'x86'",
":",
"arch",
"=",
"'i686'",
"elif",
"arch",
"==",
"'amd64'",
":",
"arch",
"=",
"'x86_64'",
"if",
"not",
"threading",
":",
"keys",
"=",
"versions",
"[",
"version",
"]",
"[",
"arch",
"]",
".",
"keys",
"(",
")",
"if",
"'posix'",
"in",
"keys",
":",
"threading",
"=",
"'posix'",
"elif",
"'win32'",
"in",
"keys",
":",
"threading",
"=",
"'win32'",
"else",
":",
"threading",
"=",
"keys",
"[",
"0",
"]",
"if",
"not",
"exceptions",
":",
"keys",
"=",
"versions",
"[",
"version",
"]",
"[",
"arch",
"]",
"[",
"threading",
"]",
".",
"keys",
"(",
")",
"if",
"'seh'",
"in",
"keys",
":",
"exceptions",
"=",
"'seh'",
"elif",
"'sjlj'",
"in",
"keys",
":",
"exceptions",
"=",
"'sjlj'",
"else",
":",
"exceptions",
"=",
"keys",
"[",
"0",
"]",
"if",
"revision",
"==",
"None",
":",
"revision",
"=",
"max",
"(",
"versions",
"[",
"version",
"]",
"[",
"arch",
"]",
"[",
"threading",
"]",
"[",
"exceptions",
"]",
".",
"keys",
"(",
")",
")",
"if",
"not",
"location",
":",
"location",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
",",
"'mingw-builds'",
")",
"# Get the download url",
"url",
"=",
"versions",
"[",
"version",
"]",
"[",
"arch",
"]",
"[",
"threading",
"]",
"[",
"exceptions",
"]",
"[",
"revision",
"]",
"# Tell the user whatzzup",
"log",
".",
"info",
"(",
"'finding MinGW %s'",
",",
"'.'",
".",
"join",
"(",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"version",
")",
")",
"log",
".",
"debug",
"(",
"' - arch: %s'",
",",
"arch",
")",
"log",
".",
"debug",
"(",
"' - threading: %s'",
",",
"threading",
")",
"log",
".",
"debug",
"(",
"' - exceptions: %s'",
",",
"exceptions",
")",
"log",
".",
"debug",
"(",
"' - revision: %s'",
",",
"revision",
")",
"log",
".",
"debug",
"(",
"' - url: %s'",
",",
"url",
")",
"# Store each specific revision differently",
"slug",
"=",
"'{version}-{arch}-{threading}-{exceptions}-rev{revision}'",
"slug",
"=",
"slug",
".",
"format",
"(",
"version",
"=",
"'.'",
".",
"join",
"(",
"str",
"(",
"v",
")",
"for",
"v",
"in",
"version",
")",
",",
"arch",
"=",
"arch",
",",
"threading",
"=",
"threading",
",",
"exceptions",
"=",
"exceptions",
",",
"revision",
"=",
"revision",
")",
"if",
"arch",
"==",
"'x86_64'",
":",
"root_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"location",
",",
"slug",
",",
"'mingw64'",
")",
"elif",
"arch",
"==",
"'i686'",
":",
"root_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"location",
",",
"slug",
",",
"'mingw32'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown MinGW arch: '",
"+",
"arch",
")",
"# Download if needed",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"root_dir",
")",
":",
"downloaded",
"=",
"download",
"(",
"url",
",",
"os",
".",
"path",
".",
"join",
"(",
"location",
",",
"slug",
")",
",",
"log",
"=",
"log",
")",
"if",
"downloaded",
"!=",
"root_dir",
":",
"raise",
"ValueError",
"(",
"'The location of mingw did not match\\n%s\\n%s'",
"%",
"(",
"downloaded",
",",
"root_dir",
")",
")",
"return",
"root_dir"
] |
https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/llvm/utils/benchmark/mingw.py#L172-L246
|
|
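A minimal usage sketch for the root() helper above. The importable module name `mingw` and the argument values are assumptions for illustration; the call needs network access to fetch the repository index and, if missing, the toolchain itself.

# Hypothetical usage of root(); `mingw` is an assumed module name for the
# file containing it, and the chosen arch/threading/exceptions mirror the
# defaults the function would pick for a 64-bit host.
import logging

import mingw  # assumption: the helper file is importable under this name

logging.basicConfig(level=logging.INFO)
compiler_root = mingw.root(
    arch='x86_64',          # 64-bit toolchain
    threading='posix',      # same default the function prefers
    exceptions='seh',       # the 64-bit default exception model
    log=logging.getLogger('mingw'),  # stdlib Logger satisfies .info/.debug
)
print('gcc lives under:', compiler_root)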
ceph/ceph
|
959663007321a369c83218414a29bd9dbc8bda3a
|
src/pybind/mgr/orchestrator/module.py
|
python
|
OrchestratorCli._daemon_action_redeploy
|
(self,
name: str,
image: Optional[str] = None)
|
return HandleCommandResult(stdout=completion.result_str())
|
Redeploy a daemon (with a specific image)
|
Redeploy a daemon (with a specific image)
|
[
"Redeploy",
"a",
"daemon",
"(",
"with",
"a",
"specifc",
"image",
")"
] |
def _daemon_action_redeploy(self,
name: str,
image: Optional[str] = None) -> HandleCommandResult:
"""Redeploy a daemon (with a specifc image)"""
if '.' not in name:
raise OrchestratorError('%s is not a valid daemon name' % name)
completion = self.daemon_action("redeploy", name, image=image)
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
|
[
"def",
"_daemon_action_redeploy",
"(",
"self",
",",
"name",
":",
"str",
",",
"image",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"HandleCommandResult",
":",
"if",
"'.'",
"not",
"in",
"name",
":",
"raise",
"OrchestratorError",
"(",
"'%s is not a valid daemon name'",
"%",
"name",
")",
"completion",
"=",
"self",
".",
"daemon_action",
"(",
"\"redeploy\"",
",",
"name",
",",
"image",
"=",
"image",
")",
"raise_if_exception",
"(",
"completion",
")",
"return",
"HandleCommandResult",
"(",
"stdout",
"=",
"completion",
".",
"result_str",
"(",
")",
")"
] |
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/orchestrator/module.py#L966-L974
|
|
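A small standalone sketch of the name validation the method performs before dispatching: orchestrator daemon names take the form "<type>.<id>", so a bare service name is rejected. The helper name is illustrative.

# Mirror of the '.' check in _daemon_action_redeploy above.
def is_valid_daemon_name(name: str) -> bool:
    return '.' in name

assert is_valid_daemon_name('osd.3')
assert not is_valid_daemon_name('osd')  # would raise OrchestratorError above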
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/pandas/py2/pandas/io/pytables.py
|
python
|
DataCol.set_metadata
|
(self, metadata)
|
record the metadata
|
record the metadata
|
[
"record",
"the",
"metadata"
] |
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata, copy=False).ravel()
self.metadata = metadata
|
[
"def",
"set_metadata",
"(",
"self",
",",
"metadata",
")",
":",
"if",
"metadata",
"is",
"not",
"None",
":",
"metadata",
"=",
"np",
".",
"array",
"(",
"metadata",
",",
"copy",
"=",
"False",
")",
".",
"ravel",
"(",
")",
"self",
".",
"metadata",
"=",
"metadata"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/io/pytables.py#L1917-L1921
|
||
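What the np.array(..., copy=False).ravel() call above does: flatten array-like metadata to one dimension without copying when possible. Note the source predates NumPy 2, where copy=False raises if a copy is unavoidable, so this sketch feeds it an ndarray.

# Flattening metadata the way set_metadata does.
import numpy as np

meta = np.array([['a', 'b'], ['c', 'd']])
flat = np.array(meta, copy=False).ravel()  # no copy: input is already an ndarray
print(flat)        # ['a' 'b' 'c' 'd']
print(flat.shape)  # (4,)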
ChromiumWebApps/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
tools/symsrc/pefile.py
|
python
|
PE.set_word_at_offset
|
(self, offset, word)
|
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
|
Set the word value at the given file offset.
|
Set the word value at the given file offset.
|
[
"Set",
"the",
"word",
"value",
"at",
"the",
"given",
"file",
"offset",
"."
] |
def set_word_at_offset(self, offset, word):
"""Set the word value at the given file offset."""
return self.set_bytes_at_offset(offset, self.get_data_from_word(word))
|
[
"def",
"set_word_at_offset",
"(",
"self",
",",
"offset",
",",
"word",
")",
":",
"return",
"self",
".",
"set_bytes_at_offset",
"(",
"offset",
",",
"self",
".",
"get_data_from_word",
"(",
"word",
")",
")"
] |
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/symsrc/pefile.py#L3497-L3499
|
|
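A byte-level sketch of what set_word_at_offset composes: in PE files a "word" is a 16-bit little-endian value, so writing one replaces two bytes at the given offset. The struct call is an assumption about what get_data_from_word does internally.

# Writing word 0x5A4D ('MZ', the DOS signature) at offset 0.
import struct

data = bytearray(b'\x00' * 8)
offset, word = 0, 0x5A4D
data[offset:offset + 2] = struct.pack('<H', word)  # 16-bit little-endian
print(data)  # bytearray(b'MZ\x00\x00\x00\x00\x00\x00')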
thalium/icebox
|
99d147d5b9269222225443ce171b4fd46d8985d4
|
third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py
|
python
|
cleanupCharEncodingHandlers
|
()
|
Cleanup the memory allocated for the char encoding support,
it unregisters all the encoding handlers and the aliases.
|
Cleanup the memory allocated for the char encoding support,
it unregisters all the encoding handlers and the aliases.
|
[
"Cleanup",
"the",
"memory",
"allocated",
"for",
"the",
"char",
"encoding",
"support",
"it",
"unregisters",
"all",
"the",
"encoding",
"handlers",
"and",
"the",
"aliases",
"."
] |
def cleanupCharEncodingHandlers():
"""Cleanup the memory allocated for the char encoding support,
it unregisters all the encoding handlers and the aliases. """
libxml2mod.xmlCleanupCharEncodingHandlers()
|
[
"def",
"cleanupCharEncodingHandlers",
"(",
")",
":",
"libxml2mod",
".",
"xmlCleanupCharEncodingHandlers",
"(",
")"
] |
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L1116-L1119
|
||
Project-OSRM/osrm-backend
|
f2e284623e25b5570dd2a5e6985abcb3790fd348
|
third_party/flatbuffers/conanfile.py
|
python
|
FlatbuffersConan.build
|
(self)
|
Configure, build and install FlatBuffers using CMake.
|
Configure, build and install FlatBuffers using CMake.
|
[
"Configure",
"build",
"and",
"install",
"FlatBuffers",
"using",
"CMake",
"."
] |
def build(self):
"""Configure, build and install FlatBuffers using CMake.
"""
cmake = self.configure_cmake()
cmake.build()
|
[
"def",
"build",
"(",
"self",
")",
":",
"cmake",
"=",
"self",
".",
"configure_cmake",
"(",
")",
"cmake",
".",
"build",
"(",
")"
] |
https://github.com/Project-OSRM/osrm-backend/blob/f2e284623e25b5570dd2a5e6985abcb3790fd348/third_party/flatbuffers/conanfile.py#L48-L52
|
||
y123456yz/reading-and-annotate-mongodb-3.6
|
93280293672ca7586dc24af18132aa61e4ed7fcf
|
mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/FS.py
|
python
|
FileBuildInfo.prepare_dependencies
|
(self)
|
Prepares a FileBuildInfo object for explaining what changed
The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
|
Prepares a FileBuildInfo object for explaining what changed
|
[
"Prepares",
"a",
"FileBuildInfo",
"object",
"for",
"explaining",
"what",
"changed"
] |
def prepare_dependencies(self):
"""
Prepares a FileBuildInfo object for explaining what changed
The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
"""
attrs = [
('bsources', 'bsourcesigs'),
('bdepends', 'bdependsigs'),
('bimplicit', 'bimplicitsigs'),
]
for (nattr, sattr) in attrs:
try:
strings = getattr(self, nattr)
nodeinfos = getattr(self, sattr)
except AttributeError:
continue
if strings is None or nodeinfos is None:
continue
nodes = []
for s, ni in zip(strings, nodeinfos):
if not isinstance(s, SCons.Node.Node):
s = ni.str_to_node(s)
nodes.append(s)
setattr(self, nattr, nodes)
|
[
"def",
"prepare_dependencies",
"(",
"self",
")",
":",
"attrs",
"=",
"[",
"(",
"'bsources'",
",",
"'bsourcesigs'",
")",
",",
"(",
"'bdepends'",
",",
"'bdependsigs'",
")",
",",
"(",
"'bimplicit'",
",",
"'bimplicitsigs'",
")",
",",
"]",
"for",
"(",
"nattr",
",",
"sattr",
")",
"in",
"attrs",
":",
"try",
":",
"strings",
"=",
"getattr",
"(",
"self",
",",
"nattr",
")",
"nodeinfos",
"=",
"getattr",
"(",
"self",
",",
"sattr",
")",
"except",
"AttributeError",
":",
"continue",
"if",
"strings",
"is",
"None",
"or",
"nodeinfos",
"is",
"None",
":",
"continue",
"nodes",
"=",
"[",
"]",
"for",
"s",
",",
"ni",
"in",
"zip",
"(",
"strings",
",",
"nodeinfos",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"SCons",
".",
"Node",
".",
"Node",
")",
":",
"s",
"=",
"ni",
".",
"str_to_node",
"(",
"s",
")",
"nodes",
".",
"append",
"(",
"s",
")",
"setattr",
"(",
"self",
",",
"nattr",
",",
"nodes",
")"
] |
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/FS.py#L2534-L2561
|
||
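The core of prepare_dependencies is a tolerant getattr loop that rehydrates on-disk path strings into node objects. This is a generic sketch of that pattern with toy stand-ins for SCons' Node and NodeInfo types, not the SCons classes themselves.

# Toy rehydration loop: paired (names, infos) attributes are walked,
# missing pairs are skipped, and plain strings become Node objects.
class Node(str):
    pass

class NodeInfo:
    def str_to_node(self, s):
        return Node(s)

class BuildInfo:
    bsources = ['src/a.c', Node('src/b.c')]
    bsourcesigs = [NodeInfo(), NodeInfo()]

info = BuildInfo()
for nattr, sattr in [('bsources', 'bsourcesigs'), ('bdepends', 'bdependsigs')]:
    try:
        strings, infos = getattr(info, nattr), getattr(info, sattr)
    except AttributeError:
        continue  # bdepends is absent on the toy object, as it can be on disk
    setattr(info, nattr,
            [s if isinstance(s, Node) else ni.str_to_node(s)
             for s, ni in zip(strings, infos)])
print(info.bsources, all(isinstance(s, Node) for s in info.bsources))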
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/extern/pubsub.py
|
python
|
_getCallableName
|
(callable)
|
Get name for a callable, ie function, bound
method or callable instance
|
Get name for a callable, ie function, bound
method or callable instance
|
[
"Get",
"name",
"for",
"a",
"callable",
"ie",
"function",
"bound",
"method",
"or",
"callable",
"instance"
] |
def _getCallableName(callable):
"""Get name for a callable, ie function, bound
method or callable instance"""
if ismethod(callable):
return '%s.%s ' % (callable.im_self, callable.im_func.func_name)
elif isfunction(callable):
return '%s ' % callable.__name__
else:
return '%s ' % callable
|
[
"def",
"_getCallableName",
"(",
"callable",
")",
":",
"if",
"ismethod",
"(",
"callable",
")",
":",
"return",
"'%s.%s '",
"%",
"(",
"callable",
".",
"im_self",
",",
"callable",
".",
"im_func",
".",
"func_name",
")",
"elif",
"isfunction",
"(",
"callable",
")",
":",
"return",
"'%s '",
"%",
"callable",
".",
"__name__",
"else",
":",
"return",
"'%s '",
"%",
"callable"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/pubsub.py#L113-L121
|
||
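The function above relies on Python 2 attributes (im_self, im_func, func_name). A Python 3 re-sketch using the dunder equivalents, with an illustrative class name:

# Python 3 version of _getCallableName; Greeter is a made-up example class.
from inspect import isfunction, ismethod

def get_callable_name(fn):
    if ismethod(fn):
        return '%s.%s ' % (fn.__self__, fn.__func__.__name__)
    elif isfunction(fn):
        return '%s ' % fn.__name__
    return '%s ' % fn  # callable instances and builtins fall through here

class Greeter:
    def hello(self):
        pass

print(get_callable_name(Greeter().hello))  # bound-method branch
print(get_callable_name(len))              # fallback branch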
keyboardio/Kaleidoscope
|
d59604e98b2439d108647f15be52984a6837d360
|
bin/cpplint.py
|
python
|
CheckMakePairUsesDeduction
|
(filename, clean_lines, linenum, error)
|
Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
|
Check that make_pair's template arguments are deduced.
|
[
"Check",
"that",
"make_pair",
"s",
"template",
"arguments",
"are",
"deduced",
"."
] |
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
|
[
"def",
"CheckMakePairUsesDeduction",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"match",
"=",
"_RE_PATTERN_EXPLICIT_MAKEPAIR",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/explicit_make_pair'",
",",
"4",
",",
"# 4 = high confidence",
"'For C++11-compatibility, omit template arguments from make_pair'",
"' OR use pair directly OR if appropriate, construct a pair directly'",
")"
] |
https://github.com/keyboardio/Kaleidoscope/blob/d59604e98b2439d108647f15be52984a6837d360/bin/cpplint.py#L5855-L5873
|
||
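A standalone sketch of the check above. The pattern mirrors cpplint's _RE_PATTERN_EXPLICIT_MAKEPAIR: make_pair followed by an explicit '<' template argument list.

# Flagging explicit template arguments to make_pair.
import re

explicit_make_pair = re.compile(r'\bmake_pair\s*<')

for line in ['m = make_pair<int, int>(1, 2);',  # flagged
             'm = make_pair(1, 2);']:           # ok, deduced
    print(bool(explicit_make_pair.search(line)), line)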
trilinos/Trilinos
|
6168be6dd51e35e1cd681e9c4b24433e709df140
|
packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/phactori.py
|
python
|
PhactoriImagesetBlock.WriteImagesPassedOnOffFilter
|
(self, datadescription)
|
write out the .png/.jpg/whatever images associated with this imageset
block for the current timestep/state. Must loop through camera angles
and do a write for each one if necessary
|
write out the .png/.jpg/whatever images associated with this imageset
block for the current timestep/state. Must loop through camera angles
and do a write for each one if necessary
|
[
"write",
"out",
"the",
".",
"png",
"/",
".",
"jpg",
"/",
"whatever",
"images",
"associated",
"with",
"this",
"imageset",
"block",
"for",
"the",
"current",
"timestep",
"/",
"state",
".",
"Must",
"loop",
"through",
"camera",
"angles",
"and",
"do",
"a",
"write",
"for",
"each",
"one",
"if",
"necessary"
] |
def WriteImagesPassedOnOffFilter(self, datadescription):
"""write out the .png/.jpg/whatever images associated with this imageset
block for the current timestep/state. Must loop through camera angles
and do a write for each one if necessary"""
global gPipeAndViewsState
if PhactoriDbg(100):
myDebugPrint3(
"PhactoriImagesetBlock::WriteImagesPassedOnOffFilter entered\n")
for ii in range(len(self.mLookDirectionList)):
oneLookDirection = self.mLookDirectionList[ii]
oneLookDirectionFilenameAddon = self.mLookDirectionFilenameAddon[ii]
fname, fnameRR = self.mImageFileNameCountSettings.GetImageFilename(
datadescription, self.mImageSettings,
oneLookDirectionFilenameAddon,
self.mRepresentation.mFilenameAddon,
self.mCamera.mFilenameAddon)
#used to do this:
#view.ViewTime = datadescription.GetTime()
#maybe need to do this?
#UpdatePipelineWithCurrentTimeArgument()
#SetUpViewAndRepresentationBeforeWriteImage(oneViewInfo)
self.SetUpViewAndRepresentationBeforeWriteImage(oneLookDirection, ii)
#only need to update color range for first look direction, rest
#are same
if ii == 0:
#UpdateColorRangeImmediatelyBeforeWrite(phactoriImagesetName)
if self.mRepresentation.mUseFixedColorRange == False:
UseDataRangeForColorValues(self.mPvDataRepresentation2,
self.mRepresentation, self.mOperation)
for ii in range(1, len(self.mVisibleReps)):
oneVisOp = self.mVisibleOps[ii]
oneVisRep = self.mVisibleReps[ii]
oneVisPvDataRep = self.mVisiblePvDataReps[ii]
UseDataRangeForColorValues(oneVisPvDataRep,
oneVisRep, oneVisOp)
if self.mName.startswith("is_element_select") == False:
for onevisop in self.mVisibleOps:
if onevisop.mType == "nearestpoints":
onevisop.mOperationSpecifics.\
RunCalculationToFindNearestPoints(gPipeAndViewsState)
if onevisop.mType == "castnormalrays":
onevisop.mOperationSpecifics.\
RunCalculationToCastRays(gPipeAndViewsState)
UpdatePipelineWithCurrentTimeArgument(onevisop.mParaViewFilter)
if onevisop.mName == "surfaceofinterest1":
svrng = onevisop.mParaViewFilter.ThresholdRange
#onevisop.mParaViewFilter.ThresholdRange = [svrng[0]*0.5, svrng[1]*0.5]
onevisop.mParaViewFilter.ThresholdRange = [1.0, 10.0]
UpdatePipelineWithCurrentTimeArgument(onevisop.mParaViewFilter)
onevisop.mParaViewFilter.ThresholdRange = svrng
UpdatePipelineWithCurrentTimeArgument(onevisop.mParaViewFilter)
if PhactoriDbg():
firstop = self.mVisibleOps[0]
firstPvFilter = firstop.GetPvFilter()
label1 = "before WriteImage() " + fname + "\n"
DebugPrintCellAndPointArrayInfo(label1, firstPvFilter, 100)
myDebugPrint3("self.mPvDataRepresentation2.Representation:\n" + \
str(self.mPvDataRepresentation2.Representation) + "\n")
if PhactoriDbg():
myDebugPrint3("calling WriteImage() " + fname + "\n")
global gCameraTestMode
if gCameraTestMode == 1:
self.WriteOutCameraInformationForTesting(fname)
global gSkipWriteImageForTests
if gSkipWriteImageForTests == False:
WriteImage(fname, self.mSharedPvRenderView2,
Magnification=1)
GetGlobalDataArtifactTracker().AddImageToDataArtifactOutputList(fname)
if PhactoriDbg():
myDebugPrint3("returned from WriteImage()\n")
#handle datetime naming extra work to avoid race condition
if fnameRR != None:
if SmartGetLocalProcessId() == 0:
import os
os.rename(fname, fnameRR)
#hack, double write
#WriteImage(fname, self.mSharedPvRenderView2,
# Magnification=1)
#ClearPvViewAndPvRepAfterWriteImage(oneViewInfo)
self.ClearPvViewAndPvRepAfterWriteImage()
if self.mWriteFirstImageTwiceFlag == 0:
self.mWriteFirstImageTwiceFlag = 1
if PhactoriDbg(100):
myDebugPrint3(
"mWriteFirstImageTwiceFlag triggers (3) re-render of first image\n")
self.WriteImagesPassedOnOffFilter(datadescription)
if self.mName.startswith("is_dead_cells"):
timestep = datadescription.GetTimeStep()
imageBasename = self.mImageSettings.mImageBasename
imageBasedirectory = self.mImageSettings.mImageBasedirectory
import os
#dcfname = + str(timestep) + "_.csv"
lpid = SmartGetLocalProcessId()
#dcfname = imageBasedirectory + os.sep + imageBasename + \
# str(timestep) + "." + str(lpid) + "._.csv"
dcfname = imageBasedirectory + os.sep + imageBasename + \
str(timestep) + "." + str(lpid) + ".csv"
if PhactoriDbg(100):
myDebugPrint3("is_dead_cells: writing:\n" + dcfname + "\n")
rcrsnParams = self.mOperation.OutputElementListToFile(dcfname)
if PhactoriDbg(100):
myDebugPrint3("is_dead_cells: done writing:\n" + dcfname + "\n")
#write out dead cell count
global WriteDeadCellSummaryFile
if WriteDeadCellSummaryFile:
myVals = [rcrsnParams.mElementCount,
rcrsnParams.mKilledByCriteriaCount[2],
rcrsnParams.mKilledByCriteriaCount[3],
rcrsnParams.mKilledByCriteriaCount[5]]
UseReduceToSumArrayOfInts(myVals)
if lpid == 0:
if self.DeadCellIoFf == None:
dcsummaryname = imageBasedirectory + os.sep + imageBasename + \
"dead_cell_info.csv"
self.DeadCellIoFf = open(dcsummaryname, "w")
self.DeadCellIoFf.write("step, simtime, number of dead cells, " \
"killed 2, killed 3, killed 5\n")
timestep = datadescription.GetTimeStep()
simTime = gPipeAndViewsState.CurrentDatadescription.GetTime()
self.DeadCellIoFf.write(
str(timestep) + ", " + \
str(simTime) + "," + \
str(myVals[0]) + ", " + \
str(myVals[1]) + ", " + \
str(myVals[2]) + ", " + \
str(myVals[3]) + "\n")
self.DeadCellIoFf.flush()
if self.mName.startswith("is_element_select"):
timestep = datadescription.GetTimeStep()
#global gPipeAndViewsState
simTime = gPipeAndViewsState.CurrentDatadescription.GetTime()
imageBasename = self.mImageSettings.mImageBasename
imageBasedirectory = self.mImageSettings.mImageBasedirectory
import os
#dcfname = imageBasedirectory + os.sep + imageBasename + ".csv"
dcfname1 = imageBasedirectory + os.sep + imageBasename + "element.csv"
dcfname2 = imageBasedirectory + os.sep + imageBasename + "node.csv"
self.mOperation.OutputSingleElementToTimeHistoryFile(
dcfname1, dcfname2, timestep, simTime)
if PhactoriDbg(100):
myDebugPrint3(
"PhactoriImagesetBlock::WriteImagesPassedOnOffFilter returning\n")
|
[
"def",
"WriteImagesPassedOnOffFilter",
"(",
"self",
",",
"datadescription",
")",
":",
"global",
"gPipeAndViewsState",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"\"PhactoriImagesetBlock::WriteImagesPassedOnOffFilter entered\\n\"",
")",
"for",
"ii",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"mLookDirectionList",
")",
")",
":",
"oneLookDirection",
"=",
"self",
".",
"mLookDirectionList",
"[",
"ii",
"]",
"oneLookDirectionFilenameAddon",
"=",
"self",
".",
"mLookDirectionFilenameAddon",
"[",
"ii",
"]",
"fname",
",",
"fnameRR",
"=",
"self",
".",
"mImageFileNameCountSettings",
".",
"GetImageFilename",
"(",
"datadescription",
",",
"self",
".",
"mImageSettings",
",",
"oneLookDirectionFilenameAddon",
",",
"self",
".",
"mRepresentation",
".",
"mFilenameAddon",
",",
"self",
".",
"mCamera",
".",
"mFilenameAddon",
")",
"#used to do this:",
"#view.ViewTime = datadescription.GetTime()",
"#maybe need to do this?",
"#UpdatePipelineWithCurrentTimeArgument()",
"#SetUpViewAndRepresentationBeforeWriteImage(oneViewInfo)",
"self",
".",
"SetUpViewAndRepresentationBeforeWriteImage",
"(",
"oneLookDirection",
",",
"ii",
")",
"#only need to update color range for first look direction, rest",
"#are same",
"if",
"ii",
"==",
"0",
":",
"#UpdateColorRangeImmediatelyBeforeWrite(phactoriImagesetName)",
"if",
"self",
".",
"mRepresentation",
".",
"mUseFixedColorRange",
"==",
"False",
":",
"UseDataRangeForColorValues",
"(",
"self",
".",
"mPvDataRepresentation2",
",",
"self",
".",
"mRepresentation",
",",
"self",
".",
"mOperation",
")",
"for",
"ii",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"mVisibleReps",
")",
")",
":",
"oneVisOp",
"=",
"self",
".",
"mVisibleOps",
"[",
"ii",
"]",
"oneVisRep",
"=",
"self",
".",
"mVisibleReps",
"[",
"ii",
"]",
"oneVisPvDataRep",
"=",
"self",
".",
"mVisiblePvDataReps",
"[",
"ii",
"]",
"UseDataRangeForColorValues",
"(",
"oneVisPvDataRep",
",",
"oneVisRep",
",",
"oneVisOp",
")",
"if",
"self",
".",
"mName",
".",
"startswith",
"(",
"\"is_element_select\"",
")",
"==",
"False",
":",
"for",
"onevisop",
"in",
"self",
".",
"mVisibleOps",
":",
"if",
"onevisop",
".",
"mType",
"==",
"\"nearestpoints\"",
":",
"onevisop",
".",
"mOperationSpecifics",
".",
"RunCalculationToFindNearestPoints",
"(",
"gPipeAndViewsState",
")",
"if",
"onevisop",
".",
"mType",
"==",
"\"castnormalrays\"",
":",
"onevisop",
".",
"mOperationSpecifics",
".",
"RunCalculationToCastRays",
"(",
"gPipeAndViewsState",
")",
"UpdatePipelineWithCurrentTimeArgument",
"(",
"onevisop",
".",
"mParaViewFilter",
")",
"if",
"onevisop",
".",
"mName",
"==",
"\"surfaceofinterest1\"",
":",
"svrng",
"=",
"onevisop",
".",
"mParaViewFilter",
".",
"ThresholdRange",
"#onevisop.mParaViewFilter.ThresholdRange = [svrng[0]*0.5, svrng[1]*0.5]",
"onevisop",
".",
"mParaViewFilter",
".",
"ThresholdRange",
"=",
"[",
"1.0",
",",
"10.0",
"]",
"UpdatePipelineWithCurrentTimeArgument",
"(",
"onevisop",
".",
"mParaViewFilter",
")",
"onevisop",
".",
"mParaViewFilter",
".",
"ThresholdRange",
"=",
"svrng",
"UpdatePipelineWithCurrentTimeArgument",
"(",
"onevisop",
".",
"mParaViewFilter",
")",
"if",
"PhactoriDbg",
"(",
")",
":",
"firstop",
"=",
"self",
".",
"mVisibleOps",
"[",
"0",
"]",
"firstPvFilter",
"=",
"firstop",
".",
"GetPvFilter",
"(",
")",
"label1",
"=",
"\"before WriteImage() \"",
"+",
"fname",
"+",
"\"\\n\"",
"DebugPrintCellAndPointArrayInfo",
"(",
"label1",
",",
"firstPvFilter",
",",
"100",
")",
"myDebugPrint3",
"(",
"\"self.mPvDataRepresentation2.Representation:\\n\"",
"+",
"str",
"(",
"self",
".",
"mPvDataRepresentation2",
".",
"Representation",
")",
"+",
"\"\\n\"",
")",
"if",
"PhactoriDbg",
"(",
")",
":",
"myDebugPrint3",
"(",
"\"calling WriteImage() \"",
"+",
"fname",
"+",
"\"\\n\"",
")",
"global",
"gCameraTestMode",
"if",
"gCameraTestMode",
"==",
"1",
":",
"self",
".",
"WriteOutCameraInformationForTesting",
"(",
"fname",
")",
"global",
"gSkipWriteImageForTests",
"if",
"gSkipWriteImageForTests",
"==",
"False",
":",
"WriteImage",
"(",
"fname",
",",
"self",
".",
"mSharedPvRenderView2",
",",
"Magnification",
"=",
"1",
")",
"GetGlobalDataArtifactTracker",
"(",
")",
".",
"AddImageToDataArtifactOutputList",
"(",
"fname",
")",
"if",
"PhactoriDbg",
"(",
")",
":",
"myDebugPrint3",
"(",
"\"returned from WriteImage()\\n\"",
")",
"#handle datetime naming extra work to avoid race condition",
"if",
"fnameRR",
"!=",
"None",
":",
"if",
"SmartGetLocalProcessId",
"(",
")",
"==",
"0",
":",
"import",
"os",
"os",
".",
"rename",
"(",
"fname",
",",
"fnameRR",
")",
"#hack, double write",
"#WriteImage(fname, self.mSharedPvRenderView2,",
"# Magnification=1)",
"#ClearPvViewAndPvRepAfterWriteImage(oneViewInfo)",
"self",
".",
"ClearPvViewAndPvRepAfterWriteImage",
"(",
")",
"if",
"self",
".",
"mWriteFirstImageTwiceFlag",
"==",
"0",
":",
"self",
".",
"mWriteFirstImageTwiceFlag",
"=",
"1",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"\"mWriteFirstImageTwiceFlag triggers (3) re-render of first image\\n\"",
")",
"self",
".",
"WriteImagesPassedOnOffFilter",
"(",
"datadescription",
")",
"if",
"self",
".",
"mName",
".",
"startswith",
"(",
"\"is_dead_cells\"",
")",
":",
"timestep",
"=",
"datadescription",
".",
"GetTimeStep",
"(",
")",
"imageBasename",
"=",
"self",
".",
"mImageSettings",
".",
"mImageBasename",
"imageBasedirectory",
"=",
"self",
".",
"mImageSettings",
".",
"mImageBasedirectory",
"import",
"os",
"#dcfname = + str(timestep) + \"_.csv\"",
"lpid",
"=",
"SmartGetLocalProcessId",
"(",
")",
"#dcfname = imageBasedirectory + os.sep + imageBasename + \\",
"# str(timestep) + \".\" + str(lpid) + \"._.csv\"",
"dcfname",
"=",
"imageBasedirectory",
"+",
"os",
".",
"sep",
"+",
"imageBasename",
"+",
"str",
"(",
"timestep",
")",
"+",
"\".\"",
"+",
"str",
"(",
"lpid",
")",
"+",
"\".csv\"",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"\"is_dead_cells: writing:\\n\"",
"+",
"dcfname",
"+",
"\"\\n\"",
")",
"rcrsnParams",
"=",
"self",
".",
"mOperation",
".",
"OutputElementListToFile",
"(",
"dcfname",
")",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"\"is_dead_cells: done writing:\\n\"",
"+",
"dcfname",
"+",
"\"\\n\"",
")",
"#write out dead cell count",
"global",
"WriteDeadCellSummaryFile",
"if",
"WriteDeadCellSummaryFile",
":",
"myVals",
"=",
"[",
"rcrsnParams",
".",
"mElementCount",
",",
"rcrsnParams",
".",
"mKilledByCriteriaCount",
"[",
"2",
"]",
",",
"rcrsnParams",
".",
"mKilledByCriteriaCount",
"[",
"3",
"]",
",",
"rcrsnParams",
".",
"mKilledByCriteriaCount",
"[",
"5",
"]",
"]",
"UseReduceToSumArrayOfInts",
"(",
"myVals",
")",
"if",
"lpid",
"==",
"0",
":",
"if",
"self",
".",
"DeadCellIoFf",
"==",
"None",
":",
"dcsummaryname",
"=",
"imageBasedirectory",
"+",
"os",
".",
"sep",
"+",
"imageBasename",
"+",
"\"dead_cell_info.csv\"",
"self",
".",
"DeadCellIoFf",
"=",
"open",
"(",
"dcsummaryname",
",",
"\"w\"",
")",
"self",
".",
"DeadCellIoFf",
".",
"write",
"(",
"\"step, simtime, number of dead cells, \"",
"\"killed 2, killed 3, killed 5\\n\"",
")",
"timestep",
"=",
"datadescription",
".",
"GetTimeStep",
"(",
")",
"simTime",
"=",
"gPipeAndViewsState",
".",
"CurrentDatadescription",
".",
"GetTime",
"(",
")",
"self",
".",
"DeadCellIoFf",
".",
"write",
"(",
"str",
"(",
"timestep",
")",
"+",
"\", \"",
"+",
"str",
"(",
"simTime",
")",
"+",
"\",\"",
"+",
"str",
"(",
"myVals",
"[",
"0",
"]",
")",
"+",
"\", \"",
"+",
"str",
"(",
"myVals",
"[",
"1",
"]",
")",
"+",
"\", \"",
"+",
"str",
"(",
"myVals",
"[",
"2",
"]",
")",
"+",
"\", \"",
"+",
"str",
"(",
"myVals",
"[",
"3",
"]",
")",
"+",
"\"\\n\"",
")",
"self",
".",
"DeadCellIoFf",
".",
"flush",
"(",
")",
"if",
"self",
".",
"mName",
".",
"startswith",
"(",
"\"is_element_select\"",
")",
":",
"timestep",
"=",
"datadescription",
".",
"GetTimeStep",
"(",
")",
"#global gPipeAndViewsState",
"simTime",
"=",
"gPipeAndViewsState",
".",
"CurrentDatadescription",
".",
"GetTime",
"(",
")",
"imageBasename",
"=",
"self",
".",
"mImageSettings",
".",
"mImageBasename",
"imageBasedirectory",
"=",
"self",
".",
"mImageSettings",
".",
"mImageBasedirectory",
"import",
"os",
"#dcfname = imageBasedirectory + os.sep + imageBasename + \".csv\"",
"dcfname1",
"=",
"imageBasedirectory",
"+",
"os",
".",
"sep",
"+",
"imageBasename",
"+",
"\"element.csv\"",
"dcfname2",
"=",
"imageBasedirectory",
"+",
"os",
".",
"sep",
"+",
"imageBasename",
"+",
"\"node.csv\"",
"self",
".",
"mOperation",
".",
"OutputSingleElementToTimeHistoryFile",
"(",
"dcfname1",
",",
"dcfname2",
",",
"timestep",
",",
"simTime",
")",
"if",
"PhactoriDbg",
"(",
"100",
")",
":",
"myDebugPrint3",
"(",
"\"PhactoriImagesetBlock::WriteImagesPassedOnOffFilter returning\\n\"",
")"
] |
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/phactori.py#L9527-L9690
|
||
nasa/astrobee
|
9241e67e6692810d6e275abb3165b6d02f4ca5ef
|
scripts/git/cpplint.py
|
python
|
FileInfo.Split
|
(self)
|
return (project,) + os.path.splitext(rest)
|
Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
|
Splits the file into the directory, basename, and extension.
|
[
"Splits",
"the",
"file",
"into",
"the",
"directory",
"basename",
"and",
"extension",
"."
] |
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
|
[
"def",
"Split",
"(",
"self",
")",
":",
"googlename",
"=",
"self",
".",
"RepositoryName",
"(",
")",
"project",
",",
"rest",
"=",
"os",
".",
"path",
".",
"split",
"(",
"googlename",
")",
"return",
"(",
"project",
",",
")",
"+",
"os",
".",
"path",
".",
"splitext",
"(",
"rest",
")"
] |
https://github.com/nasa/astrobee/blob/9241e67e6692810d6e275abb3165b6d02f4ca5ef/scripts/git/cpplint.py#L1078-L1090
|
|
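Split() is just two stdlib calls chained; here is the docstring's own example reproduced directly:

# Decomposing 'chrome/browser/browser.cc' the way Split() does.
import os

googlename = 'chrome/browser/browser.cc'
project, rest = os.path.split(googlename)
print((project,) + os.path.splitext(rest))  # ('chrome/browser', 'browser', '.cc')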
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scikit-learn/py3/sklearn/linear_model/_logistic.py
|
python
|
LogisticRegression.fit
|
(self, X, y, sample_weight=None)
|
return self
|
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self
Fitted estimator.
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
|
Fit the model according to the given training data.
|
[
"Fit",
"the",
"model",
"according",
"to",
"the",
"given",
"training",
"data",
"."
] |
def fit(self, X, y, sample_weight=None):
"""
Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,) default=None
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self
Fitted estimator.
Notes
-----
The SAGA solver supports both float64 and float32 bit arrays.
"""
solver = _check_solver(self.solver, self.penalty, self.dual)
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if self.penalty == 'elasticnet':
if (not isinstance(self.l1_ratio, numbers.Number) or
self.l1_ratio < 0 or self.l1_ratio > 1):
raise ValueError("l1_ratio must be between 0 and 1;"
" got (l1_ratio=%r)" % self.l1_ratio)
elif self.l1_ratio is not None:
warnings.warn("l1_ratio parameter is only used when penalty is "
"'elasticnet'. Got "
"(penalty={})".format(self.penalty))
if self.penalty == 'none':
if self.C != 1.0: # default values
warnings.warn(
"Setting penalty='none' will ignore the C and l1_ratio "
"parameters"
)
# Note that check for l1_ratio is done right above
C_ = np.inf
penalty = 'l2'
else:
C_ = self.C
penalty = self.penalty
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
if solver == 'lbfgs':
_dtype = np.float64
else:
_dtype = [np.float64, np.float32]
X, y = check_X_y(X, y, accept_sparse='csr', dtype=_dtype, order="C",
accept_large_sparse=solver != 'liblinear')
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
multi_class = _check_multi_class(self.multi_class, solver,
len(self.classes_))
if solver == 'liblinear':
if effective_n_jobs(self.n_jobs) != 1:
warnings.warn("'n_jobs' > 1 does not have any effect when"
" 'solver' is set to 'liblinear'. Got 'n_jobs'"
" = {}.".format(effective_n_jobs(self.n_jobs)))
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if solver in ['sag', 'saga']:
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(_logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
if solver in ['sag', 'saga']:
prefer = 'threads'
else:
prefer = 'processes'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
**_joblib_parallel_args(prefer=prefer))(
path_func(X, y, pos_class=class_, Cs=[C_],
l1_ratio=self.l1_ratio, fit_intercept=self.fit_intercept,
tol=self.tol, verbose=self.verbose, solver=solver,
multi_class=multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
penalty=penalty, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"sample_weight",
"=",
"None",
")",
":",
"solver",
"=",
"_check_solver",
"(",
"self",
".",
"solver",
",",
"self",
".",
"penalty",
",",
"self",
".",
"dual",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"C",
",",
"numbers",
".",
"Number",
")",
"or",
"self",
".",
"C",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Penalty term must be positive; got (C=%r)\"",
"%",
"self",
".",
"C",
")",
"if",
"self",
".",
"penalty",
"==",
"'elasticnet'",
":",
"if",
"(",
"not",
"isinstance",
"(",
"self",
".",
"l1_ratio",
",",
"numbers",
".",
"Number",
")",
"or",
"self",
".",
"l1_ratio",
"<",
"0",
"or",
"self",
".",
"l1_ratio",
">",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"l1_ratio must be between 0 and 1;\"",
"\" got (l1_ratio=%r)\"",
"%",
"self",
".",
"l1_ratio",
")",
"elif",
"self",
".",
"l1_ratio",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"l1_ratio parameter is only used when penalty is \"",
"\"'elasticnet'. Got \"",
"\"(penalty={})\"",
".",
"format",
"(",
"self",
".",
"penalty",
")",
")",
"if",
"self",
".",
"penalty",
"==",
"'none'",
":",
"if",
"self",
".",
"C",
"!=",
"1.0",
":",
"# default values",
"warnings",
".",
"warn",
"(",
"\"Setting penalty='none' will ignore the C and l1_ratio \"",
"\"parameters\"",
")",
"# Note that check for l1_ratio is done right above",
"C_",
"=",
"np",
".",
"inf",
"penalty",
"=",
"'l2'",
"else",
":",
"C_",
"=",
"self",
".",
"C",
"penalty",
"=",
"self",
".",
"penalty",
"if",
"not",
"isinstance",
"(",
"self",
".",
"max_iter",
",",
"numbers",
".",
"Number",
")",
"or",
"self",
".",
"max_iter",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Maximum number of iteration must be positive;\"",
"\" got (max_iter=%r)\"",
"%",
"self",
".",
"max_iter",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"tol",
",",
"numbers",
".",
"Number",
")",
"or",
"self",
".",
"tol",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Tolerance for stopping criteria must be \"",
"\"positive; got (tol=%r)\"",
"%",
"self",
".",
"tol",
")",
"if",
"solver",
"==",
"'lbfgs'",
":",
"_dtype",
"=",
"np",
".",
"float64",
"else",
":",
"_dtype",
"=",
"[",
"np",
".",
"float64",
",",
"np",
".",
"float32",
"]",
"X",
",",
"y",
"=",
"check_X_y",
"(",
"X",
",",
"y",
",",
"accept_sparse",
"=",
"'csr'",
",",
"dtype",
"=",
"_dtype",
",",
"order",
"=",
"\"C\"",
",",
"accept_large_sparse",
"=",
"solver",
"!=",
"'liblinear'",
")",
"check_classification_targets",
"(",
"y",
")",
"self",
".",
"classes_",
"=",
"np",
".",
"unique",
"(",
"y",
")",
"n_samples",
",",
"n_features",
"=",
"X",
".",
"shape",
"multi_class",
"=",
"_check_multi_class",
"(",
"self",
".",
"multi_class",
",",
"solver",
",",
"len",
"(",
"self",
".",
"classes_",
")",
")",
"if",
"solver",
"==",
"'liblinear'",
":",
"if",
"effective_n_jobs",
"(",
"self",
".",
"n_jobs",
")",
"!=",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"'n_jobs' > 1 does not have any effect when\"",
"\" 'solver' is set to 'liblinear'. Got 'n_jobs'\"",
"\" = {}.\"",
".",
"format",
"(",
"effective_n_jobs",
"(",
"self",
".",
"n_jobs",
")",
")",
")",
"self",
".",
"coef_",
",",
"self",
".",
"intercept_",
",",
"n_iter_",
"=",
"_fit_liblinear",
"(",
"X",
",",
"y",
",",
"self",
".",
"C",
",",
"self",
".",
"fit_intercept",
",",
"self",
".",
"intercept_scaling",
",",
"self",
".",
"class_weight",
",",
"self",
".",
"penalty",
",",
"self",
".",
"dual",
",",
"self",
".",
"verbose",
",",
"self",
".",
"max_iter",
",",
"self",
".",
"tol",
",",
"self",
".",
"random_state",
",",
"sample_weight",
"=",
"sample_weight",
")",
"self",
".",
"n_iter_",
"=",
"np",
".",
"array",
"(",
"[",
"n_iter_",
"]",
")",
"return",
"self",
"if",
"solver",
"in",
"[",
"'sag'",
",",
"'saga'",
"]",
":",
"max_squared_sum",
"=",
"row_norms",
"(",
"X",
",",
"squared",
"=",
"True",
")",
".",
"max",
"(",
")",
"else",
":",
"max_squared_sum",
"=",
"None",
"n_classes",
"=",
"len",
"(",
"self",
".",
"classes_",
")",
"classes_",
"=",
"self",
".",
"classes_",
"if",
"n_classes",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"This solver needs samples of at least 2 classes\"",
"\" in the data, but the data contains only one\"",
"\" class: %r\"",
"%",
"classes_",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"self",
".",
"classes_",
")",
"==",
"2",
":",
"n_classes",
"=",
"1",
"classes_",
"=",
"classes_",
"[",
"1",
":",
"]",
"if",
"self",
".",
"warm_start",
":",
"warm_start_coef",
"=",
"getattr",
"(",
"self",
",",
"'coef_'",
",",
"None",
")",
"else",
":",
"warm_start_coef",
"=",
"None",
"if",
"warm_start_coef",
"is",
"not",
"None",
"and",
"self",
".",
"fit_intercept",
":",
"warm_start_coef",
"=",
"np",
".",
"append",
"(",
"warm_start_coef",
",",
"self",
".",
"intercept_",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
",",
"axis",
"=",
"1",
")",
"self",
".",
"coef_",
"=",
"list",
"(",
")",
"self",
".",
"intercept_",
"=",
"np",
".",
"zeros",
"(",
"n_classes",
")",
"# Hack so that we iterate only once for the multinomial case.",
"if",
"multi_class",
"==",
"'multinomial'",
":",
"classes_",
"=",
"[",
"None",
"]",
"warm_start_coef",
"=",
"[",
"warm_start_coef",
"]",
"if",
"warm_start_coef",
"is",
"None",
":",
"warm_start_coef",
"=",
"[",
"None",
"]",
"*",
"n_classes",
"path_func",
"=",
"delayed",
"(",
"_logistic_regression_path",
")",
"# The SAG solver releases the GIL so it's more efficient to use",
"# threads for this solver.",
"if",
"solver",
"in",
"[",
"'sag'",
",",
"'saga'",
"]",
":",
"prefer",
"=",
"'threads'",
"else",
":",
"prefer",
"=",
"'processes'",
"fold_coefs_",
"=",
"Parallel",
"(",
"n_jobs",
"=",
"self",
".",
"n_jobs",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"*",
"*",
"_joblib_parallel_args",
"(",
"prefer",
"=",
"prefer",
")",
")",
"(",
"path_func",
"(",
"X",
",",
"y",
",",
"pos_class",
"=",
"class_",
",",
"Cs",
"=",
"[",
"C_",
"]",
",",
"l1_ratio",
"=",
"self",
".",
"l1_ratio",
",",
"fit_intercept",
"=",
"self",
".",
"fit_intercept",
",",
"tol",
"=",
"self",
".",
"tol",
",",
"verbose",
"=",
"self",
".",
"verbose",
",",
"solver",
"=",
"solver",
",",
"multi_class",
"=",
"multi_class",
",",
"max_iter",
"=",
"self",
".",
"max_iter",
",",
"class_weight",
"=",
"self",
".",
"class_weight",
",",
"check_input",
"=",
"False",
",",
"random_state",
"=",
"self",
".",
"random_state",
",",
"coef",
"=",
"warm_start_coef_",
",",
"penalty",
"=",
"penalty",
",",
"max_squared_sum",
"=",
"max_squared_sum",
",",
"sample_weight",
"=",
"sample_weight",
")",
"for",
"class_",
",",
"warm_start_coef_",
"in",
"zip",
"(",
"classes_",
",",
"warm_start_coef",
")",
")",
"fold_coefs_",
",",
"_",
",",
"n_iter_",
"=",
"zip",
"(",
"*",
"fold_coefs_",
")",
"self",
".",
"n_iter_",
"=",
"np",
".",
"asarray",
"(",
"n_iter_",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"[",
":",
",",
"0",
"]",
"if",
"multi_class",
"==",
"'multinomial'",
":",
"self",
".",
"coef_",
"=",
"fold_coefs_",
"[",
"0",
"]",
"[",
"0",
"]",
"else",
":",
"self",
".",
"coef_",
"=",
"np",
".",
"asarray",
"(",
"fold_coefs_",
")",
"self",
".",
"coef_",
"=",
"self",
".",
"coef_",
".",
"reshape",
"(",
"n_classes",
",",
"n_features",
"+",
"int",
"(",
"self",
".",
"fit_intercept",
")",
")",
"if",
"self",
".",
"fit_intercept",
":",
"self",
".",
"intercept_",
"=",
"self",
".",
"coef_",
"[",
":",
",",
"-",
"1",
"]",
"self",
".",
"coef_",
"=",
"self",
".",
"coef_",
"[",
":",
",",
":",
"-",
"1",
"]",
"return",
"self"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/linear_model/_logistic.py#L1459-L1617
|
|
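Minimal usage of LogisticRegression.fit on synthetic data; the make_classification parameters are arbitrary illustration values.

# Fitting a binary classifier; the binary case stores one coefficient row.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=4, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X, y)
print(clf.coef_.shape)  # (1, 4)
print(clf.classes_)     # [0 1]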
aws/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
dev/Tools/Python/3.7.10/windows/Lib/pathlib.py
|
python
|
PurePath.relative_to
|
(self, *other)
|
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
|
Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
|
Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
|
[
"Return",
"the",
"relative",
"path",
"to",
"another",
"path",
"identified",
"by",
"the",
"passed",
"arguments",
".",
"If",
"the",
"operation",
"is",
"not",
"possible",
"(",
"because",
"this",
"is",
"not",
"a",
"subpath",
"of",
"the",
"other",
"path",
")",
"raise",
"ValueError",
"."
] |
def relative_to(self, *other):
"""Return the relative path to another path identified by the passed
arguments. If the operation is not possible (because this is not
a subpath of the other path), raise ValueError.
"""
# For the purpose of this method, drive and root are considered
# separate parts, i.e.:
# Path('c:/').relative_to('c:') gives Path('/')
# Path('c:/').relative_to('/') raise ValueError
if not other:
raise TypeError("need at least one argument")
parts = self._parts
drv = self._drv
root = self._root
if root:
abs_parts = [drv, root] + parts[1:]
else:
abs_parts = parts
to_drv, to_root, to_parts = self._parse_args(other)
if to_root:
to_abs_parts = [to_drv, to_root] + to_parts[1:]
else:
to_abs_parts = to_parts
n = len(to_abs_parts)
cf = self._flavour.casefold_parts
if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts):
formatted = self._format_parsed_parts(to_drv, to_root, to_parts)
raise ValueError("{!r} does not start with {!r}"
.format(str(self), str(formatted)))
return self._from_parsed_parts('', root if n == 1 else '',
abs_parts[n:])
|
[
"def",
"relative_to",
"(",
"self",
",",
"*",
"other",
")",
":",
"# For the purpose of this method, drive and root are considered",
"# separate parts, i.e.:",
"# Path('c:/').relative_to('c:') gives Path('/')",
"# Path('c:/').relative_to('/') raise ValueError",
"if",
"not",
"other",
":",
"raise",
"TypeError",
"(",
"\"need at least one argument\"",
")",
"parts",
"=",
"self",
".",
"_parts",
"drv",
"=",
"self",
".",
"_drv",
"root",
"=",
"self",
".",
"_root",
"if",
"root",
":",
"abs_parts",
"=",
"[",
"drv",
",",
"root",
"]",
"+",
"parts",
"[",
"1",
":",
"]",
"else",
":",
"abs_parts",
"=",
"parts",
"to_drv",
",",
"to_root",
",",
"to_parts",
"=",
"self",
".",
"_parse_args",
"(",
"other",
")",
"if",
"to_root",
":",
"to_abs_parts",
"=",
"[",
"to_drv",
",",
"to_root",
"]",
"+",
"to_parts",
"[",
"1",
":",
"]",
"else",
":",
"to_abs_parts",
"=",
"to_parts",
"n",
"=",
"len",
"(",
"to_abs_parts",
")",
"cf",
"=",
"self",
".",
"_flavour",
".",
"casefold_parts",
"if",
"(",
"root",
"or",
"drv",
")",
"if",
"n",
"==",
"0",
"else",
"cf",
"(",
"abs_parts",
"[",
":",
"n",
"]",
")",
"!=",
"cf",
"(",
"to_abs_parts",
")",
":",
"formatted",
"=",
"self",
".",
"_format_parsed_parts",
"(",
"to_drv",
",",
"to_root",
",",
"to_parts",
")",
"raise",
"ValueError",
"(",
"\"{!r} does not start with {!r}\"",
".",
"format",
"(",
"str",
"(",
"self",
")",
",",
"str",
"(",
"formatted",
")",
")",
")",
"return",
"self",
".",
"_from_parsed_parts",
"(",
"''",
",",
"root",
"if",
"n",
"==",
"1",
"else",
"''",
",",
"abs_parts",
"[",
"n",
":",
"]",
")"
] |
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/pathlib.py#L872-L902
|
|
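relative_to in action, including the ValueError branch the docstring promises for non-subpaths:

# Pure path arithmetic; no filesystem access needed.
from pathlib import PurePosixPath

p = PurePosixPath('/etc/passwd')
print(p.relative_to('/'))     # etc/passwd
print(p.relative_to('/etc'))  # passwd
try:
    p.relative_to('/usr')
except ValueError as exc:
    print(exc)                # '/etc/passwd' does not start with '/usr'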
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/python/ops/control_flow_ops.py
|
python
|
ControlFlowState.ZerosLikeForExit
|
(self, val)
|
return result
|
Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
A zero tensor of the same shape of val.
|
Create zeros_like gradient for a loop exit.
|
[
"Create",
"zeros_like",
"gradient",
"for",
"a",
"loop",
"exit",
"."
] |
def ZerosLikeForExit(self, val):
"""Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
A zero tensor of the same shape of val.
"""
val_shape = val.get_shape()
forward_ctxt = val.op._get_control_flow_context()
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
if outer_grad_state:
# This is a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape in the right context.
outer_grad_state.grad_context.Enter()
result = array_ops.zeros(val_shape.dims, val.dtype)
outer_grad_state.grad_context.Exit()
else:
# Only the shape of value is needed for backprop.
forward_ctxt.outer_context.Enter()
shape = array_ops.shape_internal(val, optimize=False)
forward_ctxt.outer_context.Exit()
# Save the shape to a stack.
history_shape = outer_grad_state.AddForwardAccumulator(shape)
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
else:
# This is not a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape.
result = array_ops.zeros(val_shape.dims, val.dtype)
else:
result = array_ops.zeros_like(val, optimize=False)
return result
|
[
"def",
"ZerosLikeForExit",
"(",
"self",
",",
"val",
")",
":",
"val_shape",
"=",
"val",
".",
"get_shape",
"(",
")",
"forward_ctxt",
"=",
"val",
".",
"op",
".",
"_get_control_flow_context",
"(",
")",
"outer_forward_ctxt",
"=",
"forward_ctxt",
".",
"outer_context",
"if",
"outer_forward_ctxt",
":",
"outer_forward_ctxt",
"=",
"outer_forward_ctxt",
".",
"GetWhileContext",
"(",
")",
"outer_grad_state",
"=",
"None",
"if",
"outer_forward_ctxt",
":",
"outer_grad_state",
"=",
"self",
".",
"_map",
".",
"get",
"(",
"outer_forward_ctxt",
")",
"if",
"outer_grad_state",
":",
"# This is a nested loop.",
"if",
"val_shape",
".",
"is_fully_defined",
"(",
")",
":",
"# If the shape is known statically, just create a zero tensor",
"# with the right shape in the right context.",
"outer_grad_state",
".",
"grad_context",
".",
"Enter",
"(",
")",
"result",
"=",
"array_ops",
".",
"zeros",
"(",
"val_shape",
".",
"dims",
",",
"val",
".",
"dtype",
")",
"outer_grad_state",
".",
"grad_context",
".",
"Exit",
"(",
")",
"else",
":",
"# Only the shape of value is needed for backprop.",
"forward_ctxt",
".",
"outer_context",
".",
"Enter",
"(",
")",
"shape",
"=",
"array_ops",
".",
"shape_internal",
"(",
"val",
",",
"optimize",
"=",
"False",
")",
"forward_ctxt",
".",
"outer_context",
".",
"Exit",
"(",
")",
"# Save the shape to a stack.",
"history_shape",
"=",
"outer_grad_state",
".",
"AddForwardAccumulator",
"(",
"shape",
")",
"# Get the shape back from the stack.",
"outer_grad_ctxt",
"=",
"outer_grad_state",
".",
"grad_context",
"outer_grad_ctxt",
".",
"Enter",
"(",
")",
"real_shape",
"=",
"outer_grad_state",
".",
"AddBackpropAccumulatedValue",
"(",
"history_shape",
",",
"shape",
")",
"result",
"=",
"array_ops",
".",
"zeros",
"(",
"real_shape",
",",
"val",
".",
"dtype",
")",
"outer_grad_ctxt",
".",
"Exit",
"(",
")",
"else",
":",
"# This is not a nested loop.",
"if",
"val_shape",
".",
"is_fully_defined",
"(",
")",
":",
"# If the shape is known statically, just create a zero tensor",
"# with the right shape.",
"result",
"=",
"array_ops",
".",
"zeros",
"(",
"val_shape",
".",
"dims",
",",
"val",
".",
"dtype",
")",
"else",
":",
"result",
"=",
"array_ops",
".",
"zeros_like",
"(",
"val",
",",
"optimize",
"=",
"False",
")",
"return",
"result"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/control_flow_ops.py#L1137-L1190
|
|
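The static-versus-dynamic shape split above can be illustrated outside TensorFlow. This numpy sketch is only a conceptual parallel of the branch structure, not the graph-mode logic, and zeros_like_for_exit is a made-up name.

# With a statically known shape, build zeros directly; otherwise query
# the value's shape at runtime first.
import numpy as np

def zeros_like_for_exit(val, static_shape=None):
    if static_shape is not None:
        return np.zeros(static_shape, dtype=val.dtype)   # shape fully defined
    return np.zeros(np.shape(val), dtype=val.dtype)      # dynamic fallback

x = np.ones((2, 3), dtype=np.float32)
print(zeros_like_for_exit(x).shape)            # (2, 3)
print(zeros_like_for_exit(x, (2, 3)).shape)    # (2, 3)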
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/distributions/util.py
|
python
|
log_combinations
|
(n, counts, name="log_combinations")
|
Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / sum_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
|
Multinomial coefficient.
|
[
"Multinomial",
"coefficient",
"."
] |
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / sum_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / sum(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name="n")
counts = ops.convert_to_tensor(counts, name="counts")
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
|
[
"def",
"log_combinations",
"(",
"n",
",",
"counts",
",",
"name",
"=",
"\"log_combinations\"",
")",
":",
"# First a bit about the number of ways counts could have come in:",
"# E.g. if counts = [1, 2], then this is 3 choose 2.",
"# In general, this is (sum counts)! / sum(counts!)",
"# The sum should be along the last dimension of counts. This is the",
"# \"distribution\" dimension. Here n a priori represents the sum of counts.",
"with",
"ops",
".",
"name_scope",
"(",
"name",
",",
"values",
"=",
"[",
"n",
",",
"counts",
"]",
")",
":",
"n",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"n",
",",
"name",
"=",
"\"n\"",
")",
"counts",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"counts",
",",
"name",
"=",
"\"counts\"",
")",
"total_permutations",
"=",
"math_ops",
".",
"lgamma",
"(",
"n",
"+",
"1",
")",
"counts_factorial",
"=",
"math_ops",
".",
"lgamma",
"(",
"counts",
"+",
"1",
")",
"redundant_permutations",
"=",
"math_ops",
".",
"reduce_sum",
"(",
"counts_factorial",
",",
"axis",
"=",
"[",
"-",
"1",
"]",
")",
"return",
"total_permutations",
"-",
"redundant_permutations"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/distributions/util.py#L487-L518
|
||
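A pure-Python numerical check of the identity above: log C(n; n_1..n_k) = lgamma(n+1) - sum_i lgamma(n_i+1). For counts [1, 2] this is the docstring's "3 choose 2" case, log 3.

# Verifying the lgamma form of the multinomial coefficient.
import math

counts = [1, 2]
n = sum(counts)
log_coeff = math.lgamma(n + 1) - sum(math.lgamma(c + 1) for c in counts)
print(math.exp(log_coeff))  # ~3.0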
ChromiumWebApps/chromium
|
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
|
tools/cr/cr/base/host.py
|
python
|
Host.Matches
|
(self)
|
return False
|
Detects whether this is the correct host implementation.
This method is overridden by the concrete implementations.
Returns:
true if the plugin matches the machine it is running on.
|
Detects whether this is the correct host implementation.
|
[
"Detects",
"whether",
"this",
"is",
"the",
"correct",
"host",
"implementation",
"."
] |
def Matches(self):
"""Detects whether this is the correct host implementation.
This method is overridden by the concrete implementations.
Returns:
true if the plugin matches the machine it is running on.
"""
return False
|
[
"def",
"Matches",
"(",
"self",
")",
":",
"return",
"False"
] |
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/cr/cr/base/host.py#L29-L36
|
|
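A hedged sketch of the plugin pattern Matches() enables: concrete hosts override it, and the framework picks the first one that matches. The LinuxHost class and the sys.platform test are illustrative, not from the cr source.

# Base returns False; a concrete host claims the machine it runs on.
import sys

class Host:
    def Matches(self):
        return False

class LinuxHost(Host):
    def Matches(self):
        return sys.platform.startswith('linux')

host = next((h for h in (LinuxHost(),) if h.Matches()), Host())
print(type(host).__name__)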
hanpfei/chromium-net
|
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
|
tools/perf/profile_creators/profile_safe_url_list.py
|
python
|
GetSafeUrls
|
()
|
Returns a list of safe urls by loading them from a pre-generated file.
|
Returns a list of safe urls by loading them from a pre-generated file.
|
[
"Returns",
"a",
"list",
"of",
"safe",
"urls",
"by",
"loading",
"them",
"from",
"a",
"pre",
"-",
"generated",
"file",
"."
] |
def GetSafeUrls():
"""Returns a list of safe urls by loading them from a pre-generated file."""
safe_url_dir = os.path.dirname(os.path.realpath(__file__))
safe_url_path = os.path.join(safe_url_dir, "profile_safe_url_list.json")
with open(safe_url_path, "r") as safe_url_file:
return json.load(safe_url_file)
|
[
"def",
"GetSafeUrls",
"(",
")",
":",
"safe_url_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"safe_url_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"safe_url_dir",
",",
"\"profile_safe_url_list.json\"",
")",
"with",
"open",
"(",
"safe_url_path",
",",
"\"r\"",
")",
"as",
"safe_url_file",
":",
"return",
"json",
".",
"load",
"(",
"safe_url_file",
")"
] |
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/perf/profile_creators/profile_safe_url_list.py#L21-L26
|
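The pattern worth noting here is resolving the data file relative to the module rather than the working directory. A minimal standalone sketch (load_bundled_json and its filename argument are hypothetical):

import json
import os

def load_bundled_json(filename):
    # Anchor the lookup to this module's directory so it works no matter
    # where the caller's process was started from.
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), "r") as f:
        return json.load(f)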
||
RobotLocomotion/drake
|
0e18a34604c45ed65bc9018a54f7610f91cdad5b
|
tools/lint/drakelint.py
|
python
|
_check_unguarded_openmp_uses
|
(filename)
|
return 0
|
Return 0 if all OpenMP uses in @p filename are properly guarded by
#if defined(_OPENMP), and 1 otherwise.
|
Return 0 if all OpenMP uses in
|
[
"Return",
"0",
"if",
"all",
"OpenMP",
"uses",
"in"
] |
def _check_unguarded_openmp_uses(filename):
"""Return 0 if all OpenMP uses in @p filename are properly guarded by
#if defined(_OPENMP), and 1 otherwise.
"""
openmp_include = "#include <omp.h>"
openmp_pragma = "#pragma omp"
openmp_pre_guard = "#if defined(_OPENMP)"
openmp_post_guard = "#endif"
with open(filename, mode='r', encoding='utf-8') as file:
lines = file.readlines()
for index, current_line in enumerate(lines):
if openmp_include in current_line or openmp_pragma in current_line:
previous_line = lines[index - 1] if (index - 1) >= 0 else ""
next_line = lines[index + 1] if (index + 1) < len(lines) else ""
missing_pre_guard = previous_line.strip() != openmp_pre_guard
missing_post_guard = next_line.strip() != openmp_post_guard
if missing_pre_guard or missing_post_guard:
print(f"ERROR: {filename}:{index + 1}: "
"OpenMP includes and directives must be guarded by "
f"{openmp_pre_guard} on the previous line and "
f"{openmp_post_guard} on the following line")
return 1
return 0
|
[
"def",
"_check_unguarded_openmp_uses",
"(",
"filename",
")",
":",
"openmp_include",
"=",
"\"#include <omp.h>\"",
"openmp_pragma",
"=",
"\"#pragma omp\"",
"openmp_pre_guard",
"=",
"\"#if defined(_OPENMP)\"",
"openmp_post_guard",
"=",
"\"#endif\"",
"with",
"open",
"(",
"filename",
",",
"mode",
"=",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"file",
":",
"lines",
"=",
"file",
".",
"readlines",
"(",
")",
"for",
"index",
",",
"current_line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"openmp_include",
"in",
"current_line",
"or",
"openmp_pragma",
"in",
"current_line",
":",
"previous_line",
"=",
"lines",
"[",
"index",
"-",
"1",
"]",
"if",
"(",
"index",
"-",
"1",
")",
">=",
"0",
"else",
"\"\"",
"next_line",
"=",
"lines",
"[",
"index",
"+",
"1",
"]",
"if",
"(",
"index",
"+",
"1",
")",
"<",
"len",
"(",
"lines",
")",
"else",
"\"\"",
"missing_pre_guard",
"=",
"previous_line",
".",
"strip",
"(",
")",
"!=",
"openmp_pre_guard",
"missing_post_guard",
"=",
"next_line",
".",
"strip",
"(",
")",
"!=",
"openmp_post_guard",
"if",
"missing_pre_guard",
"or",
"missing_post_guard",
":",
"print",
"(",
"f\"ERROR: {filename}:{index + 1}: \"",
"\"OpenMP includes and directives must be guarded by \"",
"f\"{openmp_pre_guard} on the previous line and \"",
"f\"{openmp_post_guard} on the following line\"",
")",
"return",
"1",
"return",
"0"
] |
https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/tools/lint/drakelint.py#L7-L35
|
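For reference, a file that the checker above would accept looks like the snippet below; the temporary-file setup is only a sketch, and the final comment states the expected result rather than importing drakelint itself.

import tempfile

guarded = (
    "#if defined(_OPENMP)\n"
    "#include <omp.h>\n"
    "#endif\n"
)

with tempfile.NamedTemporaryFile("w", suffix=".cc", delete=False) as f:
    f.write(guarded)
    path = f.name

# The include line has the pre-guard directly above and the post-guard
# directly below, so _check_unguarded_openmp_uses(path) would return 0.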
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/scipy/py2/scipy/special/basic.py
|
python
|
pbvv_seq
|
(v, x)
|
return dv[:n1+1], dp[:n1+1]
|
Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
|
Parabolic cylinder functions Vv(x) and derivatives.
|
[
"Parabolic",
"cylinder",
"functions",
"Vv",
"(",
"x",
")",
"and",
"derivatives",
"."
] |
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
|
[
"def",
"pbvv_seq",
"(",
"v",
",",
"x",
")",
":",
"if",
"not",
"(",
"isscalar",
"(",
"v",
")",
"and",
"isscalar",
"(",
"x",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"arguments must be scalars.\"",
")",
"n",
"=",
"int",
"(",
"v",
")",
"v0",
"=",
"v",
"-",
"n",
"if",
"(",
"n",
"<=",
"1",
")",
":",
"n1",
"=",
"1",
"else",
":",
"n1",
"=",
"n",
"v1",
"=",
"n1",
"+",
"v0",
"dv",
",",
"dp",
",",
"pdf",
",",
"pdd",
"=",
"specfun",
".",
"pbvv",
"(",
"v1",
",",
"x",
")",
"return",
"dv",
"[",
":",
"n1",
"+",
"1",
"]",
",",
"dp",
"[",
":",
"n1",
"+",
"1",
"]"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/special/basic.py#L1588-L1622
|
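A hedged usage example, assuming a SciPy build that still exposes pbvv_seq from scipy.special: with v = 2.5 we get n1 = 2, so values for the three orders 0.5, 1.5, 2.5 come back.

from scipy.special import pbvv_seq

# Values and derivatives of V_v(x) at x = 1.0 for orders 0.5, 1.5, 2.5.
dv, dp = pbvv_seq(2.5, 1.0)
print(dv.shape, dp.shape)  # (3,) (3,)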
|
PaddlePaddle/Paddle
|
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
|
python/paddle/tensor/linalg.py
|
python
|
norm
|
(x, p='fro', axis=None, keepdim=False, name=None)
|
Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
or 2-norm, and in general the p-norm for p > 0) of a given tensor.
.. note::
This norm API is different from `numpy.linalg.norm`.
This API supports high-order input tensors (rank >= 3), and the axes to reduce over must be specified to calculate the norm.
But `numpy.linalg.norm` only supports a 1-D vector or a 2-D matrix as the input tensor.
For a p-order matrix norm, this API actually treats the matrix as a flattened vector to calculate the vector norm, NOT A REAL MATRIX NORM.
Args:
x (Tensor): The input tensor could be N-D tensor, and the input data
type could be float32 or float64.
p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,
`inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.
Default value is `fro`.
axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int
or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
If `axis < 0`, the axis to apply the norm operation over is rank(input) + axis.
If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over those axes.
Default value is `None`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true. Default
value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: results of norm operation on the specified axis of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
shape=[2, 3, 4]
np_input = np.arange(24).astype('float32') - 12
np_input = np_input.reshape(shape)
x = paddle.to_tensor(np_input)
#[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]
# [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]
# compute frobenius norm along last two dimensions.
out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
# out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
#out_pnorm.numpy(): [[21.118711 13.190906 5.477226]
# [ 3.7416575 11.224972 19.131126]]
# compute 2-order norm along [0,1] dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
#out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]
# compute inf-order norm
out_pnorm = paddle.linalg.norm(x, p=np.inf)
#out_pnorm.numpy() = [12.]
out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
#out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]
# compute -inf-order norm
out_pnorm = paddle.linalg.norm(x, p=-np.inf)
#out_pnorm.numpy(): [0.]
out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
#out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
|
[] |
def norm(x, p='fro', axis=None, keepdim=False, name=None):
"""
Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
or 2-norm, and in general the p-norm for p > 0) of a given tensor.
.. note::
This norm API is different from `numpy.linalg.norm`.
    This API supports high-order input tensors (rank >= 3), and the axes to reduce over must be specified to calculate the norm.
    But `numpy.linalg.norm` only supports a 1-D vector or a 2-D matrix as the input tensor.
    For a p-order matrix norm, this API actually treats the matrix as a flattened vector to calculate the vector norm, NOT A REAL MATRIX NORM.
Args:
x (Tensor): The input tensor could be N-D tensor, and the input data
type could be float32 or float64.
p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,
`inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.
Default value is `fro`.
axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int
or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
        If `axis < 0`, the axis to apply the norm operation over is rank(input) + axis.
        If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over those axes.
        Default value is `None`.
    keepdim (bool, optional): Whether to reserve the reduced dimension in the
        output Tensor. The result tensor will have fewer dimensions
        than the :attr:`input` unless :attr:`keepdim` is true. Default
        value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: results of norm operation on the specified axis of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
shape=[2, 3, 4]
np_input = np.arange(24).astype('float32') - 12
np_input = np_input.reshape(shape)
x = paddle.to_tensor(np_input)
#[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]
# [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]
# compute frobenius norm along last two dimensions.
out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
# out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
#out_pnorm.numpy(): [[21.118711 13.190906 5.477226]
# [ 3.7416575 11.224972 19.131126]]
# compute 2-order norm along [0,1] dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
#out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]
# compute inf-order norm
out_pnorm = paddle.linalg.norm(x, p=np.inf)
#out_pnorm.numpy() = [12.]
out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
#out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]
# compute -inf-order norm
out_pnorm = paddle.linalg.norm(x, p=-np.inf)
#out_pnorm.numpy(): [0.]
out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
#out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
"""
def frobenius_norm(input, dim=None, keepdim=False, name=None):
"""
The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
dim (list, optional): None for last two dimensions.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
"""
if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
raise ValueError(
"The dim of frobenius norm op should be None or two elements list!"
)
if in_dygraph_mode():
if dim is None:
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
'reduce_all', True)
return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
'reduce_all', False)
attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
if dim is None:
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='frobenius_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def vector_norm(input,
porder=None,
axis=None,
keepdim=False,
asvector=False,
name=None):
"""
Calculate the p-order vector norm for certain dimension of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
porder (float, optional): None for porder=2.0.
axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
"""
if in_dygraph_mode():
if axis is None: axis = -1
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
'keepdim', keepdim, 'asvector', asvector)
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'p_norm')
attrs = {
'axis': axis if axis is not None else -1,
'porder': float(porder) if porder is not None else 2.0,
'keepdim': keepdim,
'asvector': asvector,
'epsilon': 1e-12,
}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='p_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def inf_norm(input,
porder=None,
axis=axis,
keepdim=False,
asvector=False,
name=None):
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
reduce_out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
reduce_all = True if axis == None or axis == [] or asvector == True else False
axis = axis if axis != None and axis != [] else [0]
reduce_type = 'reduce_max' if porder == np.float(
'inf') else 'reduce_min'
helper.append_op(
type=reduce_type,
inputs={'X': out},
outputs={'Out': reduce_out},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
return reduce_out
def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):
"""
NOTE:
This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm.
"""
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='pow',
inputs={'X': abs_out},
outputs={'Out': pow_out},
attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='reduce_sum',
inputs={'X': pow_out},
outputs={'Out': sum_out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': True if axis is None else False
})
block.append_op(
type='pow',
inputs={'X': sum_out},
outputs={'Out': out},
attrs={'factor': float(1. / porder)})
return out
if axis is None and p is not None:
if isinstance(p, str):
if p == "fro":
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
else:
raise ValueError(
"only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(
x,
porder=p,
axis=axis,
keepdim=keepdim,
asvector=True,
name=name)
else:
raise ValueError("only valid p type is string or float, found {}".
format(type(p)))
if isinstance(axis, tuple):
axis = list(axis)
if isinstance(axis, list) and len(axis) == 1:
axis = axis[0]
#calculate vector norm, where axis is int or list with only one integer
if isinstance(axis, int):
if isinstance(p, str):
if p == "fro":
return vector_norm(
x,
porder=2,
axis=axis,
keepdim=keepdim,
asvector=False,
name=name)
else:
raise ValueError(
"only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(
x,
axis=axis,
porder=p,
keepdim=keepdim,
asvector=False,
name=name)
else:
raise ValueError(
"unspport p for p-order vector norm. except float, found {}".
format(p))
#calculate matrix norm, where axis is list with two integers
elif isinstance(axis, list) and len(axis) == 2:
if p == "fro":
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
elif p == np.inf or p == -np.inf:
return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
elif p == 0:
raise ValueError(
"just suport axis type int or list (length of list <=1) if p = 0, found {}".
format(axis))
else:
return p_matrix_norm(
x, porder=p, axis=axis, keepdim=keepdim, name=name)
else:
raise ValueError(
"except axis type int or list (length of list <=2), found {}".
format(axis))
|
[
"def",
"norm",
"(",
"x",
",",
"p",
"=",
"'fro'",
",",
"axis",
"=",
"None",
",",
"keepdim",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"def",
"frobenius_norm",
"(",
"input",
",",
"dim",
"=",
"None",
",",
"keepdim",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"\"\"\"\n The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n dim (list, optional): None for last two dimensions.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n \"\"\"",
"if",
"dim",
"is",
"not",
"None",
"and",
"not",
"(",
"isinstance",
"(",
"dim",
",",
"list",
")",
"and",
"len",
"(",
"dim",
")",
"==",
"2",
")",
":",
"raise",
"ValueError",
"(",
"\"The dim of frobenius norm op should be None or two elements list!\"",
")",
"if",
"in_dygraph_mode",
"(",
")",
":",
"if",
"dim",
"is",
"None",
":",
"return",
"_C_ops",
".",
"frobenius_norm",
"(",
"input",
",",
"'keep_dim'",
",",
"keepdim",
",",
"'reduce_all'",
",",
"True",
")",
"return",
"_C_ops",
".",
"frobenius_norm",
"(",
"input",
",",
"'dim'",
",",
"dim",
",",
"'keep_dim'",
",",
"keepdim",
",",
"'reduce_all'",
",",
"False",
")",
"attrs",
"=",
"{",
"'dim'",
":",
"dim",
",",
"'keep_dim'",
":",
"keepdim",
",",
"'reduce_all'",
":",
"False",
"}",
"if",
"dim",
"is",
"None",
":",
"attrs",
"[",
"'reduce_all'",
"]",
"=",
"True",
"check_variable_and_dtype",
"(",
"input",
",",
"'input'",
",",
"[",
"'float32'",
",",
"'float64'",
"]",
",",
"'frobenius_norm'",
")",
"helper",
"=",
"LayerHelper",
"(",
"'frobenius_norm'",
",",
"*",
"*",
"locals",
"(",
")",
")",
"out",
"=",
"helper",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"helper",
".",
"input_dtype",
"(",
")",
")",
"helper",
".",
"append_op",
"(",
"type",
"=",
"'frobenius_norm'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"input",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"out",
"}",
",",
"attrs",
"=",
"attrs",
")",
"return",
"out",
"def",
"vector_norm",
"(",
"input",
",",
"porder",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"keepdim",
"=",
"False",
",",
"asvector",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"\"\"\"\n Calculate the p-order vector norm for certain dimension of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n porder (float, optional): None for porder=2.0.\n axis (int, optional): None for last dimension.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n \"\"\"",
"if",
"in_dygraph_mode",
"(",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"-",
"1",
"return",
"_C_ops",
".",
"p_norm",
"(",
"input",
",",
"'porder'",
",",
"porder",
",",
"'axis'",
",",
"axis",
",",
"'keepdim'",
",",
"keepdim",
",",
"'asvector'",
",",
"asvector",
")",
"if",
"porder",
"is",
"not",
"None",
":",
"check_type",
"(",
"porder",
",",
"'porder'",
",",
"(",
"float",
",",
"int",
")",
",",
"'p_norm'",
")",
"if",
"axis",
"is",
"not",
"None",
":",
"check_type",
"(",
"axis",
",",
"'axis'",
",",
"(",
"int",
")",
",",
"'p_norm'",
")",
"check_variable_and_dtype",
"(",
"input",
",",
"'input'",
",",
"[",
"'float32'",
",",
"'float64'",
"]",
",",
"'p_norm'",
")",
"attrs",
"=",
"{",
"'axis'",
":",
"axis",
"if",
"axis",
"is",
"not",
"None",
"else",
"-",
"1",
",",
"'porder'",
":",
"float",
"(",
"porder",
")",
"if",
"porder",
"is",
"not",
"None",
"else",
"2.0",
",",
"'keepdim'",
":",
"keepdim",
",",
"'asvector'",
":",
"asvector",
",",
"'epsilon'",
":",
"1e-12",
",",
"}",
"helper",
"=",
"LayerHelper",
"(",
"'p_norm'",
",",
"*",
"*",
"locals",
"(",
")",
")",
"out",
"=",
"helper",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"helper",
".",
"input_dtype",
"(",
")",
")",
"helper",
".",
"append_op",
"(",
"type",
"=",
"'p_norm'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"input",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"out",
"}",
",",
"attrs",
"=",
"attrs",
")",
"return",
"out",
"def",
"inf_norm",
"(",
"input",
",",
"porder",
"=",
"None",
",",
"axis",
"=",
"axis",
",",
"keepdim",
"=",
"False",
",",
"asvector",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"helper",
"=",
"LayerHelper",
"(",
"'frobenius_norm'",
",",
"*",
"*",
"locals",
"(",
")",
")",
"out",
"=",
"helper",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"helper",
".",
"input_dtype",
"(",
")",
")",
"helper",
".",
"append_op",
"(",
"type",
"=",
"'abs'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"input",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"out",
"}",
")",
"reduce_out",
"=",
"helper",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"helper",
".",
"input_dtype",
"(",
")",
")",
"reduce_all",
"=",
"True",
"if",
"axis",
"==",
"None",
"or",
"axis",
"==",
"[",
"]",
"or",
"asvector",
"==",
"True",
"else",
"False",
"axis",
"=",
"axis",
"if",
"axis",
"!=",
"None",
"and",
"axis",
"!=",
"[",
"]",
"else",
"[",
"0",
"]",
"reduce_type",
"=",
"'reduce_max'",
"if",
"porder",
"==",
"np",
".",
"float",
"(",
"'inf'",
")",
"else",
"'reduce_min'",
"helper",
".",
"append_op",
"(",
"type",
"=",
"reduce_type",
",",
"inputs",
"=",
"{",
"'X'",
":",
"out",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"reduce_out",
"}",
",",
"attrs",
"=",
"{",
"'dim'",
":",
"axis",
",",
"'keep_dim'",
":",
"keepdim",
",",
"'reduce_all'",
":",
"reduce_all",
"}",
")",
"return",
"reduce_out",
"def",
"p_matrix_norm",
"(",
"input",
",",
"porder",
"=",
"1.",
",",
"axis",
"=",
"axis",
",",
"keepdim",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"\"\"\"\n NOTE:\n This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm.\n \"\"\"",
"block",
"=",
"LayerHelper",
"(",
"'norm'",
",",
"*",
"*",
"locals",
"(",
")",
")",
"out",
"=",
"block",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"block",
".",
"input_dtype",
"(",
")",
")",
"abs_out",
"=",
"block",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"block",
".",
"input_dtype",
"(",
")",
")",
"block",
".",
"append_op",
"(",
"type",
"=",
"'abs'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"input",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"abs_out",
"}",
")",
"pow_out",
"=",
"block",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"block",
".",
"input_dtype",
"(",
")",
")",
"block",
".",
"append_op",
"(",
"type",
"=",
"'pow'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"abs_out",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"pow_out",
"}",
",",
"attrs",
"=",
"{",
"'factor'",
":",
"porder",
"}",
")",
"sum_out",
"=",
"block",
".",
"create_variable_for_type_inference",
"(",
"dtype",
"=",
"block",
".",
"input_dtype",
"(",
")",
")",
"block",
".",
"append_op",
"(",
"type",
"=",
"'reduce_sum'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"pow_out",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"sum_out",
"}",
",",
"attrs",
"=",
"{",
"'dim'",
":",
"axis",
",",
"'keep_dim'",
":",
"keepdim",
",",
"'reduce_all'",
":",
"True",
"if",
"axis",
"is",
"None",
"else",
"False",
"}",
")",
"porder",
"block",
".",
"append_op",
"(",
"type",
"=",
"'pow'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"sum_out",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"out",
"}",
",",
"attrs",
"=",
"{",
"'factor'",
":",
"float",
"(",
"1.",
"/",
"porder",
")",
"}",
")",
"return",
"out",
"if",
"axis",
"is",
"None",
"and",
"p",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"p",
",",
"str",
")",
":",
"if",
"p",
"==",
"\"fro\"",
":",
"return",
"frobenius_norm",
"(",
"x",
",",
"dim",
"=",
"axis",
",",
"keepdim",
"=",
"keepdim",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"only valid string values are 'fro', found {}\"",
".",
"format",
"(",
"p",
")",
")",
"elif",
"isinstance",
"(",
"p",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"return",
"vector_norm",
"(",
"x",
",",
"porder",
"=",
"p",
",",
"axis",
"=",
"axis",
",",
"keepdim",
"=",
"keepdim",
",",
"asvector",
"=",
"True",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"only valid p type is string or float, found {}\"",
".",
"format",
"(",
"type",
"(",
"p",
")",
")",
")",
"if",
"isinstance",
"(",
"axis",
",",
"tuple",
")",
":",
"axis",
"=",
"list",
"(",
"axis",
")",
"if",
"isinstance",
"(",
"axis",
",",
"list",
")",
"and",
"len",
"(",
"axis",
")",
"==",
"1",
":",
"axis",
"=",
"axis",
"[",
"0",
"]",
"#calculate vector norm, where axis is int or list with only one integer",
"if",
"isinstance",
"(",
"axis",
",",
"int",
")",
":",
"if",
"isinstance",
"(",
"p",
",",
"str",
")",
":",
"if",
"p",
"==",
"\"fro\"",
":",
"return",
"vector_norm",
"(",
"x",
",",
"porder",
"=",
"2",
",",
"axis",
"=",
"axis",
",",
"keepdim",
"=",
"keepdim",
",",
"asvector",
"=",
"False",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"only valid string values are 'fro', found {}\"",
".",
"format",
"(",
"p",
")",
")",
"elif",
"isinstance",
"(",
"p",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"return",
"vector_norm",
"(",
"x",
",",
"axis",
"=",
"axis",
",",
"porder",
"=",
"p",
",",
"keepdim",
"=",
"keepdim",
",",
"asvector",
"=",
"False",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"unspport p for p-order vector norm. except float, found {}\"",
".",
"format",
"(",
"p",
")",
")",
"#calculate matrix norm, where axis is list with two integers",
"elif",
"isinstance",
"(",
"axis",
",",
"list",
")",
"and",
"len",
"(",
"axis",
")",
"==",
"2",
":",
"if",
"p",
"==",
"\"fro\"",
":",
"return",
"frobenius_norm",
"(",
"x",
",",
"dim",
"=",
"axis",
",",
"keepdim",
"=",
"keepdim",
",",
"name",
"=",
"name",
")",
"elif",
"p",
"==",
"np",
".",
"inf",
"or",
"p",
"==",
"-",
"np",
".",
"inf",
":",
"return",
"inf_norm",
"(",
"x",
",",
"porder",
"=",
"p",
",",
"axis",
"=",
"axis",
",",
"keepdim",
"=",
"keepdim",
",",
"name",
"=",
"name",
")",
"elif",
"p",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"just suport axis type int or list (length of list <=1) if p = 0, found {}\"",
".",
"format",
"(",
"axis",
")",
")",
"else",
":",
"return",
"p_matrix_norm",
"(",
"x",
",",
"porder",
"=",
"p",
",",
"axis",
"=",
"axis",
",",
"keepdim",
"=",
"keepdim",
",",
"name",
"=",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"except axis type int or list (length of list <=2), found {}\"",
".",
"format",
"(",
"axis",
")",
")"
] |
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/tensor/linalg.py#L164-L448
|
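The Frobenius numbers in the docstring can be cross-checked with NumPy alone, since the norm over axes [0, 1] is just the square root of the sum of squares over those axes; a minimal sketch:

import numpy as np

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4) - 12
# Matches out_fro in the docstring example above.
fro = np.sqrt((x ** 2).sum(axis=(0, 1)))
print(fro)  # ~[17.435596 16.911535 16.7332   16.911535]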
|||
mindspore-ai/mindspore
|
fb8fd3338605bb34fa5cea054e535a8b1d753fab
|
mindspore/python/mindspore/ops/_grad/grad_implementations.py
|
python
|
bprop_identity
|
(x, out, dout)
|
return (dout,)
|
Backpropagator for primitive `identity`.
|
Backpropagator for primitive `identity`.
|
[
"Backpropagator",
"for",
"primitive",
"identity",
"."
] |
def bprop_identity(x, out, dout):
"""Backpropagator for primitive `identity`."""
return (dout,)
|
[
"def",
"bprop_identity",
"(",
"x",
",",
"out",
",",
"dout",
")",
":",
"return",
"(",
"dout",
",",
")"
] |
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_grad/grad_implementations.py#L149-L151
|
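Since identity(x) = x has derivative 1, the backpropagator simply forwards the upstream gradient; a one-line check against the function above:

# d(identity(x))/dx = 1, so the incoming gradient passes through unchanged.
assert bprop_identity(x=3.0, out=3.0, dout=0.5) == (0.5,)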
|
h0x91b/redis-v8
|
ac8b9d49701d75bcee3719892a2a6a50b437e47a
|
redis/deps/v8/tools/grokdump.py
|
python
|
FullDump
|
(reader, heap)
|
Dump all available memory regions.
|
Dump all available memory regions.
|
[
"Dump",
"all",
"available",
"memory",
"regions",
"."
] |
def FullDump(reader, heap):
"""Dump all available memory regions."""
def dump_region(reader, start, size, location):
print
while start & 3 != 0:
start += 1
size -= 1
location += 1
is_executable = reader.IsProbableExecutableRegion(location, size)
is_ascii = reader.IsProbableASCIIRegion(location, size)
if is_executable is not False:
lines = reader.GetDisasmLines(start, size)
for line in lines:
print FormatDisasmLine(start, heap, line)
print
if is_ascii is not False:
# Output in the same format as the Unix hd command
addr = start
for slot in xrange(location, location + size, 16):
hex_line = ""
asc_line = ""
for i in xrange(0, 16):
if slot + i < location + size:
byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
if byte >= 0x20 and byte < 0x7f:
asc_line += chr(byte)
else:
asc_line += "."
hex_line += " %02x" % (byte)
else:
hex_line += " "
if i == 7:
hex_line += " "
print "%s %s |%s|" % (reader.FormatIntPtr(addr),
hex_line,
asc_line)
addr += 16
if is_executable is not True and is_ascii is not True:
print "%s - %s" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size))
for slot in xrange(start,
start + size,
reader.PointerSize()):
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
print "%s: %s" % (reader.FormatIntPtr(slot),
reader.FormatIntPtr(maybe_address))
if heap_object:
heap_object.Print(Printer())
print
reader.ForEachMemoryRegion(dump_region)
|
[
"def",
"FullDump",
"(",
"reader",
",",
"heap",
")",
":",
"def",
"dump_region",
"(",
"reader",
",",
"start",
",",
"size",
",",
"location",
")",
":",
"print",
"while",
"start",
"&",
"3",
"!=",
"0",
":",
"start",
"+=",
"1",
"size",
"-=",
"1",
"location",
"+=",
"1",
"is_executable",
"=",
"reader",
".",
"IsProbableExecutableRegion",
"(",
"location",
",",
"size",
")",
"is_ascii",
"=",
"reader",
".",
"IsProbableASCIIRegion",
"(",
"location",
",",
"size",
")",
"if",
"is_executable",
"is",
"not",
"False",
":",
"lines",
"=",
"reader",
".",
"GetDisasmLines",
"(",
"start",
",",
"size",
")",
"for",
"line",
"in",
"lines",
":",
"print",
"FormatDisasmLine",
"(",
"start",
",",
"heap",
",",
"line",
")",
"print",
"if",
"is_ascii",
"is",
"not",
"False",
":",
"# Output in the same format as the Unix hd command",
"addr",
"=",
"start",
"for",
"slot",
"in",
"xrange",
"(",
"location",
",",
"location",
"+",
"size",
",",
"16",
")",
":",
"hex_line",
"=",
"\"\"",
"asc_line",
"=",
"\"\"",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"16",
")",
":",
"if",
"slot",
"+",
"i",
"<",
"location",
"+",
"size",
":",
"byte",
"=",
"ctypes",
".",
"c_uint8",
".",
"from_buffer",
"(",
"reader",
".",
"minidump",
",",
"slot",
"+",
"i",
")",
".",
"value",
"if",
"byte",
">=",
"0x20",
"and",
"byte",
"<",
"0x7f",
":",
"asc_line",
"+=",
"chr",
"(",
"byte",
")",
"else",
":",
"asc_line",
"+=",
"\".\"",
"hex_line",
"+=",
"\" %02x\"",
"%",
"(",
"byte",
")",
"else",
":",
"hex_line",
"+=",
"\" \"",
"if",
"i",
"==",
"7",
":",
"hex_line",
"+=",
"\" \"",
"print",
"\"%s %s |%s|\"",
"%",
"(",
"reader",
".",
"FormatIntPtr",
"(",
"addr",
")",
",",
"hex_line",
",",
"asc_line",
")",
"addr",
"+=",
"16",
"if",
"is_executable",
"is",
"not",
"True",
"and",
"is_ascii",
"is",
"not",
"True",
":",
"print",
"\"%s - %s\"",
"%",
"(",
"reader",
".",
"FormatIntPtr",
"(",
"start",
")",
",",
"reader",
".",
"FormatIntPtr",
"(",
"start",
"+",
"size",
")",
")",
"for",
"slot",
"in",
"xrange",
"(",
"start",
",",
"start",
"+",
"size",
",",
"reader",
".",
"PointerSize",
"(",
")",
")",
":",
"maybe_address",
"=",
"reader",
".",
"ReadUIntPtr",
"(",
"slot",
")",
"heap_object",
"=",
"heap",
".",
"FindObject",
"(",
"maybe_address",
")",
"print",
"\"%s: %s\"",
"%",
"(",
"reader",
".",
"FormatIntPtr",
"(",
"slot",
")",
",",
"reader",
".",
"FormatIntPtr",
"(",
"maybe_address",
")",
")",
"if",
"heap_object",
":",
"heap_object",
".",
"Print",
"(",
"Printer",
"(",
")",
")",
"print",
"reader",
".",
"ForEachMemoryRegion",
"(",
"dump_region",
")"
] |
https://github.com/h0x91b/redis-v8/blob/ac8b9d49701d75bcee3719892a2a6a50b437e47a/redis/deps/v8/tools/grokdump.py#L111-L165
|
||
hakuna-m/wubiuefi
|
caec1af0a09c78fd5a345180ada1fe45e0c63493
|
src/pypack/modulegraph/pkg_resources.py
|
python
|
ResourceManager.resource_filename
|
(self, package_or_requirement, resource_name)
|
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
|
Return a true filesystem path for specified resource
|
Return a true filesystem path for specified resource
|
[
"Return",
"a",
"true",
"filesystem",
"path",
"for",
"specified",
"resource"
] |
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
|
[
"def",
"resource_filename",
"(",
"self",
",",
"package_or_requirement",
",",
"resource_name",
")",
":",
"return",
"get_provider",
"(",
"package_or_requirement",
")",
".",
"get_resource_filename",
"(",
"self",
",",
"resource_name",
")"
] |
https://github.com/hakuna-m/wubiuefi/blob/caec1af0a09c78fd5a345180ada1fe45e0c63493/src/pypack/modulegraph/pkg_resources.py#L734-L738
|
|
epiqc/ScaffCC
|
66a79944ee4cd116b27bc1a69137276885461db8
|
llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py
|
python
|
WriteLLVMChecksums
|
(checksums, f)
|
Writes checksums to a text file.
Args:
checksums: a dict mapping from project name to project checksum (result of
ComputeLLVMChecksums).
f: a file object to write into.
|
Writes checksums to a text file.
|
[
"Writes",
"checksums",
"to",
"a",
"text",
"file",
"."
] |
def WriteLLVMChecksums(checksums, f):
"""Writes checksums to a text file.
Args:
checksums: a dict mapping from project name to project checksum (result of
ComputeLLVMChecksums).
f: a file object to write into.
"""
for proj in sorted(checksums.keys()):
f.write("{} {}\n".format(checksums[proj], proj))
|
[
"def",
"WriteLLVMChecksums",
"(",
"checksums",
",",
"f",
")",
":",
"for",
"proj",
"in",
"sorted",
"(",
"checksums",
".",
"keys",
"(",
")",
")",
":",
"f",
".",
"write",
"(",
"\"{} {}\\n\"",
".",
"format",
"(",
"checksums",
"[",
"proj",
"]",
",",
"proj",
")",
")"
] |
https://github.com/epiqc/ScaffCC/blob/66a79944ee4cd116b27bc1a69137276885461db8/llvm/utils/docker/scripts/llvm_checksum/llvm_checksum.py#L131-L141
|
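A small usage sketch against an in-memory buffer, assuming Python 3's io.StringIO as the file object; the digests are made up, and the sorted-by-project ordering puts clang before llvm:

import io

checksums = {"llvm": "d41d8cd9", "clang": "9e107d9d"}  # hypothetical digests
buf = io.StringIO()
WriteLLVMChecksums(checksums, buf)
print(buf.getvalue())
# 9e107d9d clang
# d41d8cd9 llvm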
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
wx/tools/Editra/src/eclib/ctrlbox.py
|
python
|
ControlBar.DoPaintBackground
|
(self, dc, rect, color, color2)
|
Paint the background of the given rect based on the style of
the control bar.
@param dc: DC to draw on
@param rect: wx.Rect
@param color: Pen/Base gradient color
@param color2: Gradient end color
|
Paint the background of the given rect based on the style of
the control bar.
@param dc: DC to draw on
@param rect: wx.Rect
@param color: Pen/Base gradient color
@param color2: Gradient end color
|
[
"Paint",
"the",
"background",
"of",
"the",
"given",
"rect",
"based",
"on",
"the",
"style",
"of",
"the",
"control",
"bar",
".",
"@param",
"dc",
":",
"DC",
"to",
"draw",
"on",
"@param",
"rect",
":",
"wx",
".",
"Rect",
"@param",
"color",
":",
"Pen",
"/",
"Base",
"gradient",
"color",
"@param",
"color2",
":",
"Gradient",
"end",
"color"
] |
def DoPaintBackground(self, dc, rect, color, color2):
"""Paint the background of the given rect based on the style of
the control bar.
@param dc: DC to draw on
@param rect: wx.Rect
@param color: Pen/Base gradient color
@param color2: Gradient end color
"""
# Paint the gradient
if self._style & CTRLBAR_STYLE_GRADIENT:
if isinstance(dc, wx.GCDC):
gc = dc.GetGraphicsContext()
else:
gc = wx.GraphicsContext.Create(dc)
if gc is None:
return
if not self.IsVerticalMode():
grad = gc.CreateLinearGradientBrush(rect.x, rect.y, rect.x,
                                                    rect.y+rect.height,
color2, color)
else:
grad = gc.CreateLinearGradientBrush(rect.x, rect.y,
rect.x+rect.width,
rect.y,
color2, color)
gc.SetPen(gc.CreatePen(self._pen))
gc.SetBrush(grad)
gc.DrawRectangle(rect.x, rect.y, rect.Width - 0.5, rect.Height - 0.5)
dc.SetPen(wx.Pen(color, 1))
# TODO: handle vertical mode
if not self.IsVerticalMode():
# Add a border to the bottom
if self._style & CTRLBAR_STYLE_BORDER_BOTTOM:
dc.DrawLine(rect.x, rect.GetHeight() - 1,
rect.GetWidth(), rect.GetHeight() - 1)
# Add a border to the top
if self._style & CTRLBAR_STYLE_BORDER_TOP:
dc.DrawLine(rect.x, 1, rect.GetWidth(), 1)
|
[
"def",
"DoPaintBackground",
"(",
"self",
",",
"dc",
",",
"rect",
",",
"color",
",",
"color2",
")",
":",
"# Paint the gradient",
"if",
"self",
".",
"_style",
"&",
"CTRLBAR_STYLE_GRADIENT",
":",
"if",
"isinstance",
"(",
"dc",
",",
"wx",
".",
"GCDC",
")",
":",
"gc",
"=",
"dc",
".",
"GetGraphicsContext",
"(",
")",
"else",
":",
"gc",
"=",
"wx",
".",
"GraphicsContext",
".",
"Create",
"(",
"dc",
")",
"if",
"gc",
"is",
"None",
":",
"return",
"if",
"not",
"self",
".",
"IsVerticalMode",
"(",
")",
":",
"grad",
"=",
"gc",
".",
"CreateLinearGradientBrush",
"(",
"rect",
".",
"x",
",",
"rect",
".",
"y",
",",
"rect",
".",
"x",
",",
"rect",
".",
"x",
"+",
"rect",
".",
"height",
",",
"color2",
",",
"color",
")",
"else",
":",
"grad",
"=",
"gc",
".",
"CreateLinearGradientBrush",
"(",
"rect",
".",
"x",
",",
"rect",
".",
"y",
",",
"rect",
".",
"x",
"+",
"rect",
".",
"width",
",",
"rect",
".",
"y",
",",
"color2",
",",
"color",
")",
"gc",
".",
"SetPen",
"(",
"gc",
".",
"CreatePen",
"(",
"self",
".",
"_pen",
")",
")",
"gc",
".",
"SetBrush",
"(",
"grad",
")",
"gc",
".",
"DrawRectangle",
"(",
"rect",
".",
"x",
",",
"rect",
".",
"y",
",",
"rect",
".",
"Width",
"-",
"0.5",
",",
"rect",
".",
"Height",
"-",
"0.5",
")",
"dc",
".",
"SetPen",
"(",
"wx",
".",
"Pen",
"(",
"color",
",",
"1",
")",
")",
"# TODO: handle vertical mode",
"if",
"not",
"self",
".",
"IsVerticalMode",
"(",
")",
":",
"# Add a border to the bottom",
"if",
"self",
".",
"_style",
"&",
"CTRLBAR_STYLE_BORDER_BOTTOM",
":",
"dc",
".",
"DrawLine",
"(",
"rect",
".",
"x",
",",
"rect",
".",
"GetHeight",
"(",
")",
"-",
"1",
",",
"rect",
".",
"GetWidth",
"(",
")",
",",
"rect",
".",
"GetHeight",
"(",
")",
"-",
"1",
")",
"# Add a border to the top",
"if",
"self",
".",
"_style",
"&",
"CTRLBAR_STYLE_BORDER_TOP",
":",
"dc",
".",
"DrawLine",
"(",
"rect",
".",
"x",
",",
"1",
",",
"rect",
".",
"GetWidth",
"(",
")",
",",
"1",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/eclib/ctrlbox.py#L507-L551
|
||
flexflow/FlexFlow
|
581fad8ba8d10a16a3102ee2b406b0319586df24
|
python/flexflow/keras/datasets/cifar10.py
|
python
|
load_data
|
(num_samples=40000)
|
return (x_train, y_train), (x_test, y_test)
|
Loads CIFAR10 dataset.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
|
Loads CIFAR10 dataset.
|
[
"Loads",
"CIFAR10",
"dataset",
"."
] |
def load_data(num_samples=40000):
"""Loads CIFAR10 dataset.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = num_samples
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, int(num_samples/10000)+1):
fpath = os.path.join(path, 'data_batch_' + str(i))
(x_train[(i - 1) * 10000: i * 10000, :, :, :],
y_train[(i - 1) * 10000: i * 10000]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
# if K.image_data_format() == 'channels_last':
# x_train = x_train.transpose(0, 2, 3, 1)
# x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
|
[
"def",
"load_data",
"(",
"num_samples",
"=",
"40000",
")",
":",
"dirname",
"=",
"'cifar-10-batches-py'",
"origin",
"=",
"'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'",
"path",
"=",
"get_file",
"(",
"dirname",
",",
"origin",
"=",
"origin",
",",
"untar",
"=",
"True",
")",
"num_train_samples",
"=",
"num_samples",
"x_train",
"=",
"np",
".",
"empty",
"(",
"(",
"num_train_samples",
",",
"3",
",",
"32",
",",
"32",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"y_train",
"=",
"np",
".",
"empty",
"(",
"(",
"num_train_samples",
",",
")",
",",
"dtype",
"=",
"'uint8'",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"int",
"(",
"num_samples",
"/",
"10000",
")",
"+",
"1",
")",
":",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'data_batch_'",
"+",
"str",
"(",
"i",
")",
")",
"(",
"x_train",
"[",
"(",
"i",
"-",
"1",
")",
"*",
"10000",
":",
"i",
"*",
"10000",
",",
":",
",",
":",
",",
":",
"]",
",",
"y_train",
"[",
"(",
"i",
"-",
"1",
")",
"*",
"10000",
":",
"i",
"*",
"10000",
"]",
")",
"=",
"load_batch",
"(",
"fpath",
")",
"fpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'test_batch'",
")",
"x_test",
",",
"y_test",
"=",
"load_batch",
"(",
"fpath",
")",
"y_train",
"=",
"np",
".",
"reshape",
"(",
"y_train",
",",
"(",
"len",
"(",
"y_train",
")",
",",
"1",
")",
")",
"y_test",
"=",
"np",
".",
"reshape",
"(",
"y_test",
",",
"(",
"len",
"(",
"y_test",
")",
",",
"1",
")",
")",
"# if K.image_data_format() == 'channels_last':",
"# x_train = x_train.transpose(0, 2, 3, 1)",
"# x_test = x_test.transpose(0, 2, 3, 1)",
"return",
"(",
"x_train",
",",
"y_train",
")",
",",
"(",
"x_test",
",",
"y_test",
")"
] |
https://github.com/flexflow/FlexFlow/blob/581fad8ba8d10a16a3102ee2b406b0319586df24/python/flexflow/keras/datasets/cifar10.py#L13-L43
|
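A hedged usage example (the first call downloads CIFAR-10 via get_file, and the arrays stay channels-first since the transpose is commented out):

(x_train, y_train), (x_test, y_test) = load_data(num_samples=20000)
print(x_train.shape, y_train.shape)  # (20000, 3, 32, 32) (20000, 1)
print(x_test.shape, y_test.shape)    # (10000, 3, 32, 32) (10000, 1)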
|
facebook/fboss
|
60063db1df37c2ec0e7dcd0955c54885ea9bf7f0
|
build/fbcode_builder/fbcode_builder.py
|
python
|
FBCodeBuilder.fb_github_project_workdir
|
(self, project_and_path, github_org="facebook")
|
return self.github_project_workdir(github_org + "/" + project, path)
|
This helper lets Facebook-internal CI special-case FB projects
|
This helper lets Facebook-internal CI special-case FB projects
|
[
"This",
"helper",
"lets",
"Facebook",
"-",
"internal",
"CI",
"special",
"-",
"cases",
"FB",
"projects"
] |
def fb_github_project_workdir(self, project_and_path, github_org="facebook"):
"This helper lets Facebook-internal CI special-cases FB projects"
project, path = project_and_path.split("/", 1)
return self.github_project_workdir(github_org + "/" + project, path)
|
[
"def",
"fb_github_project_workdir",
"(",
"self",
",",
"project_and_path",
",",
"github_org",
"=",
"\"facebook\"",
")",
":",
"project",
",",
"path",
"=",
"project_and_path",
".",
"split",
"(",
"\"/\"",
",",
"1",
")",
"return",
"self",
".",
"github_project_workdir",
"(",
"github_org",
"+",
"\"/\"",
"+",
"project",
",",
"path",
")"
] |
https://github.com/facebook/fboss/blob/60063db1df37c2ec0e7dcd0955c54885ea9bf7f0/build/fbcode_builder/fbcode_builder.py#L393-L396
|
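The maxsplit argument of 1 is what keeps any nested path intact; a one-liner illustrating it with a hypothetical project path:

project, path = "folly/folly/io".split("/", 1)
print(project, path)  # folly folly/io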
|
libLAS/libLAS
|
e6a1aaed412d638687b8aec44f7b12df7ca2bbbb
|
python/liblas/header.py
|
python
|
Header.get_systemid
|
(self)
|
return str(core.las.LASHeader_GetSystemId(self.handle).decode())
|
Returns the system identifier specified in the file
|
Returns the system identifier specified in the file
|
[
"Returns",
"the",
"system",
"identifier",
"specified",
"in",
"the",
"file"
] |
def get_systemid(self):
"""Returns the system identifier specified in the file"""
return str(core.las.LASHeader_GetSystemId(self.handle).decode())
|
[
"def",
"get_systemid",
"(",
"self",
")",
":",
"return",
"str",
"(",
"core",
".",
"las",
".",
"LASHeader_GetSystemId",
"(",
"self",
".",
"handle",
")",
".",
"decode",
"(",
")",
")"
] |
https://github.com/libLAS/libLAS/blob/e6a1aaed412d638687b8aec44f7b12df7ca2bbbb/python/liblas/header.py#L238-L240
|
|
Yelp/MOE
|
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
|
moe/optimal_learning/python/interfaces/domain_interface.py
|
python
|
DomainInterface.get_bounding_box
|
(self)
|
Return a list of ClosedIntervals representing a bounding box for this domain.
|
Return a list of ClosedIntervals representing a bounding box for this domain.
|
[
"Return",
"a",
"list",
"of",
"ClosedIntervals",
"representing",
"a",
"bounding",
"box",
"for",
"this",
"domain",
"."
] |
def get_bounding_box(self):
"""Return a list of ClosedIntervals representing a bounding box for this domain."""
pass
|
[
"def",
"get_bounding_box",
"(",
"self",
")",
":",
"pass"
] |
https://github.com/Yelp/MOE/blob/5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c/moe/optimal_learning/python/interfaces/domain_interface.py#L30-L32
|
||
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/_gdi.py
|
python
|
GraphicsContext.DrawRotatedText
|
(*args, **kwargs)
|
return _gdi_.GraphicsContext_DrawRotatedText(*args, **kwargs)
|
DrawRotatedText(self, String str, Double x, Double y, Double angle, GraphicsBrush backgroundBrush=NullGraphicsBrush)
Draws a text string at the defined position, at the specified angle,
which is given in radians.
|
DrawRotatedText(self, String str, Double x, Double y, Double angle, GraphicsBrush backgroundBrush=NullGraphicsBrush)
|
[
"DrawRotatedText",
"(",
"self",
"String",
"str",
"Double",
"x",
"Double",
"y",
"Double",
"angle",
"GraphicsBrush",
"backgroundBrush",
"=",
"NullGraphicsBrush",
")"
] |
def DrawRotatedText(*args, **kwargs):
"""
DrawRotatedText(self, String str, Double x, Double y, Double angle, GraphicsBrush backgroundBrush=NullGraphicsBrush)
Draws a text string at the defined position, at the specified angle,
which is given in radians.
"""
return _gdi_.GraphicsContext_DrawRotatedText(*args, **kwargs)
|
[
"def",
"DrawRotatedText",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"GraphicsContext_DrawRotatedText",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L6391-L6398
|
|
GJDuck/LowFat
|
ecf6a0f0fa1b73a27a626cf493cc39e477b6faea
|
llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py
|
python
|
Diagnostic.category_number
|
(self)
|
return conf.lib.clang_getDiagnosticCategory(self)
|
The category number for this diagnostic or 0 if unavailable.
|
The category number for this diagnostic or 0 if unavailable.
|
[
"The",
"category",
"number",
"for",
"this",
"diagnostic",
"or",
"0",
"if",
"unavailable",
"."
] |
def category_number(self):
"""The category number for this diagnostic or 0 if unavailable."""
return conf.lib.clang_getDiagnosticCategory(self)
|
[
"def",
"category_number",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getDiagnosticCategory",
"(",
"self",
")"
] |
https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/tools/clang/bindings/python/clang/cindex.py#L388-L390
|
|
krishauser/Klampt
|
972cc83ea5befac3f653c1ba20f80155768ad519
|
Python/python2_version/klampt/math/symbolic.py
|
python
|
Context.bindFunction
|
(self,function,remapping=None)
|
return function(*args)
|
Produces an Expression that evaluates the function, where its arguments are bound to
variables / user data in the current environment. The argument names should map to
similarly named variables or user data.
If remapping is provided, then it maps function arguments to variables or values
- a dictionary mapping a function argument arg to remapping[arg].
- a list or tuple mapping the i'th function argument to remapping[i].
|
Produces an Expression that evaluates the function, where its arguments are bound to
variables / user data in the current environment. The argument names should map to
similarly named variables or user data.
|
[
"Produces",
"an",
"Expression",
"that",
"evalutes",
"the",
"function",
"where",
"its",
"arguments",
"are",
"bound",
"to",
"variables",
"/",
"user",
"data",
"in",
"the",
"current",
"environment",
".",
"The",
"argument",
"names",
"should",
"map",
"to",
"similarly",
"named",
"variables",
"or",
"user",
"data",
"."
] |
def bindFunction(self,function,remapping=None):
"""Produces an Expression that evalutes the function, where its arguments are bound to
variables / user data in the current environment. The argument names should map to
similarly named variables or user data.
If remapping is provided, then it maps function arguments to variables or values
- a dictionary mapping a function argument arg to remapping[arg].
- a list or tuple mapping the i'th function argument to remapping[i].
"""
if isinstance(function,str):
function = self.customFunctions[function]
assert isinstance(function,Function)
args = []
for aindex,arg in enumerate(function.argNames):
if remapping is not None:
if isinstance(remapping,dict):
if arg in remapping:
var = remapping[arg]
if isinstance(var,str) and var in self.variableDict:
#Do we want to map it to the corresponding variable? Or just keep it as a userData reference?
pass
args.append(var)
continue
else:
#its a list
var = remapping[aindex]
if isinstance(var,str) and var in self.variableDict:
#Do we want to map it to the corresponding variable? Or just keep it as a userData reference?
pass
args.append(var)
continue
if arg in self.variableDict:
args.append(self.variableDict[arg])
elif arg in self.userData:
args.append(arg)
else:
raise ValueError("Function %s argument %s does not exist in the current context"%(function.name,arg))
return function(*args)
|
[
"def",
"bindFunction",
"(",
"self",
",",
"function",
",",
"remapping",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"function",
",",
"str",
")",
":",
"function",
"=",
"self",
".",
"customFunctions",
"[",
"function",
"]",
"assert",
"isinstance",
"(",
"function",
",",
"Function",
")",
"args",
"=",
"[",
"]",
"for",
"aindex",
",",
"arg",
"in",
"enumerate",
"(",
"function",
".",
"argNames",
")",
":",
"if",
"remapping",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"remapping",
",",
"dict",
")",
":",
"if",
"arg",
"in",
"remapping",
":",
"var",
"=",
"remapping",
"[",
"arg",
"]",
"if",
"isinstance",
"(",
"var",
",",
"str",
")",
"and",
"var",
"in",
"self",
".",
"variableDict",
":",
"#Do we want to map it to the corresponding variable? Or just keep it as a userData reference?",
"pass",
"args",
".",
"append",
"(",
"var",
")",
"continue",
"else",
":",
"#its a list",
"var",
"=",
"remapping",
"[",
"aindex",
"]",
"if",
"isinstance",
"(",
"var",
",",
"str",
")",
"and",
"var",
"in",
"self",
".",
"variableDict",
":",
"#Do we want to map it to the corresponding variable? Or just keep it as a userData reference?",
"pass",
"args",
".",
"append",
"(",
"var",
")",
"continue",
"if",
"arg",
"in",
"self",
".",
"variableDict",
":",
"args",
".",
"append",
"(",
"self",
".",
"variableDict",
"[",
"arg",
"]",
")",
"elif",
"arg",
"in",
"self",
".",
"userData",
":",
"args",
".",
"append",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Function %s argument %s does not exist in the current context\"",
"%",
"(",
"function",
".",
"name",
",",
"arg",
")",
")",
"return",
"function",
"(",
"*",
"args",
")"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/math/symbolic.py#L1223-L1260
|
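A standalone sketch of just the remapping lookup above, detached from the symbolic Context machinery (resolve_args is a hypothetical helper): dict remappings resolve by argument name, list remappings by position.

def resolve_args(arg_names, remapping):
    if isinstance(remapping, dict):
        # Named remapping: unmatched arguments keep their own name.
        return [remapping.get(a, a) for a in arg_names]
    return list(remapping)  # positional: i-th argument -> remapping[i]

print(resolve_args(["x", "y"], {"x": "q"}))  # ['q', 'y']
print(resolve_args(["x", "y"], ["a", "b"]))  # ['a', 'b']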
|
Xilinx/Vitis-AI
|
fc74d404563d9951b57245443c73bef389f3657f
|
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/debug/cli/debugger_cli_common.py
|
python
|
RichTextLines.extend
|
(self, other)
|
Extend this instance of RichTextLines with another instance.
The extension takes effect on the text lines, the font attribute segments,
as well as the annotations. The line indices in the font attribute
segments and the annotations are adjusted to account for the existing
lines. If there are duplicate, non-line-index fields in the annotations,
the value from the input argument "other" will override that in this
instance.
Args:
other: (RichTextLines) The other RichTextLines instance to be appended at
the end of this instance.
|
Extend this instance of RichTextLines with another instance.
|
[
"Extend",
"this",
"instance",
"of",
"RichTextLines",
"with",
"another",
"instance",
"."
] |
def extend(self, other):
"""Extend this instance of RichTextLines with another instance.
The extension takes effect on the text lines, the font attribute segments,
as well as the annotations. The line indices in the font attribute
segments and the annotations are adjusted to account for the existing
lines. If there are duplicate, non-line-index fields in the annotations,
the value from the input argument "other" will override that in this
instance.
Args:
other: (RichTextLines) The other RichTextLines instance to be appended at
the end of this instance.
"""
orig_num_lines = self.num_lines() # Record original number of lines.
# Merge the lines.
self._lines.extend(other.lines)
# Merge the font_attr_segs.
for line_index in other.font_attr_segs:
self._font_attr_segs[orig_num_lines + line_index] = (
other.font_attr_segs[line_index])
# Merge the annotations.
for key in other.annotations:
if isinstance(key, int):
self._annotations[orig_num_lines + key] = (other.annotations[key])
else:
self._annotations[key] = other.annotations[key]
|
[
"def",
"extend",
"(",
"self",
",",
"other",
")",
":",
"orig_num_lines",
"=",
"self",
".",
"num_lines",
"(",
")",
"# Record original number of lines.",
"# Merge the lines.",
"self",
".",
"_lines",
".",
"extend",
"(",
"other",
".",
"lines",
")",
"# Merge the font_attr_segs.",
"for",
"line_index",
"in",
"other",
".",
"font_attr_segs",
":",
"self",
".",
"_font_attr_segs",
"[",
"orig_num_lines",
"+",
"line_index",
"]",
"=",
"(",
"other",
".",
"font_attr_segs",
"[",
"line_index",
"]",
")",
"# Merge the annotations.",
"for",
"key",
"in",
"other",
".",
"annotations",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"self",
".",
"_annotations",
"[",
"orig_num_lines",
"+",
"key",
"]",
"=",
"(",
"other",
".",
"annotations",
"[",
"key",
"]",
")",
"else",
":",
"self",
".",
"_annotations",
"[",
"key",
"]",
"=",
"other",
".",
"annotations",
"[",
"key",
"]"
] |
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/debug/cli/debugger_cli_common.py#L270-L300
|
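The index arithmetic is the interesting part: integer keys from the other instance are shifted by the original line count, while string keys are copied as-is. A plain-dict sketch of that rule:

orig_num_lines = 3
other_annotations = {0: "a", "meta": "b"}
merged = {}
for key, value in other_annotations.items():
    # Line-index keys shift; non-index keys carry over unchanged.
    merged[orig_num_lines + key if isinstance(key, int) else key] = value
print(merged)  # {3: 'a', 'meta': 'b'}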
||
benoitsteiner/tensorflow-opencl
|
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
|
tensorflow/python/ops/control_flow_ops.py
|
python
|
_GetOutputContext
|
(op)
|
return ctxt
|
Return the control flow context for the output of an op.
|
Return the control flow context for the output of an op.
|
[
"Return",
"the",
"control",
"flow",
"context",
"for",
"the",
"output",
"of",
"an",
"op",
"."
] |
def _GetOutputContext(op):
"""Return the control flow context for the output of an op."""
ctxt = op._get_control_flow_context()
if IsLoopExit(op):
ctxt = ctxt.outer_context
return ctxt
|
[
"def",
"_GetOutputContext",
"(",
"op",
")",
":",
"ctxt",
"=",
"op",
".",
"_get_control_flow_context",
"(",
")",
"if",
"IsLoopExit",
"(",
"op",
")",
":",
"ctxt",
"=",
"ctxt",
".",
"outer_context",
"return",
"ctxt"
] |
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/control_flow_ops.py#L484-L489
|
|
natanielruiz/android-yolo
|
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
|
jni-build/jni/include/tensorflow/python/summary/impl/reservoir.py
|
python
|
_ReservoirBucket.Items
|
(self)
|
Get all the items in the bucket.
|
Get all the items in the bucket.
|
[
"Get",
"all",
"the",
"items",
"in",
"the",
"bucket",
"."
] |
def Items(self):
"""Get all the items in the bucket."""
with self._mutex:
return self.items
|
[
"def",
"Items",
"(",
"self",
")",
":",
"with",
"self",
".",
"_mutex",
":",
"return",
"self",
".",
"items"
] |
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/summary/impl/reservoir.py#L231-L234
|
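The same lock-guarded accessor pattern, sketched outside TensorFlow with illustrative names; returning a copy (a small hardening not in the original) keeps callers from reading the list mid-mutation:

```python
import threading

class Bucket:
    """Minimal stand-in for the reservoir bucket above (names illustrative)."""

    def __init__(self):
        self._mutex = threading.Lock()
        self.items = []

    def Items(self):
        """Get all the items in the bucket."""
        with self._mutex:
            return list(self.items)  # snapshot taken under the lock
```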
||
cms-sw/cmssw
|
fd9de012d503d3405420bcbeec0ec879baa57cf2
|
FWCore/ParameterSet/python/Mixins.py
|
python
|
_Parameterizable.getParameter
|
(self, params)
|
return lastParam
|
_getParameter_
Retrieve the specified parameter from the PSet Provided
given the attribute chain
returns None if not found
|
_getParameter_
|
[
"_getParameter_"
] |
def getParameter(self, params):
"""
_getParameter_
Retrieve the specified parameter from the PSet Provided
given the attribute chain
returns None if not found
"""
lastParam = self
# Don't accidentally iterate over letters in a string
if type(params).__name__ == 'str':
return getattr(self, params, None)
for param in params:
lastParam = getattr(lastParam, param, None)
print(str(lastParam))
if lastParam == None:
return None
return lastParam
|
[
"def",
"getParameter",
"(",
"self",
",",
"params",
")",
":",
"lastParam",
"=",
"self",
"# Don't accidentally iterate over letters in a string",
"if",
"type",
"(",
"params",
")",
".",
"__name__",
"==",
"'str'",
":",
"return",
"getattr",
"(",
"self",
",",
"params",
",",
"None",
")",
"for",
"param",
"in",
"params",
":",
"lastParam",
"=",
"getattr",
"(",
"lastParam",
",",
"param",
",",
"None",
")",
"print",
"(",
"str",
"(",
"lastParam",
")",
")",
"if",
"lastParam",
"==",
"None",
":",
"return",
"None",
"return",
"lastParam"
] |
https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/FWCore/ParameterSet/python/Mixins.py#L207-L225
|
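A self-contained sketch of the attribute-chain walk in `getParameter`, with illustrative names (and without the stray debug print, using `is None` rather than `== None`):

```python
class Node(object):
    """Bare attribute holder standing in for a PSet."""

pset = Node()
pset.a = Node()
pset.a.b = 42

def get_parameter(obj, params):
    # A string names a single attribute; a sequence names a chain.
    if isinstance(params, str):
        return getattr(obj, params, None)
    for param in params:
        obj = getattr(obj, param, None)
        if obj is None:
            return None
    return obj

print(get_parameter(pset, ["a", "b"]))        # 42
print(get_parameter(pset, ["a", "missing"]))  # None
```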
|
RamadhanAmizudin/malware
|
2c6c53c8b0d556f5d8078d6ca0fc4448f4697cf1
|
Fuzzbunch/fuzzbunch/pyreadline/modes/emacs.py
|
python
|
EmacsMode.re_read_init_file
|
(self, e)
|
Read in the contents of the inputrc file, and incorporate any
bindings or variable assignments found there.
|
Read in the contents of the inputrc file, and incorporate any
bindings or variable assignments found there.
|
[
"Read",
"in",
"the",
"contents",
"of",
"the",
"inputrc",
"file",
"and",
"incorporate",
"any",
"bindings",
"or",
"variable",
"assignments",
"found",
"there",
"."
] |
def re_read_init_file(self, e): # (C-x C-r)
'''Read in the contents of the inputrc file, and incorporate any
bindings or variable assignments found there.'''
pass
|
[
"def",
"re_read_init_file",
"(",
"self",
",",
"e",
")",
":",
"# (C-x C-r)",
"pass"
] |
https://github.com/RamadhanAmizudin/malware/blob/2c6c53c8b0d556f5d8078d6ca0fc4448f4697cf1/Fuzzbunch/fuzzbunch/pyreadline/modes/emacs.py#L454-L457
|
||
natanielruiz/android-yolo
|
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
|
jni-build/jni/include/tensorflow/contrib/graph_editor/match.py
|
python
|
OpMatcher.__call__
|
(self, op)
|
return True
|
Evaluate if the op matches or not.
|
Evaluate if the op matches or not.
|
[
"Evaluate",
"if",
"the",
"op",
"matches",
"or",
"not",
"."
] |
def __call__(self, op):
"""Evaluate if the op matches or not."""
if not isinstance(op, tf_ops.Operation):
raise TypeError("Expect tf.Operation, got: {}".format(type(op)))
for positive_filter in self.positive_filters:
if not positive_filter(op):
return False
if self.input_op_matches is not None:
if len(op.inputs) != len(self.input_op_matches):
return False
for input_t, input_op_match in zip(op.inputs, self.input_op_matches):
if input_op_match is None:
continue
if not input_op_match(input_t.op):
return False
if self.control_input_op_matches is not None:
if len(op.control_inputs) != len(self.control_input_op_matches):
return False
for cinput_op, cinput_op_match in zip(op.control_inputs,
self.control_input_op_matches):
if cinput_op_match is None:
continue
if not cinput_op_match(cinput_op):
return False
if self.output_op_matches is not None:
if len(op.outputs) != len(self.output_op_matches):
return False
for output_t, output_op_matches in zip(op.outputs,
self.output_op_matches):
if output_op_matches is None:
continue
if len(output_t.consumers()) != len(output_op_matches):
return False
for consumer_op, consumer_op_match in zip(output_t.consumers(),
output_op_matches):
if consumer_op_match is None:
continue
if not consumer_op_match(consumer_op):
return False
return True
|
[
"def",
"__call__",
"(",
"self",
",",
"op",
")",
":",
"if",
"not",
"isinstance",
"(",
"op",
",",
"tf_ops",
".",
"Operation",
")",
":",
"raise",
"TypeError",
"(",
"\"Expect tf.Operation, got: {}\"",
".",
"format",
"(",
"type",
"(",
"op",
")",
")",
")",
"for",
"positive_filter",
"in",
"self",
".",
"positive_filters",
":",
"if",
"not",
"positive_filter",
"(",
"op",
")",
":",
"return",
"False",
"if",
"self",
".",
"input_op_matches",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"op",
".",
"inputs",
")",
"!=",
"len",
"(",
"self",
".",
"input_op_matches",
")",
":",
"return",
"False",
"for",
"input_t",
",",
"input_op_match",
"in",
"zip",
"(",
"op",
".",
"inputs",
",",
"self",
".",
"input_op_matches",
")",
":",
"if",
"input_op_match",
"is",
"None",
":",
"continue",
"if",
"not",
"input_op_match",
"(",
"input_t",
".",
"op",
")",
":",
"return",
"False",
"if",
"self",
".",
"control_input_op_matches",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"op",
".",
"control_inputs",
")",
"!=",
"len",
"(",
"self",
".",
"control_input_op_matches",
")",
":",
"return",
"False",
"for",
"cinput_op",
",",
"cinput_op_match",
"in",
"zip",
"(",
"op",
".",
"control_inputs",
",",
"self",
".",
"control_input_op_matches",
")",
":",
"if",
"cinput_op_match",
"is",
"None",
":",
"continue",
"if",
"not",
"cinput_op_match",
"(",
"cinput_op",
")",
":",
"return",
"False",
"if",
"self",
".",
"output_op_matches",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"op",
".",
"outputs",
")",
"!=",
"len",
"(",
"self",
".",
"output_op_matches",
")",
":",
"return",
"False",
"for",
"output_t",
",",
"output_op_matches",
"in",
"zip",
"(",
"op",
".",
"outputs",
",",
"self",
".",
"output_op_matches",
")",
":",
"if",
"output_op_matches",
"is",
"None",
":",
"continue",
"if",
"len",
"(",
"output_t",
".",
"consumers",
"(",
")",
")",
"!=",
"len",
"(",
"output_op_matches",
")",
":",
"return",
"False",
"for",
"consumer_op",
",",
"consumer_op_match",
"in",
"zip",
"(",
"output_t",
".",
"consumers",
"(",
")",
",",
"output_op_matches",
")",
":",
"if",
"consumer_op_match",
"is",
"None",
":",
"continue",
"if",
"not",
"consumer_op_match",
"(",
"consumer_op",
")",
":",
"return",
"False",
"return",
"True"
] |
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/graph_editor/match.py#L81-L120
|
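A self-contained sketch of the same matching structure without TensorFlow: every positive filter must accept the op, and each per-input matcher (or `None` as a wildcard) must accept the corresponding producer. All names here are illustrative:

```python
class Op:
    """Toy op with a name and a list of input ops."""

    def __init__(self, name, inputs=()):
        self.name = name
        self.inputs = list(inputs)

class MiniMatcher:
    def __init__(self, positive_filters, input_matches=None):
        self.positive_filters = positive_filters
        self.input_matches = input_matches

    def __call__(self, op):
        if not all(f(op) for f in self.positive_filters):
            return False
        if self.input_matches is not None:
            if len(op.inputs) != len(self.input_matches):
                return False
            for inp, match in zip(op.inputs, self.input_matches):
                if match is not None and not match(inp):
                    return False
        return True

add = Op("add", [Op("x"), Op("y")])
is_add_of_x = MiniMatcher([lambda op: op.name == "add"],
                          [lambda op: op.name == "x", None])
print(is_add_of_x(add))  # True
```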
|
wxWidgets/wxPython-Classic
|
19571e1ae65f1ac445f5491474121998c97a1bf0
|
src/osx_cocoa/grid.py
|
python
|
Grid.GetCellAlignment
|
(*args, **kwargs)
|
return _grid.Grid_GetCellAlignment(*args, **kwargs)
|
GetCellAlignment(int row, int col) -> (horiz, vert)
|
GetCellAlignment(int row, int col) -> (horiz, vert)
|
[
"GetCellAlignment",
"(",
"int",
"row",
"int",
"col",
")",
"-",
">",
"(",
"horiz",
"vert",
")"
] |
def GetCellAlignment(*args, **kwargs):
"""GetCellAlignment(int row, int col) -> (horiz, vert)"""
return _grid.Grid_GetCellAlignment(*args, **kwargs)
|
[
"def",
"GetCellAlignment",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"Grid_GetCellAlignment",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] |
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/grid.py#L1798-L1800
|
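A hedged usage sketch for the wrapper above; per its docstring it returns a `(horiz, vert)` pair for one cell (the surrounding wx scaffolding below is an assumption):

```python
import wx
import wx.grid

app = wx.App(False)
frame = wx.Frame(None)
grid = wx.grid.Grid(frame)
grid.CreateGrid(2, 2)
horiz, vert = grid.GetCellAlignment(0, 0)  # alignment flags for cell (0, 0)
print(horiz, vert)
```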
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/tools/python3/src/Lib/email/_header_value_parser.py
|
python
|
get_section
|
(value)
|
return section, value
|
'*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
|
'*' digits
|
[
"*",
"digits"
] |
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
section.defects.append(errors.InvalidHeaderError(
"section number has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
|
[
"def",
"get_section",
"(",
"value",
")",
":",
"section",
"=",
"Section",
"(",
")",
"if",
"not",
"value",
"or",
"value",
"[",
"0",
"]",
"!=",
"'*'",
":",
"raise",
"errors",
".",
"HeaderParseError",
"(",
"\"Expected section but found {}\"",
".",
"format",
"(",
"value",
")",
")",
"section",
".",
"append",
"(",
"ValueTerminal",
"(",
"'*'",
",",
"'section-marker'",
")",
")",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"if",
"not",
"value",
"or",
"not",
"value",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"raise",
"errors",
".",
"HeaderParseError",
"(",
"\"Expected section number but \"",
"\"found {}\"",
".",
"format",
"(",
"value",
")",
")",
"digits",
"=",
"''",
"while",
"value",
"and",
"value",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"digits",
"+=",
"value",
"[",
"0",
"]",
"value",
"=",
"value",
"[",
"1",
":",
"]",
"if",
"digits",
"[",
"0",
"]",
"==",
"'0'",
"and",
"digits",
"!=",
"'0'",
":",
"section",
".",
"defects",
".",
"append",
"(",
"errors",
".",
"InvalidHeaderError",
"(",
"\"section number has an invalid leading 0\"",
")",
")",
"section",
".",
"number",
"=",
"int",
"(",
"digits",
")",
"section",
".",
"append",
"(",
"ValueTerminal",
"(",
"digits",
",",
"'digits'",
")",
")",
"return",
"section",
",",
"value"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/email/_header_value_parser.py#L2359-L2386
|
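A hedged usage sketch: `get_section` consumes the leading `'*'` and digit run and returns the parsed `Section` token together with the unconsumed remainder (importing from the private stdlib module the record comes from):

```python
from email._header_value_parser import get_section

section, rest = get_section("*2; charset=us-ascii")
print(section.number)  # expected: 2
print(rest)            # expected: '; charset=us-ascii'
```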
|
codilime/veles
|
e65de5a7c268129acffcdb03034efd8d256d025c
|
python/veles/dis/isa/falcon.py
|
python
|
FalconIsa.parse_ab
|
(form)
|
return [
ParseWord(fields.b),
ParseInsn(form),
]
|
Generates the remainder of a parser for two-byte forms (a, b).
|
Generates the remainder of a parser for two-byte forms (a, b).
|
[
"Generates",
"the",
"remainder",
"of",
"a",
"parser",
"for",
"two",
"-",
"byte",
"forms",
"(",
"a",
"b",
")",
"."
] |
def parse_ab(form):
"""
Generates the remainder of a parser for two-byte forms (a, b).
"""
fields = FalconFields
return [
ParseWord(fields.b),
ParseInsn(form),
]
|
[
"def",
"parse_ab",
"(",
"form",
")",
":",
"fields",
"=",
"FalconFields",
"return",
"[",
"ParseWord",
"(",
"fields",
".",
"b",
")",
",",
"ParseInsn",
"(",
"form",
")",
",",
"]"
] |
https://github.com/codilime/veles/blob/e65de5a7c268129acffcdb03034efd8d256d025c/python/veles/dis/isa/falcon.py#L709-L717
|
|
SpenceKonde/megaTinyCore
|
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
|
megaavr/tools/libs/pyedbglib/serialport/wincdc.py
|
python
|
CDC.func_name
|
(self)
|
return "%s::%s" % (__name__, sys._getframe(1).f_code.co_name)
|
Get function name
|
Get function name
|
[
"Get",
"function",
"name"
] |
def func_name(self):
"""
Get function name
"""
return "%s::%s" % (__name__, sys._getframe(1).f_code.co_name)
|
[
"def",
"func_name",
"(",
"self",
")",
":",
"return",
"\"%s::%s\"",
"%",
"(",
"__name__",
",",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_code",
".",
"co_name",
")"
] |
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pyedbglib/serialport/wincdc.py#L35-L39
|
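A standalone sketch of the same introspection trick: `sys._getframe(1)` reads the caller's code object, so the helper reports who called it (CPython-specific; the names below are illustrative):

```python
import sys

def func_name():
    return "%s::%s" % (__name__, sys._getframe(1).f_code.co_name)

def open_port():
    return func_name()  # reports the *caller*, i.e. open_port

print(open_port())  # e.g. '__main__::open_port'
```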
|
baidu-research/tensorflow-allreduce
|
66d5b855e90b0949e9fa5cca5599fd729a70e874
|
tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py
|
python
|
StateSpaceModel.transition_to_powers
|
(self, powers)
|
return math_utils.matrix_to_powers(
ops.convert_to_tensor(self.get_state_transition(), dtype=self.dtype),
powers)
|
Raise the transition matrix to a batch of powers.
Computes state_transition^powers. If special cases are available, overriding
this function can lead to more efficient inferences.
Args:
powers: A [...] shape integer Tensor with powers to raise the transition
matrix to.
Returns:
The computed matrix powers, with shape [..., state dimension, state
dimension].
|
Raise the transition matrix to a batch of powers.
|
[
"Raise",
"the",
"transition",
"matrix",
"to",
"a",
"batch",
"of",
"powers",
"."
] |
def transition_to_powers(self, powers):
"""Raise the transition matrix to a batch of powers.
Computes state_transition^powers. If special cases are available, overriding
this function can lead to more efficient inferences.
Args:
powers: A [...] shape integer Tensor with powers to raise the transition
matrix to.
Returns:
The computed matrix powers, with shape [..., state dimension, state
dimension].
"""
return math_utils.matrix_to_powers(
ops.convert_to_tensor(self.get_state_transition(), dtype=self.dtype),
powers)
|
[
"def",
"transition_to_powers",
"(",
"self",
",",
"powers",
")",
":",
"return",
"math_utils",
".",
"matrix_to_powers",
"(",
"ops",
".",
"convert_to_tensor",
"(",
"self",
".",
"get_state_transition",
"(",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
",",
"powers",
")"
] |
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py#L290-L305
|
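A hedged NumPy analogue of raising the transition matrix to a batch of powers; the real method delegates to `math_utils.matrix_to_powers` on a TensorFlow tensor, so the helper below is a stand-in:

```python
import numpy as np

def matrix_to_powers(matrix, powers):
    # One matrix power per requested exponent, stacked along a new axis.
    return np.stack([np.linalg.matrix_power(matrix, int(p)) for p in powers])

transition = np.array([[1.0, 1.0],
                       [0.0, 1.0]])
result = matrix_to_powers(transition, [1, 2, 3])
print(result.shape)  # (3, 2, 2)
```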
|
catboost/catboost
|
167f64f237114a4d10b2b4ee42adb4569137debe
|
contrib/python/setuptools/py2/pkg_resources/_vendor/pyparsing.py
|
python
|
_xml_escape
|
(data)
|
return data
|
Escape &, <, >, ", ', etc. in a string of data.
|
Escape &, <, >, ", ', etc. in a string of data.
|
[
"Escape",
"&",
"<",
">",
"etc",
".",
"in",
"a",
"string",
"of",
"data",
"."
] |
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
|
[
"def",
"_xml_escape",
"(",
"data",
")",
":",
"# ampersand must be replaced first",
"from_symbols",
"=",
"'&><\"\\''",
"to_symbols",
"=",
"(",
"'&'",
"+",
"s",
"+",
"';'",
"for",
"s",
"in",
"\"amp gt lt quot apos\"",
".",
"split",
"(",
")",
")",
"for",
"from_",
",",
"to_",
"in",
"zip",
"(",
"from_symbols",
",",
"to_symbols",
")",
":",
"data",
"=",
"data",
".",
"replace",
"(",
"from_",
",",
"to_",
")",
"return",
"data"
] |
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/pkg_resources/_vendor/pyparsing.py#L185-L193
|
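Usage sketch, reusing the `_xml_escape` definition shown above; because `'&'` is the first symbol substituted, later replacements cannot double-escape the entities it introduces:

```python
print(_xml_escape('a < b & "c"'))
# -> a &lt; b &amp; &quot;c&quot;
```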
|
idaholab/moose
|
9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
|
python/mooseutils/ReporterReader.py
|
python
|
ReporterReader.__bool__
|
(self)
|
return self._index < len(self._data['time_steps'])
|
Allows this object to be used in boolean cases.
```python
data = ReporterReader('file.json')
if not data:
print 'No data found!'
```
|
Allows this object to be used in boolean cases.
|
[
"Allows",
"this",
"object",
"to",
"be",
"used",
"in",
"boolean",
"cases",
"."
] |
def __bool__(self):
"""
Allows this object to be used in boolean cases.
```python
data = ReporterReader('file.json')
if not data:
print 'No data found!'
```
"""
return self._index < len(self._data['time_steps'])
|
[
"def",
"__bool__",
"(",
"self",
")",
":",
"return",
"self",
".",
"_index",
"<",
"len",
"(",
"self",
".",
"_data",
"[",
"'time_steps'",
"]",
")"
] |
https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/mooseutils/ReporterReader.py#L99-L109
|
|
nfrechette/acl-ue4-plugin
|
2a5433b5e7521ca5f3f66ba1afd91a63cce1af7a
|
Tools/stat_parser.py
|
python
|
print_progress
|
(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 40)
|
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
|
Call in a loop to create terminal progress bar
|
[
"Call",
"in",
"a",
"loop",
"to",
"create",
"terminal",
"progress",
"bar"
] |
def print_progress(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 40):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# With minor tweaks
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
# We need to clear any previous line we might have to ensure we have no visual artifacts
# Note that if this function is called too quickly, the text might flicker
terminal_width = 80
sys.stdout.write('{}\r'.format(' ' * terminal_width))
sys.stdout.flush()
sys.stdout.write('%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
|
[
"def",
"print_progress",
"(",
"iteration",
",",
"total",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
",",
"decimals",
"=",
"1",
",",
"bar_length",
"=",
"40",
")",
":",
"# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console",
"# With minor tweaks",
"str_format",
"=",
"\"{0:.\"",
"+",
"str",
"(",
"decimals",
")",
"+",
"\"f}\"",
"percents",
"=",
"str_format",
".",
"format",
"(",
"100",
"*",
"(",
"iteration",
"/",
"float",
"(",
"total",
")",
")",
")",
"filled_length",
"=",
"int",
"(",
"round",
"(",
"bar_length",
"*",
"iteration",
"/",
"float",
"(",
"total",
")",
")",
")",
"bar",
"=",
"'█' *",
"f",
"lled_length +",
"'",
"' *",
"(",
"a",
"r_length -",
"f",
"lled_length)",
"",
"# We need to clear any previous line we might have to ensure we have no visual artifacts",
"# Note that if this function is called too quickly, the text might flicker",
"terminal_width",
"=",
"80",
"sys",
".",
"stdout",
".",
"write",
"(",
"'{}\\r'",
".",
"format",
"(",
"' '",
"*",
"terminal_width",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"'%s |%s| %s%s %s\\r'",
"%",
"(",
"prefix",
",",
"bar",
",",
"percents",
",",
"'%'",
",",
"suffix",
")",
")",
",",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"if",
"iteration",
"==",
"total",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"'\\n'",
")"
] |
https://github.com/nfrechette/acl-ue4-plugin/blob/2a5433b5e7521ca5f3f66ba1afd91a63cce1af7a/Tools/stat_parser.py#L222-L250
|
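Usage sketch for the function above: drive the bar from a loop and let the final call (where `iteration == total`) emit the closing newline:

```python
import time

for i in range(41):
    print_progress(i, 40, prefix='Parsing:', suffix='complete')
    time.sleep(0.01)  # simulated work between updates
```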
||
mantidproject/mantid
|
03deeb89254ec4289edb8771e0188c2090a02f32
|
scripts/Inelastic/Direct/ISISDirecInelasticConfig.py
|
python
|
MantidConfigDirectInelastic._parse_replacement_info
|
(self, repl_info)
|
return (source, dest)
|
process dom element 'replacement' and
returns the variables with its correspondent value
to replace variable by their value.
If value contains one or more of the supported variables as its part, this
variable is replaced by its value.
Supported variables are defined by global list USER_PROPERTIES
and their values are taken from current self._user class
|
process dom element 'replacement' and
returns the variables with its correspondent value
to replace variable by their value.
|
[
"process",
"dom",
"element",
"replacement",
"and",
"returns",
"the",
"variables",
"with",
"its",
"correspondent",
"value",
"to",
"replace",
"variable",
"by",
"their",
"value",
"."
] |
def _parse_replacement_info(self, repl_info):
"""process dom element 'replacement' and
returns the variables with its correspondent value
to replace variable by their value.
If value contains one or more of the supported variables as its part, this
variable is replaced by its value.
Supported variables are defined by global list USER_PROPERTIES
and their values are taken from current self._user class
"""
# what should be replaced in the file
source = repl_info.getAttribute("var")
if len(source) == 0:
raise ValueError(
'"replace" field of {0} file for instrument {1} has to contain attribute "var" and its value'
.format(self._user_files_descr, self._user.instrument))
# what should be placed instead of the replacement
dest = repl_info.getAttribute("by_var")
if len(dest) == 0:
raise ValueError(
'"replace" field of {0} file for instrument {1} has to contain attribute "by_var" and its value'
.format(self._user_files_descr, self._user.instrument))
# replace use-specific variables by their values
if '$' in dest:
dest = self._user.replace_variables(dest)
return (source, dest)
|
[
"def",
"_parse_replacement_info",
"(",
"self",
",",
"repl_info",
")",
":",
"# what should be replaced in the file",
"source",
"=",
"repl_info",
".",
"getAttribute",
"(",
"\"var\"",
")",
"if",
"len",
"(",
"source",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'\"replace\" field of {0} file for instrument {1} has to contain attribute \"var\" and its value'",
".",
"format",
"(",
"self",
".",
"_user_files_descr",
",",
"self",
".",
"_user",
".",
"instrument",
")",
")",
"# what should be placed instead of the replacement",
"dest",
"=",
"repl_info",
".",
"getAttribute",
"(",
"\"by_var\"",
")",
"if",
"len",
"(",
"dest",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'\"replace\" field of {0} file for instrument {1} has to contain attribute \"by_var\" and its value'",
".",
"format",
"(",
"self",
".",
"_user_files_descr",
",",
"self",
".",
"_user",
".",
"instrument",
")",
")",
"# replace use-specific variables by their values",
"if",
"'$'",
"in",
"dest",
":",
"dest",
"=",
"self",
".",
"_user",
".",
"replace_variables",
"(",
"dest",
")",
"return",
"(",
"source",
",",
"dest",
")"
] |
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/Inelastic/Direct/ISISDirecInelasticConfig.py#L624-L650
|
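A hedged sketch of the DOM input the method expects: a replacement element carrying `var` and `by_var` attributes (the attribute values below are illustrative):

```python
from xml.dom.minidom import parseString

elem = parseString('<replace var="$instrument$" by_var="MER"/>').documentElement
print(elem.getAttribute("var"))     # $instrument$
print(elem.getAttribute("by_var"))  # MER
```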
|
openvinotoolkit/openvino
|
dedcbeafa8b84cccdc55ca64b8da516682b381c7
|
tools/pot/openvino/tools/pot/statistics/functions/activations.py
|
python
|
calculate_per_channel_stats
|
(acts, fn, axis=1)
|
return fn(t, axis=2)
|
Calculates per-channel statistics for activations using a specific function
:param act: activation
:param fn: function to calculate per-channel statistics
:return statistics generated by fn for each activation in the batch
|
Calculates per-channel statistics for activations using a specific function
:param act: activation
:param fn: function to calculate per-channel statistics
:return statistics generated by fn for each activation in the batch
|
[
"Calculates",
"per",
"-",
"channel",
"statistics",
"for",
"activations",
"using",
"a",
"specific",
"function",
":",
"param",
"act",
":",
"activation",
":",
"param",
"fn",
":",
"function",
"to",
"calculate",
"per",
"-",
"channel",
"statistics",
":",
"return",
"statistics",
"generated",
"by",
"fn",
"for",
"each",
"activation",
"in",
"the",
"batch"
] |
def calculate_per_channel_stats(acts, fn, axis=1):
""" Calculates per-channel statistics for activations using a specific function
:param act: activation
:param fn: function to calculate per-channel statistics
:return statistics generated by fn for each activation in the batch
"""
if len(acts.shape) < 3:
return acts
acts = np.moveaxis(acts, axis, 1)
t = acts.reshape(acts.shape[0], acts.shape[1], -1)
return fn(t, axis=2)
|
[
"def",
"calculate_per_channel_stats",
"(",
"acts",
",",
"fn",
",",
"axis",
"=",
"1",
")",
":",
"if",
"len",
"(",
"acts",
".",
"shape",
")",
"<",
"3",
":",
"return",
"acts",
"acts",
"=",
"np",
".",
"moveaxis",
"(",
"acts",
",",
"axis",
",",
"1",
")",
"t",
"=",
"acts",
".",
"reshape",
"(",
"acts",
".",
"shape",
"[",
"0",
"]",
",",
"acts",
".",
"shape",
"[",
"1",
"]",
",",
"-",
"1",
")",
"return",
"fn",
"(",
"t",
",",
"axis",
"=",
"2",
")"
] |
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/pot/openvino/tools/pot/statistics/functions/activations.py#L17-L27
|
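Usage sketch for the function above: per-channel means over an NCHW-shaped batch of activations:

```python
import numpy as np

acts = np.random.rand(4, 3, 8, 8)  # batch of 4, 3 channels, 8x8 maps
means = calculate_per_channel_stats(acts, np.mean)
print(means.shape)  # (4, 3): one statistic per sample per channel
```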
|
ROCmSoftwarePlatform/rocBLAS
|
3738f8b098cdc1db1bdfc164ceb689d073116c98
|
scripts/performance/blas/commandrunner.py
|
python
|
Comparison._get_sweep_keys
|
(self)
|
return []
|
The keys that are collapsed when collecting results. E.g. Used to make the x-axis of a plot.
|
The keys that are collapsed when collecting results. E.g. Used to make the x-axis of a plot.
|
[
"The",
"keys",
"that",
"are",
"collapsed",
"when",
"collecting",
"results",
".",
"E",
".",
"g",
".",
"Used",
"to",
"make",
"the",
"x",
"-",
"axis",
"of",
"a",
"plot",
"."
] |
def _get_sweep_keys(self):
'''The keys that are collapsed when collecting results. E.g. Used to make the x-axis of a plot.'''
return []
|
[
"def",
"_get_sweep_keys",
"(",
"self",
")",
":",
"return",
"[",
"]"
] |
https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/3738f8b098cdc1db1bdfc164ceb689d073116c98/scripts/performance/blas/commandrunner.py#L793-L795
|
|
krishauser/Klampt
|
972cc83ea5befac3f653c1ba20f80155768ad519
|
Python/klampt/src/robotsim.py
|
python
|
RobotModel.__init__
|
(self)
|
r"""
__init__(RobotModel self) -> RobotModel
|
r"""
__init__(RobotModel self) -> RobotModel
|
[
"r",
"__init__",
"(",
"RobotModel",
"self",
")",
"-",
">",
"RobotModel"
] |
def __init__(self):
r"""
__init__(RobotModel self) -> RobotModel
"""
_robotsim.RobotModel_swiginit(self, _robotsim.new_RobotModel())
|
[
"def",
"__init__",
"(",
"self",
")",
":",
"_robotsim",
".",
"RobotModel_swiginit",
"(",
"self",
",",
"_robotsim",
".",
"new_RobotModel",
"(",
")",
")"
] |
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/src/robotsim.py#L4818-L4824
|