nwo (string, 5-86 chars) | sha (string, 40 chars) | path (string, 4-189 chars) | language (1 class: "python") | identifier (string, 1-94 chars) | parameters (string, 2-4.03k chars) | argument_list (1 class) | return_statement (string, 0-11.5k chars) | docstring (string, 1-33.2k chars) | docstring_summary (string, 0-5.15k chars) | docstring_tokens (sequence) | function (string, 34-151k chars) | function_tokens (sequence) | url (string, 90-278 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
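Each row below is one record pairing a Python function's source with its docstring and token lists. As a brief aside, a minimal sketch of how a dump with this schema could be loaded for inspection, assuming a parquet export of the table; the filename `code_search.parquet` is hypothetical:

```python
# Minimal loading sketch; assumes the table is available as a parquet
# export with the columns listed above (filename is hypothetical).
import pandas as pd

df = pd.read_parquet("code_search.parquet")
for _, row in df.head(3).iterrows():
    # nwo is the "name with owner" repo slug, e.g. "catboost/catboost".
    print(row["nwo"], row["path"], row["identifier"])
    print(row["docstring_summary"])
```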
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/sorting.py | python | decons_obs_group_ids | (comp_ids: np.ndarray, obs_ids, shape, labels, xnull: bool) | return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels] | Reconstruct labels from observed group ids.
Parameters
----------
comp_ids : np.ndarray[np.intp]
xnull : bool
If nulls are excluded; i.e. -1 labels are passed through. | Reconstruct labels from observed group ids. | [
"Reconstruct",
"labels",
"from",
"observed",
"group",
"ids",
"."
] | def decons_obs_group_ids(comp_ids: np.ndarray, obs_ids, shape, labels, xnull: bool):
    """
    Reconstruct labels from observed group ids.

    Parameters
    ----------
    comp_ids : np.ndarray[np.intp]
    xnull : bool
        If nulls are excluded; i.e. -1 labels are passed through.
    """
    if not xnull:
        lift = np.fromiter(((a == -1).any() for a in labels), dtype="i8")
        shape = np.asarray(shape, dtype="i8") + lift
    if not is_int64_overflow_possible(shape):
        # obs ids are deconstructable! take the fast route!
        out = decons_group_index(obs_ids, shape)
        return out if xnull or not lift.any() else [x - y for x, y in zip(out, lift)]
    # TODO: unique_label_indices only used here, should take ndarray[np.intp]
    indexer = unique_label_indices(ensure_int64(comp_ids))
    return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels] | [
"def",
"decons_obs_group_ids",
"(",
"comp_ids",
":",
"np",
".",
"ndarray",
",",
"obs_ids",
",",
"shape",
",",
"labels",
",",
"xnull",
":",
"bool",
")",
":",
"if",
"not",
"xnull",
":",
"lift",
"=",
"np",
".",
"fromiter",
"(",
"(",
"(",
"a",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
"for",
"a",
"in",
"labels",
")",
",",
"dtype",
"=",
"\"i8\"",
")",
"shape",
"=",
"np",
".",
"asarray",
"(",
"shape",
",",
"dtype",
"=",
"\"i8\"",
")",
"+",
"lift",
"if",
"not",
"is_int64_overflow_possible",
"(",
"shape",
")",
":",
"# obs ids are deconstructable! take the fast route!",
"out",
"=",
"decons_group_index",
"(",
"obs_ids",
",",
"shape",
")",
"return",
"out",
"if",
"xnull",
"or",
"not",
"lift",
".",
"any",
"(",
")",
"else",
"[",
"x",
"-",
"y",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"out",
",",
"lift",
")",
"]",
"# TODO: unique_label_indices only used here, should take ndarray[np.intp]",
"indexer",
"=",
"unique_label_indices",
"(",
"ensure_int64",
"(",
"comp_ids",
")",
")",
"return",
"[",
"lab",
"[",
"indexer",
"]",
".",
"astype",
"(",
"np",
".",
"intp",
",",
"subok",
"=",
"False",
",",
"copy",
"=",
"True",
")",
"for",
"lab",
"in",
"labels",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/sorting.py#L239-L260 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/contrib/rnn/python/ops/fused_rnn_cell.py | python | FusedRNNCell.__call__ | (self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None) | Run this fused RNN on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]`
or a list of `time_len` tensors of shape `[batch_size x input_size]`.
initial_state: either a tensor with shape `[batch_size x state_size]`
or a tuple with shapes `[batch_size x s] for s in state_size`, if the
cell takes tuples. If this is not provided, the cell is expected to
create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)`.
Defaults to `time_len` for each element.
scope: `VariableScope` or `string` for the created subgraph; defaults to
class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len x batch_size x output_size]`
or a list of `time_len` tensors of shape `[batch_size x output_size]`,
to match the type of the `inputs`.
- Final state: Either a single `2-D` tensor, or a tuple of tensors
matching the arity and shapes of `initial_state`. | Run this fused RNN on inputs, starting from the given state. | [
"Run",
"this",
"fused",
"RNN",
"on",
"inputs",
"starting",
"from",
"the",
"given",
"state",
"."
] | def __call__(self,
             inputs,
             initial_state=None,
             dtype=None,
             sequence_length=None,
             scope=None):
  """Run this fused RNN on inputs, starting from the given state.

  Args:
    inputs: `3-D` tensor with shape `[time_len x batch_size x input_size]`
      or a list of `time_len` tensors of shape `[batch_size x input_size]`.
    initial_state: either a tensor with shape `[batch_size x state_size]`
      or a tuple with shapes `[batch_size x s] for s in state_size`, if the
      cell takes tuples. If this is not provided, the cell is expected to
      create a zero initial state of type `dtype`.
    dtype: The data type for the initial state and expected output. Required
      if `initial_state` is not provided or RNN state has a heterogeneous
      dtype.
    sequence_length: Specifies the length of each sequence in inputs. An
      `int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
      time_len)`.
      Defaults to `time_len` for each element.
    scope: `VariableScope` or `string` for the created subgraph; defaults to
      class name.

  Returns:
    A pair containing:

    - Output: A `3-D` tensor of shape `[time_len x batch_size x output_size]`
      or a list of `time_len` tensors of shape `[batch_size x output_size]`,
      to match the type of the `inputs`.
    - Final state: Either a single `2-D` tensor, or a tuple of tensors
      matching the arity and shapes of `initial_state`.
  """
  pass | [
"def",
"__call__",
"(",
"self",
",",
"inputs",
",",
"initial_state",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"sequence_length",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"pass"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/rnn/python/ops/fused_rnn_cell.py#L44-L78 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/richtext.py | python | TextBoxAttr.GetTopMargin | (*args) | return _richtext.TextBoxAttr_GetTopMargin(*args) | GetTopMargin(self) -> TextAttrDimension
GetTopMargin(self) -> TextAttrDimension | GetTopMargin(self) -> TextAttrDimension
GetTopMargin(self) -> TextAttrDimension | [
"GetTopMargin",
"(",
"self",
")",
"-",
">",
"TextAttrDimension",
"GetTopMargin",
"(",
"self",
")",
"-",
">",
"TextAttrDimension"
] | def GetTopMargin(*args):
    """
    GetTopMargin(self) -> TextAttrDimension
    GetTopMargin(self) -> TextAttrDimension
    """
    return _richtext.TextBoxAttr_GetTopMargin(*args) | [
"def",
"GetTopMargin",
"(",
"*",
"args",
")",
":",
"return",
"_richtext",
".",
"TextBoxAttr_GetTopMargin",
"(",
"*",
"args",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L649-L654 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/io/excel.py | python | _XlwtWriter.save | (self) | return self.book.save(self.path) | Save workbook to disk. | Save workbook to disk. | [
"Save",
"workbook",
"to",
"disk",
"."
] | def save(self):
    """
    Save workbook to disk.
    """
    return self.book.save(self.path) | [
"def",
"save",
"(",
"self",
")",
":",
"return",
"self",
".",
"book",
".",
"save",
"(",
"self",
".",
"path",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/io/excel.py#L1673-L1677 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/ragged/ragged_tensor.py | python | _shape_as_tensor | (shape, dtype) | return constant_op.constant(shape, dtype=dtype) | Takes shape and coerces it to a shape as a tensor.
If the object is already a tensor, simply passes it on (result is guaranteed
to be int64 or int32, but not necessarily dtype).
If not, creates a tensor of type dtype.
Result is either a scalar equal to -1 if the shape is unknown_rank.
Otherwise, it is a vector, where unknown dimensions are represented with a
value of -1.
In C++, see TensorShapeFromTensor for parsing shapes in kernels, and
InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for
use in the shape inference function.
Args:
shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]],
Tuple[Optional[Int]].
dtype: tf.int64 or tf.int32
Returns:
a scalar or vector tensor of dtype tf.int32 or tf.int64. | Takes shape and coerces it to a shape as a tensor. | [
"Takes",
"shape",
"and",
"coerces",
"it",
"to",
"a",
"shape",
"as",
"a",
"tensor",
"."
] | def _shape_as_tensor(shape, dtype):
  """Takes shape and coerces it to a shape as a tensor.

  If the object is already a tensor, simply passes it on (result is guaranteed
  to be int64 or int32, but not necessarily dtype).
  If not, creates a tensor of type dtype.

  Result is either a scalar equal to -1 if the shape is unknown_rank.
  Otherwise, it is a vector, where unknown dimensions are represented with a
  value of -1.

  In C++, see TensorShapeFromTensor for parsing shapes in kernels, and
  InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for
  use in the shape inference function.

  Args:
    shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]],
      Tuple[Optional[Int]].
    dtype: tf.int64 or tf.int32

  Returns:
    a scalar or vector tensor of dtype tf.int32 or tf.int64.
  """
  if dtype != dtypes.int64 and dtype != dtypes.int32:
    raise ValueError(f"Expected int64 or int32 for dtype: got {dtype}.")
  if isinstance(shape, ops.Tensor):
    if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32:
      return math_ops.cast(shape, dtype)
    return shape
  shape = tensor_shape.as_shape(shape)
  if not shape:
    # Imply rank is unknown using a -1 scalar.
    return constant_op.constant(-1, dtype=dtype)
  shape = [(-1 if x is None else x) for x in shape.as_list()]
  # At this point, shape is List[Int].
  return constant_op.constant(shape, dtype=dtype) | [
"def",
"_shape_as_tensor",
"(",
"shape",
",",
"dtype",
")",
":",
"if",
"dtype",
"!=",
"dtypes",
".",
"int64",
"and",
"dtype",
"!=",
"dtypes",
".",
"int32",
":",
"raise",
"ValueError",
"(",
"f\"Expected int64 or int32 for dtype: got {dtype}.\"",
")",
"if",
"isinstance",
"(",
"shape",
",",
"ops",
".",
"Tensor",
")",
":",
"if",
"shape",
".",
"dtype",
"!=",
"dtypes",
".",
"int64",
"and",
"shape",
".",
"dtype",
"!=",
"dtypes",
".",
"int32",
":",
"return",
"math_ops",
".",
"cast",
"(",
"shape",
",",
"dtype",
")",
"return",
"shape",
"shape",
"=",
"tensor_shape",
".",
"as_shape",
"(",
"shape",
")",
"if",
"not",
"shape",
":",
"# Imply rank is unknown using a -1 scalar.",
"return",
"constant_op",
".",
"constant",
"(",
"-",
"1",
",",
"dtype",
"=",
"dtype",
")",
"shape",
"=",
"[",
"(",
"-",
"1",
"if",
"x",
"is",
"None",
"else",
"x",
")",
"for",
"x",
"in",
"shape",
".",
"as_list",
"(",
")",
"]",
"# At this point, shape is List[Int].",
"return",
"constant_op",
".",
"constant",
"(",
"shape",
",",
"dtype",
"=",
"dtype",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/ragged/ragged_tensor.py#L2942-L2978 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/traceback.py | python | print_tb | (tb, limit=None, file=None) | Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method. | Print up to 'limit' stack trace entries from the traceback 'tb'. | [
"Print",
"up",
"to",
"limit",
"stack",
"trace",
"entries",
"from",
"the",
"traceback",
"tb",
"."
] | def print_tb(tb, limit=None, file=None):
    """Print up to 'limit' stack trace entries from the traceback 'tb'.

    If 'limit' is omitted or None, all entries are printed. If 'file'
    is omitted or None, the output goes to sys.stderr; otherwise
    'file' should be an open file or file-like object with a write()
    method.
    """
    if file is None:
        file = sys.stderr
    if limit is None:
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
    n = 0
    while tb is not None and (limit is None or n < limit):
        f = tb.tb_frame
        lineno = tb.tb_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        _print(file,
               '  File "%s", line %d, in %s' % (filename, lineno, name))
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        if line: _print(file, '    ' + line.strip())
        tb = tb.tb_next
        n = n+1 | [
"def",
"print_tb",
"(",
"tb",
",",
"limit",
"=",
"None",
",",
"file",
"=",
"None",
")",
":",
"if",
"file",
"is",
"None",
":",
"file",
"=",
"sys",
".",
"stderr",
"if",
"limit",
"is",
"None",
":",
"if",
"hasattr",
"(",
"sys",
",",
"'tracebacklimit'",
")",
":",
"limit",
"=",
"sys",
".",
"tracebacklimit",
"n",
"=",
"0",
"while",
"tb",
"is",
"not",
"None",
"and",
"(",
"limit",
"is",
"None",
"or",
"n",
"<",
"limit",
")",
":",
"f",
"=",
"tb",
".",
"tb_frame",
"lineno",
"=",
"tb",
".",
"tb_lineno",
"co",
"=",
"f",
".",
"f_code",
"filename",
"=",
"co",
".",
"co_filename",
"name",
"=",
"co",
".",
"co_name",
"_print",
"(",
"file",
",",
"' File \"%s\", line %d, in %s'",
"%",
"(",
"filename",
",",
"lineno",
",",
"name",
")",
")",
"linecache",
".",
"checkcache",
"(",
"filename",
")",
"line",
"=",
"linecache",
".",
"getline",
"(",
"filename",
",",
"lineno",
",",
"f",
".",
"f_globals",
")",
"if",
"line",
":",
"_print",
"(",
"file",
",",
"' '",
"+",
"line",
".",
"strip",
"(",
")",
")",
"tb",
"=",
"tb",
".",
"tb_next",
"n",
"=",
"n",
"+",
"1"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/traceback.py#L46-L72 |
||
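The `print_tb` record above documents a standard-library API, so a short usage sketch needs no assumptions beyond the stdlib:

```python
import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    # Print at most two stack frames of the current traceback to stderr,
    # exercising the limit/file parameters documented above.
    traceback.print_tb(sys.exc_info()[2], limit=2, file=sys.stderr)
```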
larroy/clearskies_core | 3574ddf0edc8555454c7044126e786a6c29444dc | tools/gyp/pylib/gyp/generator/ninja.py | python | NinjaWriter.WriteSourcesForArch | (self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None) | return outputs | Write build rules to compile all of |sources|. | Write build rules to compile all of |sources|. | [
"Write",
"build",
"rules",
"to",
"compile",
"all",
"of",
"|sources|",
"."
] | def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
                        predepends, precompiled_header, spec, arch=None):
  """Write build rules to compile all of |sources|."""
  extra_defines = []
  if self.flavor == 'mac':
    cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
    cflags_c = self.xcode_settings.GetCflagsC(config_name)
    cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
    cflags_objc = ['$cflags_c'] + \
                  self.xcode_settings.GetCflagsObjC(config_name)
    cflags_objcc = ['$cflags_cc'] + \
                   self.xcode_settings.GetCflagsObjCC(config_name)
  elif self.flavor == 'win':
    cflags = self.msvs_settings.GetCflags(config_name)
    cflags_c = self.msvs_settings.GetCflagsC(config_name)
    cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
    extra_defines = self.msvs_settings.GetComputedDefines(config_name)
    # See comment at cc_command for why there's two .pdb files.
    pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
        config_name, self.ExpandSpecial)
    if not pdbpath_c:
      obj = 'obj'
      if self.toolset != 'target':
        obj += '.' + self.toolset
      pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
      pdbpath_c = pdbpath + '.c.pdb'
      pdbpath_cc = pdbpath + '.cc.pdb'
    self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
    self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
    self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
  else:
    cflags = config.get('cflags', [])
    cflags_c = config.get('cflags_c', [])
    cflags_cc = config.get('cflags_cc', [])
  # Respect environment variables related to build, but target-specific
  # flags can still override them.
  if self.toolset == 'target':
    cflags_c = (os.environ.get('CPPFLAGS', '').split() +
                os.environ.get('CFLAGS', '').split() + cflags_c)
    cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
                 os.environ.get('CXXFLAGS', '').split() + cflags_cc)
  defines = config.get('defines', []) + extra_defines
  self.WriteVariableList(ninja_file, 'defines',
                         [Define(d, self.flavor) for d in defines])
  if self.flavor == 'win':
    self.WriteVariableList(ninja_file, 'rcflags',
        [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
         for f in self.msvs_settings.GetRcflags(config_name,
                                                self.GypPathToNinja)])
  include_dirs = config.get('include_dirs', [])
  env = self.GetSortedXcodeEnv()
  if self.flavor == 'win':
    env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
                                           config=config_name)
    include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
                                                        config_name)
  self.WriteVariableList(ninja_file, 'includes',
      [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
       for i in include_dirs])
  pch_commands = precompiled_header.GetPchBuildCommands(arch)
  if self.flavor == 'mac':
    # Most targets use no precompiled headers, so only write these if needed.
    for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
                     ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
      include = precompiled_header.GetInclude(ext, arch)
      if include: ninja_file.variable(var, include)
  self.WriteVariableList(ninja_file, 'cflags',
                         map(self.ExpandSpecial, cflags))
  self.WriteVariableList(ninja_file, 'cflags_c',
                         map(self.ExpandSpecial, cflags_c))
  self.WriteVariableList(ninja_file, 'cflags_cc',
                         map(self.ExpandSpecial, cflags_cc))
  if self.flavor == 'mac':
    self.WriteVariableList(ninja_file, 'cflags_objc',
                           map(self.ExpandSpecial, cflags_objc))
    self.WriteVariableList(ninja_file, 'cflags_objcc',
                           map(self.ExpandSpecial, cflags_objcc))
  ninja_file.newline()
  outputs = []
  has_rc_source = False
  for source in sources:
    filename, ext = os.path.splitext(source)
    ext = ext[1:]
    obj_ext = self.obj_ext
    if ext in ('cc', 'cpp', 'cxx'):
      command = 'cxx'
      self.uses_cpp = True
    elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
      command = 'cc'
    elif ext == 's' and self.flavor != 'win':  # Doesn't generate .o.d files.
      command = 'cc_s'
    elif (self.flavor == 'win' and ext == 'asm' and
          self.msvs_settings.GetArch(config_name) == 'x86' and
          not self.msvs_settings.HasExplicitAsmRules(spec)):
      # Asm files only get auto assembled for x86 (not x64).
      command = 'asm'
      # Add the _asm suffix as msvs is capable of handling .cc and
      # .asm files of the same name without collision.
      obj_ext = '_asm.obj'
    elif self.flavor == 'mac' and ext == 'm':
      command = 'objc'
    elif self.flavor == 'mac' and ext == 'mm':
      command = 'objcxx'
      self.uses_cpp = True
    elif self.flavor == 'win' and ext == 'rc':
      command = 'rc'
      obj_ext = '.res'
      has_rc_source = True
    else:
      # Ignore unhandled extensions.
      continue
    input = self.GypPathToNinja(source)
    output = self.GypPathToUniqueOutput(filename + obj_ext)
    if arch is not None:
      output = AddArch(output, arch)
    implicit = precompiled_header.GetObjDependencies([input], [output], arch)
    variables = []
    if self.flavor == 'win':
      variables, output, implicit = precompiled_header.GetFlagsModifications(
          input, output, implicit, command, cflags_c, cflags_cc,
          self.ExpandSpecial)
    ninja_file.build(output, command, input,
                     implicit=[gch for _, _, gch in implicit],
                     order_only=predepends, variables=variables)
    outputs.append(output)
  if has_rc_source:
    resource_include_dirs = config.get('resource_include_dirs', include_dirs)
    self.WriteVariableList(ninja_file, 'resource_includes',
        [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
         for i in resource_include_dirs])
  self.WritePchTargets(ninja_file, pch_commands)
  ninja_file.newline()
  return outputs | [
"def",
"WriteSourcesForArch",
"(",
"self",
",",
"ninja_file",
",",
"config_name",
",",
"config",
",",
"sources",
",",
"predepends",
",",
"precompiled_header",
",",
"spec",
",",
"arch",
"=",
"None",
")",
":",
"extra_defines",
"=",
"[",
"]",
"if",
"self",
".",
"flavor",
"==",
"'mac'",
":",
"cflags",
"=",
"self",
".",
"xcode_settings",
".",
"GetCflags",
"(",
"config_name",
",",
"arch",
"=",
"arch",
")",
"cflags_c",
"=",
"self",
".",
"xcode_settings",
".",
"GetCflagsC",
"(",
"config_name",
")",
"cflags_cc",
"=",
"self",
".",
"xcode_settings",
".",
"GetCflagsCC",
"(",
"config_name",
")",
"cflags_objc",
"=",
"[",
"'$cflags_c'",
"]",
"+",
"self",
".",
"xcode_settings",
".",
"GetCflagsObjC",
"(",
"config_name",
")",
"cflags_objcc",
"=",
"[",
"'$cflags_cc'",
"]",
"+",
"self",
".",
"xcode_settings",
".",
"GetCflagsObjCC",
"(",
"config_name",
")",
"elif",
"self",
".",
"flavor",
"==",
"'win'",
":",
"cflags",
"=",
"self",
".",
"msvs_settings",
".",
"GetCflags",
"(",
"config_name",
")",
"cflags_c",
"=",
"self",
".",
"msvs_settings",
".",
"GetCflagsC",
"(",
"config_name",
")",
"cflags_cc",
"=",
"self",
".",
"msvs_settings",
".",
"GetCflagsCC",
"(",
"config_name",
")",
"extra_defines",
"=",
"self",
".",
"msvs_settings",
".",
"GetComputedDefines",
"(",
"config_name",
")",
"# See comment at cc_command for why there's two .pdb files.",
"pdbpath_c",
"=",
"pdbpath_cc",
"=",
"self",
".",
"msvs_settings",
".",
"GetCompilerPdbName",
"(",
"config_name",
",",
"self",
".",
"ExpandSpecial",
")",
"if",
"not",
"pdbpath_c",
":",
"obj",
"=",
"'obj'",
"if",
"self",
".",
"toolset",
"!=",
"'target'",
":",
"obj",
"+=",
"'.'",
"+",
"self",
".",
"toolset",
"pdbpath",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"obj",
",",
"self",
".",
"base_dir",
",",
"self",
".",
"name",
")",
")",
"pdbpath_c",
"=",
"pdbpath",
"+",
"'.c.pdb'",
"pdbpath_cc",
"=",
"pdbpath",
"+",
"'.cc.pdb'",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'pdbname_c'",
",",
"[",
"pdbpath_c",
"]",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'pdbname_cc'",
",",
"[",
"pdbpath_cc",
"]",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'pchprefix'",
",",
"[",
"self",
".",
"name",
"]",
")",
"else",
":",
"cflags",
"=",
"config",
".",
"get",
"(",
"'cflags'",
",",
"[",
"]",
")",
"cflags_c",
"=",
"config",
".",
"get",
"(",
"'cflags_c'",
",",
"[",
"]",
")",
"cflags_cc",
"=",
"config",
".",
"get",
"(",
"'cflags_cc'",
",",
"[",
"]",
")",
"# Respect environment variables related to build, but target-specific",
"# flags can still override them.",
"if",
"self",
".",
"toolset",
"==",
"'target'",
":",
"cflags_c",
"=",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'CPPFLAGS'",
",",
"''",
")",
".",
"split",
"(",
")",
"+",
"os",
".",
"environ",
".",
"get",
"(",
"'CFLAGS'",
",",
"''",
")",
".",
"split",
"(",
")",
"+",
"cflags_c",
")",
"cflags_cc",
"=",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'CPPFLAGS'",
",",
"''",
")",
".",
"split",
"(",
")",
"+",
"os",
".",
"environ",
".",
"get",
"(",
"'CXXFLAGS'",
",",
"''",
")",
".",
"split",
"(",
")",
"+",
"cflags_cc",
")",
"defines",
"=",
"config",
".",
"get",
"(",
"'defines'",
",",
"[",
"]",
")",
"+",
"extra_defines",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'defines'",
",",
"[",
"Define",
"(",
"d",
",",
"self",
".",
"flavor",
")",
"for",
"d",
"in",
"defines",
"]",
")",
"if",
"self",
".",
"flavor",
"==",
"'win'",
":",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'rcflags'",
",",
"[",
"QuoteShellArgument",
"(",
"self",
".",
"ExpandSpecial",
"(",
"f",
")",
",",
"self",
".",
"flavor",
")",
"for",
"f",
"in",
"self",
".",
"msvs_settings",
".",
"GetRcflags",
"(",
"config_name",
",",
"self",
".",
"GypPathToNinja",
")",
"]",
")",
"include_dirs",
"=",
"config",
".",
"get",
"(",
"'include_dirs'",
",",
"[",
"]",
")",
"env",
"=",
"self",
".",
"GetSortedXcodeEnv",
"(",
")",
"if",
"self",
".",
"flavor",
"==",
"'win'",
":",
"env",
"=",
"self",
".",
"msvs_settings",
".",
"GetVSMacroEnv",
"(",
"'$!PRODUCT_DIR'",
",",
"config",
"=",
"config_name",
")",
"include_dirs",
"=",
"self",
".",
"msvs_settings",
".",
"AdjustIncludeDirs",
"(",
"include_dirs",
",",
"config_name",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'includes'",
",",
"[",
"QuoteShellArgument",
"(",
"'-I'",
"+",
"self",
".",
"GypPathToNinja",
"(",
"i",
",",
"env",
")",
",",
"self",
".",
"flavor",
")",
"for",
"i",
"in",
"include_dirs",
"]",
")",
"pch_commands",
"=",
"precompiled_header",
".",
"GetPchBuildCommands",
"(",
"arch",
")",
"if",
"self",
".",
"flavor",
"==",
"'mac'",
":",
"# Most targets use no precompiled headers, so only write these if needed.",
"for",
"ext",
",",
"var",
"in",
"[",
"(",
"'c'",
",",
"'cflags_pch_c'",
")",
",",
"(",
"'cc'",
",",
"'cflags_pch_cc'",
")",
",",
"(",
"'m'",
",",
"'cflags_pch_objc'",
")",
",",
"(",
"'mm'",
",",
"'cflags_pch_objcc'",
")",
"]",
":",
"include",
"=",
"precompiled_header",
".",
"GetInclude",
"(",
"ext",
",",
"arch",
")",
"if",
"include",
":",
"ninja_file",
".",
"variable",
"(",
"var",
",",
"include",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'cflags'",
",",
"map",
"(",
"self",
".",
"ExpandSpecial",
",",
"cflags",
")",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'cflags_c'",
",",
"map",
"(",
"self",
".",
"ExpandSpecial",
",",
"cflags_c",
")",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'cflags_cc'",
",",
"map",
"(",
"self",
".",
"ExpandSpecial",
",",
"cflags_cc",
")",
")",
"if",
"self",
".",
"flavor",
"==",
"'mac'",
":",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'cflags_objc'",
",",
"map",
"(",
"self",
".",
"ExpandSpecial",
",",
"cflags_objc",
")",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'cflags_objcc'",
",",
"map",
"(",
"self",
".",
"ExpandSpecial",
",",
"cflags_objcc",
")",
")",
"ninja_file",
".",
"newline",
"(",
")",
"outputs",
"=",
"[",
"]",
"has_rc_source",
"=",
"False",
"for",
"source",
"in",
"sources",
":",
"filename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"source",
")",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"obj_ext",
"=",
"self",
".",
"obj_ext",
"if",
"ext",
"in",
"(",
"'cc'",
",",
"'cpp'",
",",
"'cxx'",
")",
":",
"command",
"=",
"'cxx'",
"self",
".",
"uses_cpp",
"=",
"True",
"elif",
"ext",
"==",
"'c'",
"or",
"(",
"ext",
"==",
"'S'",
"and",
"self",
".",
"flavor",
"!=",
"'win'",
")",
":",
"command",
"=",
"'cc'",
"elif",
"ext",
"==",
"'s'",
"and",
"self",
".",
"flavor",
"!=",
"'win'",
":",
"# Doesn't generate .o.d files.",
"command",
"=",
"'cc_s'",
"elif",
"(",
"self",
".",
"flavor",
"==",
"'win'",
"and",
"ext",
"==",
"'asm'",
"and",
"self",
".",
"msvs_settings",
".",
"GetArch",
"(",
"config_name",
")",
"==",
"'x86'",
"and",
"not",
"self",
".",
"msvs_settings",
".",
"HasExplicitAsmRules",
"(",
"spec",
")",
")",
":",
"# Asm files only get auto assembled for x86 (not x64).",
"command",
"=",
"'asm'",
"# Add the _asm suffix as msvs is capable of handling .cc and",
"# .asm files of the same name without collision.",
"obj_ext",
"=",
"'_asm.obj'",
"elif",
"self",
".",
"flavor",
"==",
"'mac'",
"and",
"ext",
"==",
"'m'",
":",
"command",
"=",
"'objc'",
"elif",
"self",
".",
"flavor",
"==",
"'mac'",
"and",
"ext",
"==",
"'mm'",
":",
"command",
"=",
"'objcxx'",
"self",
".",
"uses_cpp",
"=",
"True",
"elif",
"self",
".",
"flavor",
"==",
"'win'",
"and",
"ext",
"==",
"'rc'",
":",
"command",
"=",
"'rc'",
"obj_ext",
"=",
"'.res'",
"has_rc_source",
"=",
"True",
"else",
":",
"# Ignore unhandled extensions.",
"continue",
"input",
"=",
"self",
".",
"GypPathToNinja",
"(",
"source",
")",
"output",
"=",
"self",
".",
"GypPathToUniqueOutput",
"(",
"filename",
"+",
"obj_ext",
")",
"if",
"arch",
"is",
"not",
"None",
":",
"output",
"=",
"AddArch",
"(",
"output",
",",
"arch",
")",
"implicit",
"=",
"precompiled_header",
".",
"GetObjDependencies",
"(",
"[",
"input",
"]",
",",
"[",
"output",
"]",
",",
"arch",
")",
"variables",
"=",
"[",
"]",
"if",
"self",
".",
"flavor",
"==",
"'win'",
":",
"variables",
",",
"output",
",",
"implicit",
"=",
"precompiled_header",
".",
"GetFlagsModifications",
"(",
"input",
",",
"output",
",",
"implicit",
",",
"command",
",",
"cflags_c",
",",
"cflags_cc",
",",
"self",
".",
"ExpandSpecial",
")",
"ninja_file",
".",
"build",
"(",
"output",
",",
"command",
",",
"input",
",",
"implicit",
"=",
"[",
"gch",
"for",
"_",
",",
"_",
",",
"gch",
"in",
"implicit",
"]",
",",
"order_only",
"=",
"predepends",
",",
"variables",
"=",
"variables",
")",
"outputs",
".",
"append",
"(",
"output",
")",
"if",
"has_rc_source",
":",
"resource_include_dirs",
"=",
"config",
".",
"get",
"(",
"'resource_include_dirs'",
",",
"include_dirs",
")",
"self",
".",
"WriteVariableList",
"(",
"ninja_file",
",",
"'resource_includes'",
",",
"[",
"QuoteShellArgument",
"(",
"'-I'",
"+",
"self",
".",
"GypPathToNinja",
"(",
"i",
",",
"env",
")",
",",
"self",
".",
"flavor",
")",
"for",
"i",
"in",
"resource_include_dirs",
"]",
")",
"self",
".",
"WritePchTargets",
"(",
"ninja_file",
",",
"pch_commands",
")",
"ninja_file",
".",
"newline",
"(",
")",
"return",
"outputs"
] | https://github.com/larroy/clearskies_core/blob/3574ddf0edc8555454c7044126e786a6c29444dc/tools/gyp/pylib/gyp/generator/ninja.py#L802-L943 |
|
bundy-dns/bundy | 3d41934996b82b0cd2fe22dd74d2abc1daba835d | src/lib/python/bundy/config/cfgmgr.py | python | ConfigManagerData.check_for_updates | (file_config) | return config | Given the parsed JSON data from the config file,
check whether it needs updating due to version changes.
Return the data with updates (or the original data if no
updates were necessary).
Even though it is at this moment not technically necessary, this
function makes and returns a copy of the given data. | Given the parsed JSON data from the config file,
check whether it needs updating due to version changes.
Return the data with updates (or the original data if no
updates were necessary).
Even though it is at this moment not technically necessary, this
function makes and returns a copy of the given data. | [
"Given",
"the",
"parsed",
"JSON",
"data",
"from",
"the",
"config",
"file",
"check",
"whether",
"it",
"needs",
"updating",
"due",
"to",
"version",
"changes",
".",
"Return",
"the",
"data",
"with",
"updates",
"(",
"or",
"the",
"original",
"data",
"if",
"no",
"updates",
"were",
"necessary",
")",
".",
"Even",
"though",
"it",
"is",
"at",
"this",
"moment",
"not",
"technically",
"necessary",
"this",
"function",
"makes",
"and",
"returns",
"a",
"copy",
"of",
"the",
"given",
"data",
"."
] | def check_for_updates(file_config):
    """
    Given the parsed JSON data from the config file,
    check whether it needs updating due to version changes.
    Return the data with updates (or the original data if no
    updates were necessary).
    Even though it is at this moment not technically necessary, this
    function makes and returns a copy of the given data.
    """
    config = copy.deepcopy(file_config)
    if 'version' in config:
        data_version = config['version']
    else:
        # If it is not present, assume latest or earliest?
        data_version = 1
    # For efficiency, if up-to-date, return now
    if data_version == config_data.BUNDY_CONFIG_DATA_VERSION:
        return config
    # Don't know what to do if it is more recent
    if data_version > config_data.BUNDY_CONFIG_DATA_VERSION:
        raise ConfigManagerDataReadError(
                  "Cannot load configuration file: version "
                  "%d not yet supported" % config['version'])
    # At some point we might give up supporting older versions
    if data_version < 1:
        raise ConfigManagerDataReadError(
                  "Cannot load configuration file: version "
                  "%d no longer supported" % config['version'])
    # Ok, so we have a still-supported older version. Apply all
    # updates
    new_data_version = data_version
    if new_data_version == 1:
        # only format change, no other changes necessary
        new_data_version = 2
    if new_data_version == 2:
        # 'Boss' got changed to 'Init'; If for some reason both are
        # present, simply ignore the old one
        if 'Boss' in config:
            if not 'Init' in config:
                config['Init'] = config['Boss']
                del config['Boss']
            else:
                # This should not happen, but we don't want to overwrite
                # any config in this case, so warn about it
                logger.warn(CFGMGR_CONFIG_UPDATE_BOSS_AND_INIT_FOUND)
        new_data_version = 3
    if new_data_version == 3:
        # items beginning with '_' are now reserved for internal system
        # use. any possible conflict (none known) is rejected here.
        for mod, mod_conf in config.items():
            if mod == 'version':
                continue
            reserved = [x for x in mod_conf.keys() if x and x[0] == '_']
            if reserved:
                raise ConfigManagerDataReadError('system reserved items '
                                                 'should not exist until '
                                                 'version 4: ' +
                                                 ', '.join(reserved))
            # _generation_id is currently the only defined system reserved
            # item.
            mod_conf['_generation_id'] = 1
        new_data_version = 4
    config['version'] = new_data_version
    logger.info(CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE, data_version,
                new_data_version)
    return config | [
"def",
"check_for_updates",
"(",
"file_config",
")",
":",
"config",
"=",
"copy",
".",
"deepcopy",
"(",
"file_config",
")",
"if",
"'version'",
"in",
"config",
":",
"data_version",
"=",
"config",
"[",
"'version'",
"]",
"else",
":",
"# If it is not present, assume latest or earliest?",
"data_version",
"=",
"1",
"# For efficiency, if up-to-date, return now",
"if",
"data_version",
"==",
"config_data",
".",
"BUNDY_CONFIG_DATA_VERSION",
":",
"return",
"config",
"# Don't know what to do if it is more recent",
"if",
"data_version",
">",
"config_data",
".",
"BUNDY_CONFIG_DATA_VERSION",
":",
"raise",
"ConfigManagerDataReadError",
"(",
"\"Cannot load configuration file: version \"",
"\"%d not yet supported\"",
"%",
"config",
"[",
"'version'",
"]",
")",
"# At some point we might give up supporting older versions",
"if",
"data_version",
"<",
"1",
":",
"raise",
"ConfigManagerDataReadError",
"(",
"\"Cannot load configuration file: version \"",
"\"%d no longer supported\"",
"%",
"config",
"[",
"'version'",
"]",
")",
"# Ok, so we have a still-supported older version. Apply all",
"# updates",
"new_data_version",
"=",
"data_version",
"if",
"new_data_version",
"==",
"1",
":",
"# only format change, no other changes necessary",
"new_data_version",
"=",
"2",
"if",
"new_data_version",
"==",
"2",
":",
"# 'Boss' got changed to 'Init'; If for some reason both are",
"# present, simply ignore the old one",
"if",
"'Boss'",
"in",
"config",
":",
"if",
"not",
"'Init'",
"in",
"config",
":",
"config",
"[",
"'Init'",
"]",
"=",
"config",
"[",
"'Boss'",
"]",
"del",
"config",
"[",
"'Boss'",
"]",
"else",
":",
"# This should not happen, but we don't want to overwrite",
"# any config in this case, so warn about it",
"logger",
".",
"warn",
"(",
"CFGMGR_CONFIG_UPDATE_BOSS_AND_INIT_FOUND",
")",
"new_data_version",
"=",
"3",
"if",
"new_data_version",
"==",
"3",
":",
"# items beginning with '_' are now reserved for internal system",
"# use. any possible conflict (none known) is rejected here.",
"for",
"mod",
",",
"mod_conf",
"in",
"config",
".",
"items",
"(",
")",
":",
"if",
"mod",
"==",
"'version'",
":",
"continue",
"reserved",
"=",
"[",
"x",
"for",
"x",
"in",
"mod_conf",
".",
"keys",
"(",
")",
"if",
"x",
"and",
"x",
"[",
"0",
"]",
"==",
"'_'",
"]",
"if",
"reserved",
":",
"raise",
"ConfigManagerDataReadError",
"(",
"'system reserved items '",
"'should not exist until '",
"'version 4: '",
"+",
"', '",
".",
"join",
"(",
"reserved",
")",
")",
"# _generation_id is currently the only defined system reserved",
"# item.",
"mod_conf",
"[",
"'_generation_id'",
"]",
"=",
"1",
"new_data_version",
"=",
"4",
"config",
"[",
"'version'",
"]",
"=",
"new_data_version",
"logger",
".",
"info",
"(",
"CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE",
",",
"data_version",
",",
"new_data_version",
")",
"return",
"config"
] | https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/config/cfgmgr.py#L72-L142 |
|
GJDuck/LowFat | ecf6a0f0fa1b73a27a626cf493cc39e477b6faea | llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/report.py | python | read_crashes | (output_dir) | return (parse_crash(filename)
for filename in glob.iglob(os.path.join(output_dir, 'failures',
'*.info.txt'))) | Generate a unique sequence of crashes from given output directory. | Generate a unique sequence of crashes from given output directory. | [
"Generate",
"a",
"unique",
"sequence",
"of",
"crashes",
"from",
"given",
"output",
"directory",
"."
] | def read_crashes(output_dir):
    """ Generate a unique sequence of crashes from given output directory. """
    return (parse_crash(filename)
            for filename in glob.iglob(os.path.join(output_dir, 'failures',
                                                    '*.info.txt'))) | [
"def",
"read_crashes",
"(",
"output_dir",
")",
":",
"return",
"(",
"parse_crash",
"(",
"filename",
")",
"for",
"filename",
"in",
"glob",
".",
"iglob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"'failures'",
",",
"'*.info.txt'",
")",
")",
")"
] | https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/tools/clang/tools/scan-build-py/libscanbuild/report.py#L286-L291 |
|
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/contributed/sumopy/plugins/mapmatching/wxgui.py | python | WxGui.on_edgesresults_to_shapefile | (self, event=None) | Export edge results to shape file. | Export edge results to shape file. | [
"Export",
"edge",
"results",
"to",
"shape",
"file",
"."
] | def on_edgesresults_to_shapefile(self, event=None):
    """
    Export edge results to shape file.
    """
    print 'on_nodes_to_shapefile'
    scenario = self._mapmatching.get_scenario()
    dirpath = scenario.get_workdirpath()
    #defaultFile = scenario.get_rootfilename()+'.edgeres.shp'
    wildcards_all = 'All files (*.*)|*.*|SHP files (*.shp)|*.shp'
    dlg = wx.FileDialog(None, message='Export edge results to shapefile',
                        defaultDir=dirpath,
                        # defaultFile=defaultFile,
                        wildcard=wildcards_all, style=wx.SAVE | wx.CHANGE_DIR)
    if dlg.ShowModal() == wx.ID_OK:
        filepath = dlg.GetPath()
    else:
        return
    mapmatching.edgesresults_to_shapefile(self._mapmatching,
                                          self._results,
                                          filepath,
                                          log=self._mainframe.get_logger()) | [
"def",
"on_edgesresults_to_shapefile",
"(",
"self",
",",
"event",
"=",
"None",
")",
":",
"print",
"'on_nodes_to_shapefile'",
"scenario",
"=",
"self",
".",
"_mapmatching",
".",
"get_scenario",
"(",
")",
"dirpath",
"=",
"scenario",
".",
"get_workdirpath",
"(",
")",
"#defaultFile = scenario.get_rootfilename()+'.edgeres.shp'",
"wildcards_all",
"=",
"'All files (*.*)|*.*|SHP files (*.shp)|*.shp'",
"dlg",
"=",
"wx",
".",
"FileDialog",
"(",
"None",
",",
"message",
"=",
"'Export edge results to shapefile'",
",",
"defaultDir",
"=",
"dirpath",
",",
"# defaultFile=defaultFile,",
"wildcard",
"=",
"wildcards_all",
",",
"style",
"=",
"wx",
".",
"SAVE",
"|",
"wx",
".",
"CHANGE_DIR",
")",
"if",
"dlg",
".",
"ShowModal",
"(",
")",
"==",
"wx",
".",
"ID_OK",
":",
"filepath",
"=",
"dlg",
".",
"GetPath",
"(",
")",
"else",
":",
"return",
"mapmatching",
".",
"edgesresults_to_shapefile",
"(",
"self",
".",
"_mapmatching",
",",
"self",
".",
"_results",
",",
"filepath",
",",
"log",
"=",
"self",
".",
"_mainframe",
".",
"get_logger",
"(",
")",
")"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/plugins/mapmatching/wxgui.py#L946-L968 |
||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py | python | Barrier.incomplete_size | (self, name=None) | return gen_data_flow_ops._barrier_incomplete_size(
self._barrier_ref, name=name) | Compute the number of incomplete elements in the given barrier.
Args:
name: A name for the operation (optional).
Returns:
A single-element tensor containing the number of incomplete elements in
the given barrier. | Compute the number of incomplete elements in the given barrier. | [
"Compute",
"the",
"number",
"of",
"incomplete",
"elements",
"in",
"the",
"given",
"barrier",
"."
] | def incomplete_size(self, name=None):
  """Compute the number of incomplete elements in the given barrier.

  Args:
    name: A name for the operation (optional).

  Returns:
    A single-element tensor containing the number of incomplete elements in
    the given barrier.
  """
  if name is None:
    name = "%s_BarrierIncompleteSize" % self._name
  return gen_data_flow_ops._barrier_incomplete_size(
      self._barrier_ref, name=name) | [
"def",
"incomplete_size",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"%s_BarrierIncompleteSize\"",
"%",
"self",
".",
"_name",
"return",
"gen_data_flow_ops",
".",
"_barrier_incomplete_size",
"(",
"self",
".",
"_barrier_ref",
",",
"name",
"=",
"name",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/data_flow_ops.py#L994-L1007 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/py/py/_code/source.py | python | Source.getstatementrange | (self, lineno, assertion=False) | return start, end | return (start, end) tuple which spans the minimal
statement region which containing the given lineno. | return (start, end) tuple which spans the minimal
statement region which containing the given lineno. | [
"return",
"(",
"start",
"end",
")",
"tuple",
"which",
"spans",
"the",
"minimal",
"statement",
"region",
"which",
"containing",
"the",
"given",
"lineno",
"."
] | def getstatementrange(self, lineno, assertion=False):
    """ return (start, end) tuple which spans the minimal
        statement region which containing the given lineno.
    """
    if not (0 <= lineno < len(self)):
        raise IndexError("lineno out of range")
    ast, start, end = getstatementrange_ast(lineno, self)
    return start, end | [
"def",
"getstatementrange",
"(",
"self",
",",
"lineno",
",",
"assertion",
"=",
"False",
")",
":",
"if",
"not",
"(",
"0",
"<=",
"lineno",
"<",
"len",
"(",
"self",
")",
")",
":",
"raise",
"IndexError",
"(",
"\"lineno out of range\"",
")",
"ast",
",",
"start",
",",
"end",
"=",
"getstatementrange_ast",
"(",
"lineno",
",",
"self",
")",
"return",
"start",
",",
"end"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/py/py/_code/source.py#L110-L117 |
|
mhammond/pywin32 | 44afd86ba8485194df93234639243252deeb40d5 | com/win32com/client/dynamic.py | python | CDispatch._get_good_object_ | (self, ob, userName=None, ReturnCLSID=None) | Given an object (usually the retval from a method), make it a good object to return.
Basically checks if it is a COM object, and wraps it up.
Also handles the fact that a retval may be a tuple of retvals | Given an object (usually the retval from a method), make it a good object to return.
Basically checks if it is a COM object, and wraps it up.
Also handles the fact that a retval may be a tuple of retvals | [
"Given",
"an",
"object",
"(",
"usually",
"the",
"retval",
"from",
"a",
"method",
")",
"make",
"it",
"a",
"good",
"object",
"to",
"return",
".",
"Basically",
"checks",
"if",
"it",
"is",
"a",
"COM",
"object",
"and",
"wraps",
"it",
"up",
".",
"Also",
"handles",
"the",
"fact",
"that",
"a",
"retval",
"may",
"be",
"a",
"tuple",
"of",
"retvals"
] | def _get_good_object_(self, ob, userName=None, ReturnCLSID=None):
    """Given an object (usually the retval from a method), make it a good object to return.
    Basically checks if it is a COM object, and wraps it up.
    Also handles the fact that a retval may be a tuple of retvals"""
    if ob is None:  # Quick exit!
        return None
    elif isinstance(ob, tuple):
        return tuple(
            map(
                lambda o, s=self, oun=userName, rc=ReturnCLSID: s._get_good_single_object_(
                    o, oun, rc
                ),
                ob,
            )
        )
    else:
        return self._get_good_single_object_(ob) | [
"def",
"_get_good_object_",
"(",
"self",
",",
"ob",
",",
"userName",
"=",
"None",
",",
"ReturnCLSID",
"=",
"None",
")",
":",
"if",
"ob",
"is",
"None",
":",
"# Quick exit!",
"return",
"None",
"elif",
"isinstance",
"(",
"ob",
",",
"tuple",
")",
":",
"return",
"tuple",
"(",
"map",
"(",
"lambda",
"o",
",",
"s",
"=",
"self",
",",
"oun",
"=",
"userName",
",",
"rc",
"=",
"ReturnCLSID",
":",
"s",
".",
"_get_good_single_object_",
"(",
"o",
",",
"oun",
",",
"rc",
")",
",",
"ob",
",",
")",
")",
"else",
":",
"return",
"self",
".",
"_get_good_single_object_",
"(",
"ob",
")"
] | https://github.com/mhammond/pywin32/blob/44afd86ba8485194df93234639243252deeb40d5/com/win32com/client/dynamic.py#L393-L409 |
||
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/pyedbglib/protocols/avr8protocol.py | python | Avr8Protocol.set_variant | (self, variant) | Sets the variant field in the config context
:param variant: type of device | Sets the variant field in the config context | [
"Sets",
"the",
"variant",
"field",
"in",
"the",
"config",
"context"
] | def set_variant(self, variant):
    """
    Sets the variant field in the config context

    :param variant: type of device
    """
    self.set_byte(self.AVR8_CTXT_CONFIG, self.AVR8_CONFIG_VARIANT, variant) | [
"def",
"set_variant",
"(",
"self",
",",
"variant",
")",
":",
"self",
".",
"set_byte",
"(",
"self",
".",
"AVR8_CTXT_CONFIG",
",",
"self",
".",
"AVR8_CONFIG_VARIANT",
",",
"variant",
")"
] | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pyedbglib/protocols/avr8protocol.py#L224-L230 |
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqt/mantidqt/widgets/samplelogs/presenter.py | python | SampleLogs.search_key_changed | (self) | When the line edit is changed, print the logs that match the search key,
and display the data. | When the line edit is changed, print the logs that match the search key,
and display the data. | [
"When",
"the",
"line",
"edit",
"is",
"changed",
"print",
"the",
"logs",
"that",
"match",
"the",
"search",
"key",
"and",
"display",
"the",
"data",
"."
] | def search_key_changed(self):
    """When the line edit is changed, print the logs that match the search key,
    and display the data.
    """
    self.update_table_with_matching_logs() | [
"def",
"search_key_changed",
"(",
"self",
")",
":",
"self",
".",
"update_table_with_matching_logs",
"(",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/samplelogs/presenter.py#L122-L126 |
||
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | python | OrderedDict.iteritems | (self) | od.iteritems -> an iterator over the (key, value) items in od | od.iteritems -> an iterator over the (key, value) items in od | [
"od",
".",
"iteritems",
"-",
">",
"an",
"iterator",
"over",
"the",
"(",
"key",
"value",
")",
"items",
"in",
"od"
] | def iteritems(self):
    'od.iteritems -> an iterator over the (key, value) items in od'
    for k in self:
        yield (k, self[k]) | [
"def",
"iteritems",
"(",
"self",
")",
":",
"for",
"k",
"in",
"self",
":",
"yield",
"(",
"k",
",",
"self",
"[",
"k",
"]",
")"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py#L164-L167 |
||
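For the `OrderedDict.iteritems` record above, a minimal Python 2 usage sketch; Python 3 dropped `iteritems`, where `items()` plays the same role:

```python
from collections import OrderedDict

od = OrderedDict([("a", 1), ("b", 2)])
# Lazily yields (key, value) pairs in insertion order; Python 2 only.
for key, value in od.iteritems():
    print("%s=%d" % (key, value))
```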
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/control_flow_v2_toggles.py | python | output_all_intermediates | (state) | Whether to output all intermediates from functional control flow ops.
The "default" behavior to is to output all intermediates when using v2 control
flow inside Keras models in graph mode (possibly inside Estimators). This is
needed to support taking gradients of v2 control flow. In graph mode, Keras
can sometimes freeze the forward graph before the gradient computation which
does not work for v2 control flow since it requires updating the forward ops
to output the needed intermediates. We work around this by proactively
outputting the needed intermediates when building the forward pass itself.
Ideally any such extra tensors should be pruned out at runtime. However, if
for any reason this doesn't work for you or if you have an infernce-only model
you can turn this behavior off using
`tf.compat.v1.experimental.output_all_intermediates(False)`.
If with the default behavior you are still seeing errors of the form
"Connecting to invalid output X of source node Y which has Z outputs" try
setting `tf.compat.v1.experimental.output_all_intermediates(True)` and
please file an issue at https://github.com/tensorflow/tensorflow/issues.
Args:
state: True, False or None. None restores the default behavior. | Whether to output all intermediates from functional control flow ops. | [
"Whether",
"to",
"output",
"all",
"intermediates",
"from",
"functional",
"control",
"flow",
"ops",
"."
] | def output_all_intermediates(state):  # pylint: disable=invalid-name
  """Whether to output all intermediates from functional control flow ops.

  The "default" behavior to is to output all intermediates when using v2 control
  flow inside Keras models in graph mode (possibly inside Estimators). This is
  needed to support taking gradients of v2 control flow. In graph mode, Keras
  can sometimes freeze the forward graph before the gradient computation which
  does not work for v2 control flow since it requires updating the forward ops
  to output the needed intermediates. We work around this by proactively
  outputting the needed intermediates when building the forward pass itself.
  Ideally any such extra tensors should be pruned out at runtime. However, if
  for any reason this doesn't work for you or if you have an infernce-only model
  you can turn this behavior off using
  `tf.compat.v1.experimental.output_all_intermediates(False)`.

  If with the default behavior you are still seeing errors of the form
  "Connecting to invalid output X of source node Y which has Z outputs" try
  setting `tf.compat.v1.experimental.output_all_intermediates(True)` and
  please file an issue at https://github.com/tensorflow/tensorflow/issues.

  Args:
    state: True, False or None. None restores the default behavior.
  """
  control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = state | [
"def",
"output_all_intermediates",
"(",
"state",
")",
":",
"# pylint: disable=invalid-name",
"control_flow_util_v2",
".",
"_EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE",
"=",
"state"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/control_flow_v2_toggles.py#L71-L94 |
||
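The toggle in the record above is exposed as `tf.compat.v1.experimental.output_all_intermediates`, per its own docstring, so a minimal sketch only assumes TensorFlow is installed:

```python
import tensorflow as tf

# For an inference-only graph-mode model, opt out of emitting all
# control-flow intermediates (passing None would restore the default).
tf.compat.v1.experimental.output_all_intermediates(False)
```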
musescore/MuseScore | a817fea23e3c2be30847b7fde5b01746222c252e | tools/crashdump/posix/generate_breakpad_symbols.py | python | mkdir_p | (path) | Simulates mkdir -p. | Simulates mkdir -p. | [
"Simulates",
"mkdir",
"-",
"p",
"."
] | def mkdir_p(path):
    """Simulates mkdir -p."""
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else: raise | [
"def",
"mkdir_p",
"(",
"path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"pass",
"else",
":",
"raise"
] | https://github.com/musescore/MuseScore/blob/a817fea23e3c2be30847b7fde5b01746222c252e/tools/crashdump/posix/generate_breakpad_symbols.py#L265-L272 |
||
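The `mkdir_p` helper above is self-contained, so repeating its definition gives a runnable usage sketch (the directory names are illustrative only):

```python
import errno
import os

def mkdir_p(path):
    """Simulates mkdir -p (as in the record above)."""
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

mkdir_p("build/symbols/x86_64")  # creates the whole chain
mkdir_p("build/symbols/x86_64")  # repeat call is a harmless no-op
```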
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py | python | Text.tag_lower | (self, tagName, belowThis=None) | Change the priority of tag TAGNAME such that it is lower
than the priority of BELOWTHIS. | Change the priority of tag TAGNAME such that it is lower
than the priority of BELOWTHIS. | [
"Change",
"the",
"priority",
"of",
"tag",
"TAGNAME",
"such",
"that",
"it",
"is",
"lower",
"than",
"the",
"priority",
"of",
"BELOWTHIS",
"."
] | def tag_lower(self, tagName, belowThis=None):
    """Change the priority of tag TAGNAME such that it is lower
    than the priority of BELOWTHIS."""
    self.tk.call(self._w, 'tag', 'lower', tagName, belowThis) | [
"def",
"tag_lower",
"(",
"self",
",",
"tagName",
",",
"belowThis",
"=",
"None",
")",
":",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"'tag'",
",",
"'lower'",
",",
"tagName",
",",
"belowThis",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L3375-L3378 |
||
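A short sketch of the `Text.tag_lower` call documented above, using the standard tkinter API (tag names are illustrative; running it requires a display):

```python
import tkinter as tk

root = tk.Tk()
text = tk.Text(root)
text.tag_configure("match", background="yellow")
text.tag_configure("selection_like", background="lightblue")
# Give "match" lower priority, so "selection_like" wins where they overlap.
text.tag_lower("match", "selection_like")
```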
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/series.py | python | Series._set_as_cached | (self, item, cacher) | Set the _cacher attribute on the calling object with a weakref to
cacher. | Set the _cacher attribute on the calling object with a weakref to
cacher. | [
"Set",
"the",
"_cacher",
"attribute",
"on",
"the",
"calling",
"object",
"with",
"a",
"weakref",
"to",
"cacher",
"."
] | def _set_as_cached(self, item, cacher) -> None:
    """
    Set the _cacher attribute on the calling object with a weakref to
    cacher.
    """
    self._cacher = (item, weakref.ref(cacher)) | [
"def",
"_set_as_cached",
"(",
"self",
",",
"item",
",",
"cacher",
")",
"->",
"None",
":",
"self",
".",
"_cacher",
"=",
"(",
"item",
",",
"weakref",
".",
"ref",
"(",
"cacher",
")",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/series.py#L1194-L1199 |
||
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/genpy/src/genpy/rostime.py | python | Duration.__truediv__ | (self, val) | Divide this duration by an integer or float
:param val: division factor ``int/float``, or :class:`Duration` to divide by
:returns: :class:`Duration` divided by val - a :class:`Duration` if divided by a number, or a number if divided by a duration | Divide this duration by an integer or float
:param val: division factor ``int/float``, or :class:`Duration` to divide by
:returns: :class:`Duration` divided by val - a :class:`Duration` if divided by a number, or a number if divided by a duration | [
"Divide",
"this",
"duration",
"by",
"an",
"integer",
"or",
"float",
":",
"param",
"val",
":",
"division",
"factor",
"int",
"/",
"float",
"or",
":",
"class",
":",
"Duration",
"to",
"divide",
"by",
":",
"returns",
":",
":",
"class",
":",
"Duration",
"divided",
"by",
"val",
"-",
"a",
":",
"class",
":",
"Duration",
"if",
"divided",
"by",
"a",
"number",
"or",
"a",
"number",
"if",
"divided",
"by",
"a",
"duration"
] | def __truediv__(self, val):
    """
    Divide this duration by an integer or float
    :param val: division factor ``int/float``, or :class:`Duration` to divide by
    :returns: :class:`Duration` divided by val - a :class:`Duration` if divided by a number, or a number if divided by a duration
    """
    if type(val) in (int, long, float):
        return Duration.from_sec(self.to_sec() / val)
    elif isinstance(val, Duration):
        return self.to_sec() / val.to_sec()
    else:
        return NotImplemented | [
"def",
"__truediv__",
"(",
"self",
",",
"val",
")",
":",
"if",
"type",
"(",
"val",
")",
"in",
"(",
"int",
",",
"long",
",",
"float",
")",
":",
"return",
"Duration",
".",
"from_sec",
"(",
"self",
".",
"to_sec",
"(",
")",
"/",
"val",
")",
"elif",
"isinstance",
"(",
"val",
",",
"Duration",
")",
":",
"return",
"self",
".",
"to_sec",
"(",
")",
"/",
"val",
".",
"to_sec",
"(",
")",
"else",
":",
"return",
"NotImplemented"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/genpy/src/genpy/rostime.py#L436-L447 |
||
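Assuming genpy is installed (it ships with ROS), the division semantics documented in the record above can be exercised directly:

```python
from genpy import Duration  # assumes a ROS/genpy installation

d = Duration.from_sec(4.0)
print((d / 2).to_sec())            # Duration / number -> Duration: 2.0
print(d / Duration.from_sec(2.0))  # Duration / Duration -> plain float: 2.0
```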
Tracktion/tracktion_engine | a52de2e05f1b831582c7c615371f82fb0f9b64e6 | doxygen/process_source_files.py | python | get_curly_brace_scope_end | (string, start_pos) | return -1 | Given a string and a starting position of an opening brace, find the
position of the closing brace. | Given a string and a starting position of an opening brace, find the
position of the closing brace. | [
"Given",
"a",
"string",
"and",
"a",
"starting",
"position",
"of",
"an",
"opening",
"brace",
"find",
"the",
"position",
"of",
"the",
"closing",
"brace",
"."
] | def get_curly_brace_scope_end(string, start_pos):
    """Given a string and a starting position of an opening brace, find the
    position of the closing brace.
    """
    start_pos += 1
    string_end = len(string)
    bracket_counter = 1
    while start_pos < string_end:
        if string[start_pos] == "{":
            bracket_counter += 1
        elif string[start_pos] == "}":
            bracket_counter -= 1
            if bracket_counter == 0:
                return start_pos
        start_pos += 1
    return -1 | [
"def",
"get_curly_brace_scope_end",
"(",
"string",
",",
"start_pos",
")",
":",
"start_pos",
"+=",
"1",
"string_end",
"=",
"len",
"(",
"string",
")",
"bracket_counter",
"=",
"1",
"while",
"start_pos",
"<",
"string_end",
":",
"if",
"string",
"[",
"start_pos",
"]",
"==",
"\"{\"",
":",
"bracket_counter",
"+=",
"1",
"elif",
"string",
"[",
"start_pos",
"]",
"==",
"\"}\"",
":",
"bracket_counter",
"-=",
"1",
"if",
"bracket_counter",
"==",
"0",
":",
"return",
"start_pos",
"start_pos",
"+=",
"1",
"return",
"-",
"1"
] | https://github.com/Tracktion/tracktion_engine/blob/a52de2e05f1b831582c7c615371f82fb0f9b64e6/doxygen/process_source_files.py#L9-L24 |
|
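A self-contained demo of the brace matcher above: the counter increments on `{`, decrements on `}`, returns the index of the brace that balances the scope, and returns -1 when nothing balances. The function body is copied from the record so the snippet runs on its own:

```python
def get_curly_brace_scope_end(string, start_pos):
    """Find the closing brace matching the opening brace at start_pos."""
    start_pos += 1
    string_end = len(string)
    bracket_counter = 1
    while start_pos < string_end:
        if string[start_pos] == "{":
            bracket_counter += 1
        elif string[start_pos] == "}":
            bracket_counter -= 1
            if bracket_counter == 0:
                return start_pos
        start_pos += 1
    return -1

src = 'class Foo { void bar() { int x = 1; } };'
open_pos = src.index('{')                      # outer opening brace
end = get_curly_brace_scope_end(src, open_pos)
print(src[open_pos:end + 1])                   # '{ void bar() { int x = 1; } }'
print(get_curly_brace_scope_end('{ unbalanced', 0))   # -1
```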
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/pylib/gyp/msvs_emulation.py | python | MsvsSettings.AdjustMidlIncludeDirs | (self, midl_include_dirs, config) | return [self.ConvertVSMacros(p, config=config) for p in includes] | Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar. | Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar. | [
"Updates",
"midl_include_dirs",
"to",
"expand",
"VS",
"specific",
"paths",
"and",
"adds",
"the",
"system",
"include",
"dirs",
"used",
"for",
"platform",
"SDK",
"and",
"similar",
"."
] | def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes] | [
"def",
"AdjustMidlIncludeDirs",
"(",
"self",
",",
"midl_include_dirs",
",",
"config",
")",
":",
"config",
"=",
"self",
".",
"_TargetConfig",
"(",
"config",
")",
"includes",
"=",
"midl_include_dirs",
"+",
"self",
".",
"msvs_system_include_dirs",
"[",
"config",
"]",
"includes",
".",
"extend",
"(",
"self",
".",
"_Setting",
"(",
"(",
"'VCMIDLTool'",
",",
"'AdditionalIncludeDirectories'",
")",
",",
"config",
",",
"default",
"=",
"[",
"]",
")",
")",
"return",
"[",
"self",
".",
"ConvertVSMacros",
"(",
"p",
",",
"config",
"=",
"config",
")",
"for",
"p",
"in",
"includes",
"]"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/tools/gyp/pylib/gyp/msvs_emulation.py#L341-L348 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/pydoc.py | python | Helper.getline | (self, prompt) | Read one line, using raw_input when available. | Read one line, using raw_input when available. | [
"Read",
"one",
"line",
"using",
"raw_input",
"when",
"available",
"."
] | def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline() | [
"def",
"getline",
"(",
"self",
",",
"prompt",
")",
":",
"if",
"self",
".",
"input",
"is",
"sys",
".",
"stdin",
":",
"return",
"raw_input",
"(",
"prompt",
")",
"else",
":",
"self",
".",
"output",
".",
"write",
"(",
"prompt",
")",
"self",
".",
"output",
".",
"flush",
"(",
")",
"return",
"self",
".",
"input",
".",
"readline",
"(",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/pydoc.py#L1771-L1778 |
||
etotheipi/BitcoinArmory | 2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98 | armoryd.py | python | Armory_Json_Rpc_Server.jsonrpc_listloadedwallets | (self) | return walletList | DESCRIPTION:
List all wallets loaded onto the armoryd server.
PARAMETERS:
None
RETURN:
A dictionary with the Base58 values of all wallets loaded in armoryd. | DESCRIPTION:
List all wallets loaded onto the armoryd server.
PARAMETERS:
None
RETURN:
A dictionary with the Base58 values of all wallets loaded in armoryd. | [
"DESCRIPTION",
":",
"List",
"all",
"wallets",
"loaded",
"onto",
"the",
"armoryd",
"server",
".",
"PARAMETERS",
":",
"None",
"RETURN",
":",
"A",
"dictionary",
"with",
"the",
"Base58",
"values",
"of",
"all",
"wallets",
"loaded",
"in",
"armoryd",
"."
] | def jsonrpc_listloadedwallets(self):
"""
DESCRIPTION:
List all wallets loaded onto the armoryd server.
PARAMETERS:
None
RETURN:
A dictionary with the Base58 values of all wallets loaded in armoryd.
"""
# Return a dictionary with a string as the key and a wallet B58 value as
# the value.
curKey = 1
walletList = {}
for k in self.serverWltMap.keys():
curWltStr = 'Wallet %04d' % curKey
walletList[curWltStr] = k
curKey += 1
return walletList | [
"def",
"jsonrpc_listloadedwallets",
"(",
"self",
")",
":",
"# Return a dictionary with a string as the key and a wallet B58 value as",
"# the value.",
"curKey",
"=",
"1",
"walletList",
"=",
"{",
"}",
"for",
"k",
"in",
"self",
".",
"serverWltMap",
".",
"keys",
"(",
")",
":",
"curWltStr",
"=",
"'Wallet %04d'",
"%",
"curKey",
"walletList",
"[",
"curWltStr",
"]",
"=",
"k",
"curKey",
"+=",
"1",
"return",
"walletList"
] | https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/armoryd.py#L2616-L2634 |
|
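The only non-obvious detail in the record above is the key format: loaded wallets are numbered `Wallet %04d` in whatever order the server's wallet map yields them. A short sketch of that numbering, with hypothetical Base58 IDs standing in for real wallet identifiers:

```python
# Hypothetical wallet IDs; only the 'Wallet %04d' numbering is from the record.
server_wlt_map = {'2ZtkMQ5sn': None, '3Ph6kX1wa': None}  # Base58 ID -> wallet
wallet_list = {}
for cur_key, b58_id in enumerate(server_wlt_map, start=1):
    wallet_list['Wallet %04d' % cur_key] = b58_id
print(wallet_list)  # {'Wallet 0001': '2ZtkMQ5sn', 'Wallet 0002': '3Ph6kX1wa'}
```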
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py3/sklearn/covariance/_graph_lasso.py | python | graphical_lasso | (emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False) | l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphicalLasso, GraphicalLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized. | l1-penalized covariance estimator | [
"l1",
"-",
"penalized",
"covariance",
"estimator"
] | def graphical_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphicalLasso, GraphicalLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows. Beside, this takes
# conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = linalg.pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
    # The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
# set a sub_covariance buffer
sub_covariance = np.copy(covariance_[1:, 1:], order='C')
for i in range(max_iter):
for idx in range(n_features):
# To keep the contiguous matrix `sub_covariance` equal to
# covariance_[indices != idx].T[indices != idx]
# we only need to update 1 column and 1 line when idx changes
if idx > 0:
di = idx - 1
sub_covariance[di] = covariance_[di][indices != idx]
sub_covariance[:, di] = covariance_[:, di][indices != idx]
else:
sub_covariance[:] = covariance_[1:, 1:]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance,
row, row, max_iter, enet_tol,
check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path_gram(
Xy=row, Gram=sub_covariance, n_samples=row.size,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
eps=eps, method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
if not np.isfinite(precision_.sum()):
raise FloatingPointError('The system is too ill-conditioned '
'for this solver')
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print('[graphical_lasso] Iteration '
'% 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graphical_lasso: did not converge after '
'%i iteration: dual gap: %.3e'
% (max_iter, d_gap), ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_ | [
"def",
"graphical_lasso",
"(",
"emp_cov",
",",
"alpha",
",",
"cov_init",
"=",
"None",
",",
"mode",
"=",
"'cd'",
",",
"tol",
"=",
"1e-4",
",",
"enet_tol",
"=",
"1e-4",
",",
"max_iter",
"=",
"100",
",",
"verbose",
"=",
"False",
",",
"return_costs",
"=",
"False",
",",
"eps",
"=",
"np",
".",
"finfo",
"(",
"np",
".",
"float64",
")",
".",
"eps",
",",
"return_n_iter",
"=",
"False",
")",
":",
"_",
",",
"n_features",
"=",
"emp_cov",
".",
"shape",
"if",
"alpha",
"==",
"0",
":",
"if",
"return_costs",
":",
"precision_",
"=",
"linalg",
".",
"inv",
"(",
"emp_cov",
")",
"cost",
"=",
"-",
"2.",
"*",
"log_likelihood",
"(",
"emp_cov",
",",
"precision_",
")",
"cost",
"+=",
"n_features",
"*",
"np",
".",
"log",
"(",
"2",
"*",
"np",
".",
"pi",
")",
"d_gap",
"=",
"np",
".",
"sum",
"(",
"emp_cov",
"*",
"precision_",
")",
"-",
"n_features",
"if",
"return_n_iter",
":",
"return",
"emp_cov",
",",
"precision_",
",",
"(",
"cost",
",",
"d_gap",
")",
",",
"0",
"else",
":",
"return",
"emp_cov",
",",
"precision_",
",",
"(",
"cost",
",",
"d_gap",
")",
"else",
":",
"if",
"return_n_iter",
":",
"return",
"emp_cov",
",",
"linalg",
".",
"inv",
"(",
"emp_cov",
")",
",",
"0",
"else",
":",
"return",
"emp_cov",
",",
"linalg",
".",
"inv",
"(",
"emp_cov",
")",
"if",
"cov_init",
"is",
"None",
":",
"covariance_",
"=",
"emp_cov",
".",
"copy",
"(",
")",
"else",
":",
"covariance_",
"=",
"cov_init",
".",
"copy",
"(",
")",
"# As a trivial regularization (Tikhonov like), we scale down the",
"# off-diagonal coefficients of our starting point: This is needed, as",
"# in the cross-validation the cov_init can easily be",
"# ill-conditioned, and the CV loop blows. Beside, this takes",
"# conservative stand-point on the initial conditions, and it tends to",
"# make the convergence go faster.",
"covariance_",
"*=",
"0.95",
"diagonal",
"=",
"emp_cov",
".",
"flat",
"[",
":",
":",
"n_features",
"+",
"1",
"]",
"covariance_",
".",
"flat",
"[",
":",
":",
"n_features",
"+",
"1",
"]",
"=",
"diagonal",
"precision_",
"=",
"linalg",
".",
"pinvh",
"(",
"covariance_",
")",
"indices",
"=",
"np",
".",
"arange",
"(",
"n_features",
")",
"costs",
"=",
"list",
"(",
")",
"# The different l1 regression solver have different numerical errors",
"if",
"mode",
"==",
"'cd'",
":",
"errors",
"=",
"dict",
"(",
"over",
"=",
"'raise'",
",",
"invalid",
"=",
"'ignore'",
")",
"else",
":",
"errors",
"=",
"dict",
"(",
"invalid",
"=",
"'raise'",
")",
"try",
":",
"# be robust to the max_iter=0 edge case, see:",
"# https://github.com/scikit-learn/scikit-learn/issues/4134",
"d_gap",
"=",
"np",
".",
"inf",
"# set a sub_covariance buffer",
"sub_covariance",
"=",
"np",
".",
"copy",
"(",
"covariance_",
"[",
"1",
":",
",",
"1",
":",
"]",
",",
"order",
"=",
"'C'",
")",
"for",
"i",
"in",
"range",
"(",
"max_iter",
")",
":",
"for",
"idx",
"in",
"range",
"(",
"n_features",
")",
":",
"# To keep the contiguous matrix `sub_covariance` equal to",
"# covariance_[indices != idx].T[indices != idx]",
"# we only need to update 1 column and 1 line when idx changes",
"if",
"idx",
">",
"0",
":",
"di",
"=",
"idx",
"-",
"1",
"sub_covariance",
"[",
"di",
"]",
"=",
"covariance_",
"[",
"di",
"]",
"[",
"indices",
"!=",
"idx",
"]",
"sub_covariance",
"[",
":",
",",
"di",
"]",
"=",
"covariance_",
"[",
":",
",",
"di",
"]",
"[",
"indices",
"!=",
"idx",
"]",
"else",
":",
"sub_covariance",
"[",
":",
"]",
"=",
"covariance_",
"[",
"1",
":",
",",
"1",
":",
"]",
"row",
"=",
"emp_cov",
"[",
"idx",
",",
"indices",
"!=",
"idx",
"]",
"with",
"np",
".",
"errstate",
"(",
"*",
"*",
"errors",
")",
":",
"if",
"mode",
"==",
"'cd'",
":",
"# Use coordinate descent",
"coefs",
"=",
"-",
"(",
"precision_",
"[",
"indices",
"!=",
"idx",
",",
"idx",
"]",
"/",
"(",
"precision_",
"[",
"idx",
",",
"idx",
"]",
"+",
"1000",
"*",
"eps",
")",
")",
"coefs",
",",
"_",
",",
"_",
",",
"_",
"=",
"cd_fast",
".",
"enet_coordinate_descent_gram",
"(",
"coefs",
",",
"alpha",
",",
"0",
",",
"sub_covariance",
",",
"row",
",",
"row",
",",
"max_iter",
",",
"enet_tol",
",",
"check_random_state",
"(",
"None",
")",
",",
"False",
")",
"else",
":",
"# Use LARS",
"_",
",",
"_",
",",
"coefs",
"=",
"lars_path_gram",
"(",
"Xy",
"=",
"row",
",",
"Gram",
"=",
"sub_covariance",
",",
"n_samples",
"=",
"row",
".",
"size",
",",
"alpha_min",
"=",
"alpha",
"/",
"(",
"n_features",
"-",
"1",
")",
",",
"copy_Gram",
"=",
"True",
",",
"eps",
"=",
"eps",
",",
"method",
"=",
"'lars'",
",",
"return_path",
"=",
"False",
")",
"# Update the precision matrix",
"precision_",
"[",
"idx",
",",
"idx",
"]",
"=",
"(",
"1.",
"/",
"(",
"covariance_",
"[",
"idx",
",",
"idx",
"]",
"-",
"np",
".",
"dot",
"(",
"covariance_",
"[",
"indices",
"!=",
"idx",
",",
"idx",
"]",
",",
"coefs",
")",
")",
")",
"precision_",
"[",
"indices",
"!=",
"idx",
",",
"idx",
"]",
"=",
"(",
"-",
"precision_",
"[",
"idx",
",",
"idx",
"]",
"*",
"coefs",
")",
"precision_",
"[",
"idx",
",",
"indices",
"!=",
"idx",
"]",
"=",
"(",
"-",
"precision_",
"[",
"idx",
",",
"idx",
"]",
"*",
"coefs",
")",
"coefs",
"=",
"np",
".",
"dot",
"(",
"sub_covariance",
",",
"coefs",
")",
"covariance_",
"[",
"idx",
",",
"indices",
"!=",
"idx",
"]",
"=",
"coefs",
"covariance_",
"[",
"indices",
"!=",
"idx",
",",
"idx",
"]",
"=",
"coefs",
"if",
"not",
"np",
".",
"isfinite",
"(",
"precision_",
".",
"sum",
"(",
")",
")",
":",
"raise",
"FloatingPointError",
"(",
"'The system is too ill-conditioned '",
"'for this solver'",
")",
"d_gap",
"=",
"_dual_gap",
"(",
"emp_cov",
",",
"precision_",
",",
"alpha",
")",
"cost",
"=",
"_objective",
"(",
"emp_cov",
",",
"precision_",
",",
"alpha",
")",
"if",
"verbose",
":",
"print",
"(",
"'[graphical_lasso] Iteration '",
"'% 3i, cost % 3.2e, dual gap %.3e'",
"%",
"(",
"i",
",",
"cost",
",",
"d_gap",
")",
")",
"if",
"return_costs",
":",
"costs",
".",
"append",
"(",
"(",
"cost",
",",
"d_gap",
")",
")",
"if",
"np",
".",
"abs",
"(",
"d_gap",
")",
"<",
"tol",
":",
"break",
"if",
"not",
"np",
".",
"isfinite",
"(",
"cost",
")",
"and",
"i",
">",
"0",
":",
"raise",
"FloatingPointError",
"(",
"'Non SPD result: the system is '",
"'too ill-conditioned for this solver'",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"'graphical_lasso: did not converge after '",
"'%i iteration: dual gap: %.3e'",
"%",
"(",
"max_iter",
",",
"d_gap",
")",
",",
"ConvergenceWarning",
")",
"except",
"FloatingPointError",
"as",
"e",
":",
"e",
".",
"args",
"=",
"(",
"e",
".",
"args",
"[",
"0",
"]",
"+",
"'. The system is too ill-conditioned for this solver'",
",",
")",
"raise",
"e",
"if",
"return_costs",
":",
"if",
"return_n_iter",
":",
"return",
"covariance_",
",",
"precision_",
",",
"costs",
",",
"i",
"+",
"1",
"else",
":",
"return",
"covariance_",
",",
"precision_",
",",
"costs",
"else",
":",
"if",
"return_n_iter",
":",
"return",
"covariance_",
",",
"precision_",
",",
"i",
"+",
"1",
"else",
":",
"return",
"covariance_",
",",
"precision_"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/covariance/_graph_lasso.py#L79-L278 |
||
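This routine is the internal solver behind scikit-learn's public `sklearn.covariance.graphical_lasso`. A hedged usage sketch, assuming scikit-learn is installed: estimate a sparse precision matrix from an empirical covariance, where a larger `alpha` drives more off-diagonal entries of the precision toward zero:

```python
# Sparse inverse-covariance estimation via the public graphical_lasso wrapper.
import numpy as np
from sklearn.covariance import graphical_lasso

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 4))      # 200 samples, 4 features
X -= X.mean(axis=0)
emp_cov = X.T @ X / X.shape[0]         # empirical covariance (4x4)

cov, prec = graphical_lasso(emp_cov, alpha=0.2)  # higher alpha -> sparser
print(np.round(prec, 2))               # off-diagonals shrunk toward 0
```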
dmlc/xgboost | 2775c2a1abd4b5b759ff517617434c8b9aeb4cc0 | python-package/xgboost/core.py | python | DMatrix.get_label | (self) | return self.get_float_info('label') | Get the label of the DMatrix.
Returns
-------
label : array | Get the label of the DMatrix. | [
"Get",
"the",
"label",
"of",
"the",
"DMatrix",
"."
] | def get_label(self) -> np.ndarray:
"""Get the label of the DMatrix.
Returns
-------
label : array
"""
return self.get_float_info('label') | [
"def",
"get_label",
"(",
"self",
")",
"->",
"np",
".",
"ndarray",
":",
"return",
"self",
".",
"get_float_info",
"(",
"'label'",
")"
] | https://github.com/dmlc/xgboost/blob/2775c2a1abd4b5b759ff517617434c8b9aeb4cc0/python-package/xgboost/core.py#L867-L874 |
|
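A round-trip sketch, assuming the `xgboost` package is installed: labels handed to the `DMatrix` constructor come back from `get_label()` as a float32 array:

```python
import numpy as np
import xgboost as xgb

X = np.arange(12, dtype=np.float32).reshape(4, 3)
y = np.array([0, 1, 1, 0], dtype=np.float32)
dtrain = xgb.DMatrix(X, label=y)
print(dtrain.get_label())   # [0. 1. 1. 0.]
```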
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py | python | Resolver._populate_link | (self, req) | Ensure that if a link can be found for this, that it is found.
Note that req.link may still be None - if the requirement is already
installed and not needed to be upgraded based on the return value of
_is_upgrade_allowed().
If preparer.require_hashes is True, don't use the wheel cache, because
cached wheels, always built locally, have different hashes than the
files downloaded from the index server and thus throw false hash
mismatches. Furthermore, cached wheels at present have nondeterministic
contents due to file modification times. | Ensure that if a link can be found for this, that it is found. | [
"Ensure",
"that",
"if",
"a",
"link",
"can",
"be",
"found",
"for",
"this",
"that",
"it",
"is",
"found",
"."
] | def _populate_link(self, req):
# type: (InstallRequirement) -> None
"""Ensure that if a link can be found for this, that it is found.
Note that req.link may still be None - if the requirement is already
installed and not needed to be upgraded based on the return value of
_is_upgrade_allowed().
If preparer.require_hashes is True, don't use the wheel cache, because
cached wheels, always built locally, have different hashes than the
files downloaded from the index server and thus throw false hash
    mismatches. Furthermore, cached wheels at present have nondeterministic
contents due to file modification times.
"""
if req.link is None:
req.link = self._find_requirement_link(req)
if self.wheel_cache is None or self.preparer.require_hashes:
return
cache_entry = self.wheel_cache.get_cache_entry(
link=req.link,
package_name=req.name,
supported_tags=get_supported(),
)
if cache_entry is not None:
logger.debug('Using cached wheel link: %s', cache_entry.link)
if req.link is req.original_link and cache_entry.persistent:
req.original_link_is_in_wheel_cache = True
req.link = cache_entry.link | [
"def",
"_populate_link",
"(",
"self",
",",
"req",
")",
":",
"# type: (InstallRequirement) -> None",
"if",
"req",
".",
"link",
"is",
"None",
":",
"req",
".",
"link",
"=",
"self",
".",
"_find_requirement_link",
"(",
"req",
")",
"if",
"self",
".",
"wheel_cache",
"is",
"None",
"or",
"self",
".",
"preparer",
".",
"require_hashes",
":",
"return",
"cache_entry",
"=",
"self",
".",
"wheel_cache",
".",
"get_cache_entry",
"(",
"link",
"=",
"req",
".",
"link",
",",
"package_name",
"=",
"req",
".",
"name",
",",
"supported_tags",
"=",
"get_supported",
"(",
")",
",",
")",
"if",
"cache_entry",
"is",
"not",
"None",
":",
"logger",
".",
"debug",
"(",
"'Using cached wheel link: %s'",
",",
"cache_entry",
".",
"link",
")",
"if",
"req",
".",
"link",
"is",
"req",
".",
"original_link",
"and",
"cache_entry",
".",
"persistent",
":",
"req",
".",
"original_link_is_in_wheel_cache",
"=",
"True",
"req",
".",
"link",
"=",
"cache_entry",
".",
"link"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py#L287-L315 |
||
esa/pagmo | 80281d549c8f1b470e1489a5d37c8f06b2e429c0 | PyGMO/problem/_gtop.py | python | _gtoc_1_ctor | (self) | Constructs a GTOC 1 Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
Best known global minima is at -1,581,950
USAGE: problem.gtoc_1() | Constructs a GTOC 1 Problem (Box-Constrained Continuous Single-Objective) | [
"Constructs",
"a",
"GTOC",
"1",
"Problem",
"(",
"Box",
"-",
"Constrained",
"Continuous",
"Single",
"-",
"Objective",
")"
] | def _gtoc_1_ctor(self):
"""
Constructs a GTOC 1 Problem (Box-Constrained Continuous Single-Objective)
NOTE: This problem (MGA) belongs to the GTOP database [http://www.esa.int/gsp/ACT/inf/op/globopt.htm]
Best known global minima is at -1,581,950
USAGE: problem.gtoc_1()
"""
# We construct the arg list for the original constructor exposed by
# boost_python
arg_list = []
self._orig_init(*arg_list) | [
"def",
"_gtoc_1_ctor",
"(",
"self",
")",
":",
"# We construct the arg list for the original constructor exposed by",
"# boost_python",
"arg_list",
"=",
"[",
"]",
"self",
".",
"_orig_init",
"(",
"*",
"arg_list",
")"
] | https://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/PyGMO/problem/_gtop.py#L30-L45 |
||
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/definition.py | python | _get_or_define_module | (full_name, modules) | return module | Helper method for defining new modules.
Args:
full_name: Fully qualified name of module to create or return.
modules: Dictionary of all modules. Defaults to sys.modules.
Returns:
Named module if found in 'modules', else creates new module and inserts in
'modules'. Will also construct parent modules if necessary. | Helper method for defining new modules. | [
"Helper",
"method",
"for",
"defining",
"new",
"modules",
"."
] | def _get_or_define_module(full_name, modules):
"""Helper method for defining new modules.
Args:
full_name: Fully qualified name of module to create or return.
modules: Dictionary of all modules. Defaults to sys.modules.
Returns:
Named module if found in 'modules', else creates new module and inserts in
'modules'. Will also construct parent modules if necessary.
"""
module = modules.get(full_name)
if not module:
module = types.ModuleType(full_name)
modules[full_name] = module
split_name = full_name.rsplit('.', 1)
if len(split_name) > 1:
parent_module_name, sub_module_name = split_name
parent_module = _get_or_define_module(parent_module_name, modules)
setattr(parent_module, sub_module_name, module)
return module | [
"def",
"_get_or_define_module",
"(",
"full_name",
",",
"modules",
")",
":",
"module",
"=",
"modules",
".",
"get",
"(",
"full_name",
")",
"if",
"not",
"module",
":",
"module",
"=",
"types",
".",
"ModuleType",
"(",
"full_name",
")",
"modules",
"[",
"full_name",
"]",
"=",
"module",
"split_name",
"=",
"full_name",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"if",
"len",
"(",
"split_name",
")",
">",
"1",
":",
"parent_module_name",
",",
"sub_module_name",
"=",
"split_name",
"parent_module",
"=",
"_get_or_define_module",
"(",
"parent_module_name",
",",
"modules",
")",
"setattr",
"(",
"parent_module",
",",
"sub_module_name",
",",
"module",
")",
"return",
"module"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/definition.py#L66-L88 |
|
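A standalone re-sketch of the helper above, runnable without protorpc: each missing dotted parent is built on demand, registered in the module mapping, and the child is attached as an attribute of its parent. A private dict replaces `sys.modules` here to avoid side effects:

```python
# Re-sketch of _get_or_define_module; not the protorpc original.
import sys
import types

def get_or_define_module(full_name, modules=sys.modules):
    module = modules.get(full_name)
    if not module:
        module = types.ModuleType(full_name)
        modules[full_name] = module
        parent, _, child = full_name.rpartition('.')
        if parent:  # recursively ensure the parent exists, then attach child
            setattr(get_or_define_module(parent, modules), child, module)
    return module

mods = {}
m = get_or_define_module('acme.rpc.messages', mods)
print(sorted(mods))                    # ['acme', 'acme.rpc', 'acme.rpc.messages']
print(mods['acme'].rpc.messages is m)  # True
```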
sslab-gatech/qsym | 78702ba8928519ffb9beb7859ec2f7ddce2b2fe4 | third_party/pin-2.14-71313-gcc.4.4.7-linux/source/tools/Utils/installer.py | python | walk | (top, options) | return 0 | Set up the env dictionary, then iterate the directory structure,
finding directories of tools that need to be installed for testing. | Set up the env dictionary, then iterate the directory structure,
finding directories of tools that need to be installed for testing. | [
"Set",
"up",
"the",
"env",
"dictionary",
"then",
"iterate",
"the",
"directory",
"structure",
"finding",
"directories",
"of",
"tools",
"that",
"need",
"to",
"be",
"installed",
"for",
"testing",
"."
] | def walk(top, options):
"""
Set up the env dictionary, then iterate the directory structure,
finding directories of tools that need to be installed for testing.
"""
try:
remote_kit_base = os.environ['REMOTE_ROOT']
cmd_prefix = os.environ.get('CMD_PREFIX', '')
device_type = os.environ['DEVICE_TYPE']
device_id = os.environ['REMOTE_DEVICE']
target_arch = os.environ['TARGET']
except KeyError:
warn("Some required environment variables aren't set. Make sure CMD_PREFIX, REMOTE_DEVICE and REMOTE_ROOT are defined'")
return 1
if device_type == 'android':
prop = android_properties
elif device_type == 'mic':
prop = mic_properties
else:
print "Error: unexpected device type, installer cannot continue. Device type is '%s'." % device_type
return 1
env = { 'device_type': device_type,
'device_id' : device_id,
'cmd_prefix' : cmd_prefix,
'target_arch': target_arch,
}
for root, dirs, files in os.walk(top, options):
if 'obj-' + target_arch in dirs:
info('in dir ' + root)
env['root'] = root
env['filename'] = os.path.basename(root) + ".tar.bz2"
env['remote_path'] = os.path.join(remote_kit_base, "source", "tools", os.path.basename(root))
if options.clean:
execute(prop['rmdir_cmd'], env)
continue
try:
archive(env)
execute(prop['mkdir_cmd'], env)
execute(prop['push_cmd'], env)
execute(prop['extract_cmd'], env)
if device_type == 'android' and glob.glob('*.apk') != []:
apks = glob.glob('*.apk')
for apk in apks:
env['apk'] = apk
execute(prop['install_apk_cmd'],env)
if 'apk' in env:
del env['apk']
except subprocess.CalledProcessError as cpe:
warn('Could not install in directory ' + root + '. Error code = ' + str(cpe.returncode) + '. cmd = ' + str(cpe.cmd) + '.' )
return 1
return 0 | [
"def",
"walk",
"(",
"top",
",",
"options",
")",
":",
"try",
":",
"remote_kit_base",
"=",
"os",
".",
"environ",
"[",
"'REMOTE_ROOT'",
"]",
"cmd_prefix",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'CMD_PREFIX'",
",",
"''",
")",
"device_type",
"=",
"os",
".",
"environ",
"[",
"'DEVICE_TYPE'",
"]",
"device_id",
"=",
"os",
".",
"environ",
"[",
"'REMOTE_DEVICE'",
"]",
"target_arch",
"=",
"os",
".",
"environ",
"[",
"'TARGET'",
"]",
"except",
"KeyError",
":",
"warn",
"(",
"\"Some required environment variables aren't set. Make sure CMD_PREFIX, REMOTE_DEVICE and REMOTE_ROOT are defined'\"",
")",
"return",
"1",
"if",
"device_type",
"==",
"'android'",
":",
"prop",
"=",
"android_properties",
"elif",
"device_type",
"==",
"'mic'",
":",
"prop",
"=",
"mic_properties",
"else",
":",
"print",
"\"Error: unexpected device type, installer cannot continue. Device type is '%s'.\"",
"%",
"device_type",
"return",
"1",
"env",
"=",
"{",
"'device_type'",
":",
"device_type",
",",
"'device_id'",
":",
"device_id",
",",
"'cmd_prefix'",
":",
"cmd_prefix",
",",
"'target_arch'",
":",
"target_arch",
",",
"}",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"top",
",",
"options",
")",
":",
"if",
"'obj-'",
"+",
"target_arch",
"in",
"dirs",
":",
"info",
"(",
"'in dir '",
"+",
"root",
")",
"env",
"[",
"'root'",
"]",
"=",
"root",
"env",
"[",
"'filename'",
"]",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"+",
"\".tar.bz2\"",
"env",
"[",
"'remote_path'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"remote_kit_base",
",",
"\"source\"",
",",
"\"tools\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
")",
"if",
"options",
".",
"clean",
":",
"execute",
"(",
"prop",
"[",
"'rmdir_cmd'",
"]",
",",
"env",
")",
"continue",
"try",
":",
"archive",
"(",
"env",
")",
"execute",
"(",
"prop",
"[",
"'mkdir_cmd'",
"]",
",",
"env",
")",
"execute",
"(",
"prop",
"[",
"'push_cmd'",
"]",
",",
"env",
")",
"execute",
"(",
"prop",
"[",
"'extract_cmd'",
"]",
",",
"env",
")",
"if",
"device_type",
"==",
"'android'",
"and",
"glob",
".",
"glob",
"(",
"'*.apk'",
")",
"!=",
"[",
"]",
":",
"apks",
"=",
"glob",
".",
"glob",
"(",
"'*.apk'",
")",
"for",
"apk",
"in",
"apks",
":",
"env",
"[",
"'apk'",
"]",
"=",
"apk",
"execute",
"(",
"prop",
"[",
"'install_apk_cmd'",
"]",
",",
"env",
")",
"if",
"'apk'",
"in",
"env",
":",
"del",
"env",
"[",
"'apk'",
"]",
"except",
"subprocess",
".",
"CalledProcessError",
"as",
"cpe",
":",
"warn",
"(",
"'Could not install in directory '",
"+",
"root",
"+",
"'. Error code = '",
"+",
"str",
"(",
"cpe",
".",
"returncode",
")",
"+",
"'. cmd = '",
"+",
"str",
"(",
"cpe",
".",
"cmd",
")",
"+",
"'.'",
")",
"return",
"1",
"return",
"0"
] | https://github.com/sslab-gatech/qsym/blob/78702ba8928519ffb9beb7859ec2f7ddce2b2fe4/third_party/pin-2.14-71313-gcc.4.4.7-linux/source/tools/Utils/installer.py#L68-L120 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_misc.py | python | DateTime.__ge__ | (*args, **kwargs) | return _misc_.DateTime___ge__(*args, **kwargs) | __ge__(self, DateTime other) -> bool | __ge__(self, DateTime other) -> bool | [
"__ge__",
"(",
"self",
"DateTime",
"other",
")",
"-",
">",
"bool"
] | def __ge__(*args, **kwargs):
"""__ge__(self, DateTime other) -> bool"""
return _misc_.DateTime___ge__(*args, **kwargs) | [
"def",
"__ge__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"DateTime___ge__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L4118-L4120 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | deps/src/libxml2-2.9.1/python/libxml2class.py | python | xpathContext.contextNode | (self) | return __tmp | Get the current node from an xpathContext | Get the current node from an xpathContext | [
"Get",
"the",
"current",
"node",
"from",
"an",
"xpathContext"
] | def contextNode(self):
"""Get the current node from an xpathContext """
ret = libxml2mod.xmlXPathGetContextNode(self._o)
if ret is None:raise xpathError('xmlXPathGetContextNode() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"contextNode",
"(",
"self",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlXPathGetContextNode",
"(",
"self",
".",
"_o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"xpathError",
"(",
"'xmlXPathGetContextNode() failed'",
")",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L6478-L6483 |
|
google/perfetto | fe68c7a7f7657aa71ced68efb126dcac4107c745 | python/perfetto/trace_uri_resolver/resolver.py | python | TraceUriResolver.resolve | (self) | Resolves a list of traces.
Subclasses should implement this method and resolve the parameters
specified in the constructor to a list of traces. | Resolves a list of traces. | [
"Resolves",
"a",
"list",
"of",
"traces",
"."
] | def resolve(self) -> List['TraceUriResolver.Result']:
"""Resolves a list of traces.
Subclasses should implement this method and resolve the parameters
specified in the constructor to a list of traces."""
raise Exception("resolve is unimplemented for this resolver") | [
"def",
"resolve",
"(",
"self",
")",
"->",
"List",
"[",
"'TraceUriResolver.Result'",
"]",
":",
"raise",
"Exception",
"(",
"\"resolve is unimplemented for this resolver\"",
")"
] | https://github.com/google/perfetto/blob/fe68c7a7f7657aa71ced68efb126dcac4107c745/python/perfetto/trace_uri_resolver/resolver.py#L84-L89 |
||
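A hypothetical subclass sketch of the intended pattern: a concrete resolver stores its URI parameters in the constructor and maps them to traces in `resolve()`. The `PREFIX` usage and the `Result(trace=...)` constructor signature below are assumptions for illustration, not verified perfetto API:

```python
# Hypothetical resolver; assumes the perfetto Python package is installed.
import glob
from typing import List

from perfetto.trace_uri_resolver.resolver import TraceUriResolver

class DirTraceUriResolver(TraceUriResolver):
    PREFIX = 'dir'  # would serve URIs like 'dir:path=/tmp/traces' (assumed)

    def __init__(self, path: str):
        self.path = path

    def resolve(self) -> List['TraceUriResolver.Result']:
        # Map the constructor parameter to concrete trace files.
        return [
            TraceUriResolver.Result(trace=p)  # assumed constructor signature
            for p in glob.glob(self.path + '/*.perfetto-trace')
        ]
```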
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/src/robotsim.py | python | RobotModelDriver.getValue | (self) | return _robotsim.RobotModelDriver_getValue(self) | r"""
getValue(RobotModelDriver self) -> double
Returns the current driver value from the robot's config. | r"""
getValue(RobotModelDriver self) -> double | [
"r",
"getValue",
"(",
"RobotModelDriver",
"self",
")",
"-",
">",
"double"
] | def getValue(self) -> "double":
r"""
getValue(RobotModelDriver self) -> double
Returns the current driver value from the robot's config.
"""
return _robotsim.RobotModelDriver_getValue(self) | [
"def",
"getValue",
"(",
"self",
")",
"->",
"\"double\"",
":",
"return",
"_robotsim",
".",
"RobotModelDriver_getValue",
"(",
"self",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/src/robotsim.py#L4677-L4685 |
|
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | scripts/SANS/isis_instrument.py | python | ISISInstrument.reset_TOFs_for_ROI | (self) | Reset background region set by set_TOFs for ROI | Reset background region set by set_TOFs for ROI | [
"Reset",
"background",
"region",
"set",
"by",
"set_TOFs",
"for",
"ROI"
] | def reset_TOFs_for_ROI(self):
"""
Reset background region set by set_TOFs for ROI
"""
self._back_start_ROI = None
self._back_end_ROI = None | [
"def",
"reset_TOFs_for_ROI",
"(",
"self",
")",
":",
"self",
".",
"_back_start_ROI",
"=",
"None",
"self",
".",
"_back_end_ROI",
"=",
"None"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/isis_instrument.py#L713-L718 |
||
MegEngine/MegEngine | ce9ad07a27ec909fb8db4dd67943d24ba98fb93a | imperative/python/megengine/functional/tensor.py | python | expand_dims | (inp: Tensor, axis: Union[int, Sequence[int]]) | return result | r"""Adds dimension before given axis.
Args:
inp: input tensor.
axis: place of new axes.
Returns:
output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor([1, 2])
out = F.expand_dims(x, 0)
print(out.numpy().shape)
Outputs:
.. testoutput::
(1, 2) | r"""Adds dimension before given axis. | [
"r",
"Adds",
"dimension",
"before",
"given",
"axis",
"."
] | def expand_dims(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
r"""Adds dimension before given axis.
Args:
inp: input tensor.
axis: place of new axes.
Returns:
output tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor([1, 2])
out = F.expand_dims(x, 0)
print(out.numpy().shape)
Outputs:
.. testoutput::
(1, 2)
"""
def get_axes():
try:
return [int(axis)]
except (TypeError, ValueError):
pass
return list(map(int, axis))
axis = get_axes()
try:
ndim = inp.ndim + len(axis)
axis = sorted(i + ndim if i < 0 else i for i in axis)
except ValueError:
if any([ind < 0 for ind in axis]):
raise IndexError(
"Does not support negative index when tensor's ndim is unknown"
)
axis = sorted(axis)
assert axis, "axis could not be empty"
op = builtin.AddAxis(axis=axis)
(result,) = apply(op, inp)
return result | [
"def",
"expand_dims",
"(",
"inp",
":",
"Tensor",
",",
"axis",
":",
"Union",
"[",
"int",
",",
"Sequence",
"[",
"int",
"]",
"]",
")",
"->",
"Tensor",
":",
"def",
"get_axes",
"(",
")",
":",
"try",
":",
"return",
"[",
"int",
"(",
"axis",
")",
"]",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"pass",
"return",
"list",
"(",
"map",
"(",
"int",
",",
"axis",
")",
")",
"axis",
"=",
"get_axes",
"(",
")",
"try",
":",
"ndim",
"=",
"inp",
".",
"ndim",
"+",
"len",
"(",
"axis",
")",
"axis",
"=",
"sorted",
"(",
"i",
"+",
"ndim",
"if",
"i",
"<",
"0",
"else",
"i",
"for",
"i",
"in",
"axis",
")",
"except",
"ValueError",
":",
"if",
"any",
"(",
"[",
"ind",
"<",
"0",
"for",
"ind",
"in",
"axis",
"]",
")",
":",
"raise",
"IndexError",
"(",
"\"Does not support negative index when tensor's ndim is unknown\"",
")",
"axis",
"=",
"sorted",
"(",
"axis",
")",
"assert",
"axis",
",",
"\"axis could not be empty\"",
"op",
"=",
"builtin",
".",
"AddAxis",
"(",
"axis",
"=",
"axis",
")",
"(",
"result",
",",
")",
"=",
"apply",
"(",
"op",
",",
"inp",
")",
"return",
"result"
] | https://github.com/MegEngine/MegEngine/blob/ce9ad07a27ec909fb8db4dd67943d24ba98fb93a/imperative/python/megengine/functional/tensor.py#L954-L1003 |
|
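The semantics match NumPy's `expand_dims`, so the behavior can be sketched without megengine installed; like the record's implementation, NumPy also accepts a sequence of axes:

```python
# NumPy analogue of the axis insertion done by megengine's expand_dims.
import numpy as np

x = np.array([1, 2])
print(np.expand_dims(x, 0).shape)        # (1, 2) -- axis added in front
print(np.expand_dims(x, 1).shape)        # (2, 1)
print(np.expand_dims(x, (0, 2)).shape)   # (1, 2, 1) -- multiple axes at once
```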
mingchen/protobuf-ios | 0958df34558cd54cb7b6e6ca5c8855bf3d475046 | compiler/python/google/protobuf/internal/decoder.py | python | Decoder.ReadFixed64 | (self) | return self._stream.ReadLittleEndian64() | Reads and returns an unsigned, fixed-width, 64-bit integer. | Reads and returns an unsigned, fixed-width, 64-bit integer. | [
"Reads",
"and",
"returns",
"an",
"unsigned",
"fixed",
"-",
"width",
"64",
"-",
"bit",
"integer",
"."
] | def ReadFixed64(self):
"""Reads and returns an unsigned, fixed-width, 64-bit integer."""
return self._stream.ReadLittleEndian64() | [
"def",
"ReadFixed64",
"(",
"self",
")",
":",
"return",
"self",
".",
"_stream",
".",
"ReadLittleEndian64",
"(",
")"
] | https://github.com/mingchen/protobuf-ios/blob/0958df34558cd54cb7b6e6ca5c8855bf3d475046/compiler/python/google/protobuf/internal/decoder.py#L117-L119 |
|
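On the wire this is simply eight little-endian bytes; a `struct`-based sketch of the encode/decode pair that the underlying `ReadLittleEndian64` call performs:

```python
import struct

wire = struct.pack('<Q', 2**40 + 7)       # encoder side: 8 bytes, little-endian
(value,) = struct.unpack('<Q', wire[:8])  # decoder side: fixed-width 64-bit read
print(len(wire), value)                   # 8 1099511627783
```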
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/boto3/resources/collection.py | python | CollectionFactory.load_from_definition | (self, resource_name, collection_model,
service_context, event_emitter) | return type(str(cls_name), (CollectionManager,), attrs) | Loads a collection from a model, creating a new
:py:class:`CollectionManager` subclass
with the correct properties and methods, named based on the service
and resource name, e.g. ec2.InstanceCollectionManager. It also
creates a new :py:class:`ResourceCollection` subclass which is used
by the new manager class.
:type resource_name: string
:param resource_name: Name of the resource to look up. For services,
this should match the ``service_name``.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
:type event_emitter: :py:class:`~botocore.hooks.HierarchialEmitter`
:param event_emitter: An event emitter
:rtype: Subclass of :py:class:`CollectionManager`
:return: The collection class. | Loads a collection from a model, creating a new
:py:class:`CollectionManager` subclass
with the correct properties and methods, named based on the service
and resource name, e.g. ec2.InstanceCollectionManager. It also
creates a new :py:class:`ResourceCollection` subclass which is used
by the new manager class. | [
"Loads",
"a",
"collection",
"from",
"a",
"model",
"creating",
"a",
"new",
":",
"py",
":",
"class",
":",
"CollectionManager",
"subclass",
"with",
"the",
"correct",
"properties",
"and",
"methods",
"named",
"based",
"on",
"the",
"service",
"and",
"resource",
"name",
"e",
".",
"g",
".",
"ec2",
".",
"InstanceCollectionManager",
".",
"It",
"also",
"creates",
"a",
"new",
":",
"py",
":",
"class",
":",
"ResourceCollection",
"subclass",
"which",
"is",
"used",
"by",
"the",
"new",
"manager",
"class",
"."
] | def load_from_definition(self, resource_name, collection_model,
service_context, event_emitter):
"""
Loads a collection from a model, creating a new
:py:class:`CollectionManager` subclass
with the correct properties and methods, named based on the service
and resource name, e.g. ec2.InstanceCollectionManager. It also
creates a new :py:class:`ResourceCollection` subclass which is used
by the new manager class.
:type resource_name: string
:param resource_name: Name of the resource to look up. For services,
this should match the ``service_name``.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
:type event_emitter: :py:class:`~botocore.hooks.HierarchialEmitter`
:param event_emitter: An event emitter
:rtype: Subclass of :py:class:`CollectionManager`
:return: The collection class.
"""
attrs = {}
collection_name = collection_model.name
# Create the batch actions for a collection
self._load_batch_actions(
attrs, resource_name, collection_model,
service_context.service_model, event_emitter)
# Add the documentation to the collection class's methods
self._load_documented_collection_methods(
attrs=attrs, resource_name=resource_name,
collection_model=collection_model,
service_model=service_context.service_model,
event_emitter=event_emitter,
base_class=ResourceCollection)
if service_context.service_name == resource_name:
cls_name = '{0}.{1}Collection'.format(
service_context.service_name, collection_name)
else:
cls_name = '{0}.{1}.{2}Collection'.format(
service_context.service_name, resource_name, collection_name)
collection_cls = type(str(cls_name), (ResourceCollection,),
attrs)
# Add the documentation to the collection manager's methods
self._load_documented_collection_methods(
attrs=attrs, resource_name=resource_name,
collection_model=collection_model,
service_model=service_context.service_model,
event_emitter=event_emitter,
base_class=CollectionManager)
attrs['_collection_cls'] = collection_cls
cls_name += 'Manager'
return type(str(cls_name), (CollectionManager,), attrs) | [
"def",
"load_from_definition",
"(",
"self",
",",
"resource_name",
",",
"collection_model",
",",
"service_context",
",",
"event_emitter",
")",
":",
"attrs",
"=",
"{",
"}",
"collection_name",
"=",
"collection_model",
".",
"name",
"# Create the batch actions for a collection",
"self",
".",
"_load_batch_actions",
"(",
"attrs",
",",
"resource_name",
",",
"collection_model",
",",
"service_context",
".",
"service_model",
",",
"event_emitter",
")",
"# Add the documentation to the collection class's methods",
"self",
".",
"_load_documented_collection_methods",
"(",
"attrs",
"=",
"attrs",
",",
"resource_name",
"=",
"resource_name",
",",
"collection_model",
"=",
"collection_model",
",",
"service_model",
"=",
"service_context",
".",
"service_model",
",",
"event_emitter",
"=",
"event_emitter",
",",
"base_class",
"=",
"ResourceCollection",
")",
"if",
"service_context",
".",
"service_name",
"==",
"resource_name",
":",
"cls_name",
"=",
"'{0}.{1}Collection'",
".",
"format",
"(",
"service_context",
".",
"service_name",
",",
"collection_name",
")",
"else",
":",
"cls_name",
"=",
"'{0}.{1}.{2}Collection'",
".",
"format",
"(",
"service_context",
".",
"service_name",
",",
"resource_name",
",",
"collection_name",
")",
"collection_cls",
"=",
"type",
"(",
"str",
"(",
"cls_name",
")",
",",
"(",
"ResourceCollection",
",",
")",
",",
"attrs",
")",
"# Add the documentation to the collection manager's methods",
"self",
".",
"_load_documented_collection_methods",
"(",
"attrs",
"=",
"attrs",
",",
"resource_name",
"=",
"resource_name",
",",
"collection_model",
"=",
"collection_model",
",",
"service_model",
"=",
"service_context",
".",
"service_model",
",",
"event_emitter",
"=",
"event_emitter",
",",
"base_class",
"=",
"CollectionManager",
")",
"attrs",
"[",
"'_collection_cls'",
"]",
"=",
"collection_cls",
"cls_name",
"+=",
"'Manager'",
"return",
"type",
"(",
"str",
"(",
"cls_name",
")",
",",
"(",
"CollectionManager",
",",
")",
",",
"attrs",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/boto3/resources/collection.py#L368-L426 |
|
yan99033/CNN-SVO | d5591ea88103f8d1b26e5296129bf3b3196a14f1 | rpg_vikit/vikit_py/src/vikit_py/align_trajectory.py | python | align_sim3 | (model, data) | return s, R, t | Implementation of the paper: S. Umeyama, Least-Squares Estimation
of Transformation Parameters Between Two Point Patterns,
IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991.
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
s -- scale factor (scalar)
R -- rotation matrix (3x3)
t -- translation vector (3x1)
t_error -- translational error per point (1xn) | Implementation of the paper: S. Umeyama, Least-Squares Estimation
of Transformation Parameters Between Two Point Patterns,
IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991. | [
"Implementation",
"of",
"the",
"paper",
":",
"S",
".",
"Umeyama",
"Least",
"-",
"Squares",
"Estimation",
"of",
"Transformation",
"Parameters",
"Between",
"Two",
"Point",
"Patterns",
"IEEE",
"Trans",
".",
"Pattern",
"Anal",
".",
"Mach",
".",
"Intell",
".",
"vol",
".",
"13",
"no",
".",
"4",
"1991",
"."
] | def align_sim3(model, data):
"""Implementation of the paper: S. Umeyama, Least-Squares Estimation
of Transformation Parameters Between Two Point Patterns,
IEEE Trans. Pattern Anal. Mach. Intell., vol. 13, no. 4, 1991.
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
s -- scale factor (scalar)
R -- rotation matrix (3x3)
t -- translation vector (3x1)
t_error -- translational error per point (1xn)
"""
    # subtract mean
mu_M = model.mean(0).reshape(model.shape[0],1)
mu_D = data.mean(0).reshape(data.shape[0],1)
model_zerocentered = model - mu_M
data_zerocentered = data - mu_D
n = np.shape(model)[0]
# correlation
C = 1.0/n*np.dot(model_zerocentered.transpose(), data_zerocentered)
sigma2 = 1.0/n*np.multiply(data_zerocentered,data_zerocentered).sum()
U_svd,D_svd,V_svd = np.linalg.linalg.svd(C)
D_svd = np.diag(D_svd)
V_svd = np.transpose(V_svd)
S = np.eye(3)
if(np.linalg.det(U_svd)*np.linalg.det(V_svd) < 0):
S[2,2] = -1
R = np.dot(U_svd, np.dot(S, np.transpose(V_svd)))
s = 1.0/sigma2*np.trace(np.dot(D_svd, S))
t = mu_M-s*np.dot(R,mu_D)
# TODO:
# model_aligned = s * R * model + t
# alignment_error = model_aligned - data
# t_error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),0)).A[0]
return s, R, t | [
"def",
"align_sim3",
"(",
"model",
",",
"data",
")",
":",
"# substract mean",
"mu_M",
"=",
"model",
".",
"mean",
"(",
"0",
")",
".",
"reshape",
"(",
"model",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"mu_D",
"=",
"data",
".",
"mean",
"(",
"0",
")",
".",
"reshape",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
",",
"1",
")",
"model_zerocentered",
"=",
"model",
"-",
"mu_M",
"data_zerocentered",
"=",
"data",
"-",
"mu_D",
"n",
"=",
"np",
".",
"shape",
"(",
"model",
")",
"[",
"0",
"]",
"# correlation",
"C",
"=",
"1.0",
"/",
"n",
"*",
"np",
".",
"dot",
"(",
"model_zerocentered",
".",
"transpose",
"(",
")",
",",
"data_zerocentered",
")",
"sigma2",
"=",
"1.0",
"/",
"n",
"*",
"np",
".",
"multiply",
"(",
"data_zerocentered",
",",
"data_zerocentered",
")",
".",
"sum",
"(",
")",
"U_svd",
",",
"D_svd",
",",
"V_svd",
"=",
"np",
".",
"linalg",
".",
"linalg",
".",
"svd",
"(",
"C",
")",
"D_svd",
"=",
"np",
".",
"diag",
"(",
"D_svd",
")",
"V_svd",
"=",
"np",
".",
"transpose",
"(",
"V_svd",
")",
"S",
"=",
"np",
".",
"eye",
"(",
"3",
")",
"if",
"(",
"np",
".",
"linalg",
".",
"det",
"(",
"U_svd",
")",
"*",
"np",
".",
"linalg",
".",
"det",
"(",
"V_svd",
")",
"<",
"0",
")",
":",
"S",
"[",
"2",
",",
"2",
"]",
"=",
"-",
"1",
"R",
"=",
"np",
".",
"dot",
"(",
"U_svd",
",",
"np",
".",
"dot",
"(",
"S",
",",
"np",
".",
"transpose",
"(",
"V_svd",
")",
")",
")",
"s",
"=",
"1.0",
"/",
"sigma2",
"*",
"np",
".",
"trace",
"(",
"np",
".",
"dot",
"(",
"D_svd",
",",
"S",
")",
")",
"t",
"=",
"mu_M",
"-",
"s",
"*",
"np",
".",
"dot",
"(",
"R",
",",
"mu_D",
")",
"# TODO:",
"# model_aligned = s * R * model + t",
"# alignment_error = model_aligned - data",
"# t_error = np.sqrt(np.sum(np.multiply(alignment_error,alignment_error),0)).A[0]",
"return",
"s",
",",
"R",
",",
"t"
] | https://github.com/yan99033/CNN-SVO/blob/d5591ea88103f8d1b26e5296129bf3b3196a14f1/rpg_vikit/vikit_py/src/vikit_py/align_trajectory.py#L6-L50 |
|
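A self-contained NumPy check of the Umeyama estimate, kept independent of the record's function (whose row/column conventions date from np.matrix-style code): apply a known similarity transform to 3xN points, then recover s, R, and t with the same correlation/SVD steps:

```python
import numpy as np

rng = np.random.default_rng(1)
P = rng.standard_normal((3, 50))                     # model points, 3xN
angle = 0.3
R_true = np.array([[np.cos(angle), -np.sin(angle), 0],
                   [np.sin(angle),  np.cos(angle), 0],
                   [0,              0,             1]])
s_true, t_true = 2.0, np.array([[1.0], [-2.0], [0.5]])
Q = s_true * R_true @ P + t_true                     # data points

# Umeyama: correlate zero-centered point sets, SVD, then scale/translation.
mu_P, mu_Q = P.mean(axis=1, keepdims=True), Q.mean(axis=1, keepdims=True)
Pc, Qc = P - mu_P, Q - mu_Q
C = Qc @ Pc.T / P.shape[1]
U, D, Vt = np.linalg.svd(C)
S = np.eye(3)
if np.linalg.det(U) * np.linalg.det(Vt) < 0:         # guard against reflection
    S[2, 2] = -1
R = U @ S @ Vt
s = np.trace(np.diag(D) @ S) / (Pc ** 2).sum(axis=0).mean()
t = mu_Q - s * R @ mu_P
print(np.isclose(s, s_true), np.allclose(R, R_true), np.allclose(t, t_true))
```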
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_windows.py | python | Printer.Print | (*args, **kwargs) | return _windows_.Printer_Print(*args, **kwargs) | Print(self, Window parent, Printout printout, bool prompt=True) -> bool | Print(self, Window parent, Printout printout, bool prompt=True) -> bool | [
"Print",
"(",
"self",
"Window",
"parent",
"Printout",
"printout",
"bool",
"prompt",
"=",
"True",
")",
"-",
">",
"bool"
] | def Print(*args, **kwargs):
"""Print(self, Window parent, Printout printout, bool prompt=True) -> bool"""
return _windows_.Printer_Print(*args, **kwargs) | [
"def",
"Print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_windows_",
".",
"Printer_Print",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_windows.py#L5235-L5237 |
|
deepmodeling/deepmd-kit | 159e45d248b0429844fb6a8cb3b3a201987c8d79 | deepmd/infer/ewald_recp.py | python | EwaldRecp.eval | (self,
coord : np.ndarray,
charge : np.ndarray,
box : np.ndarray
) | return energy, force, virial | Evaluate
Parameters
----------
coord
The coordinates of atoms
charge
The atomic charge
box
The simulation region. PBC is assumed
Returns
-------
e
The energy
f
The force
v
The virial | Evaluate
Parameters
----------
coord
The coordinates of atoms
charge
The atomic charge
box
The simulation region. PBC is assumed | [
"Evaluate",
"Parameters",
"----------",
"coord",
"The",
"coordinates",
"of",
"atoms",
"charge",
"The",
"atomic",
"charge",
"box",
"The",
"simulation",
"region",
".",
"PBC",
"is",
"assumed"
] | def eval(self,
coord : np.ndarray,
charge : np.ndarray,
box : np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray] :
"""
Evaluate
Parameters
----------
coord
The coordinates of atoms
charge
The atomic charge
box
The simulation region. PBC is assumed
Returns
-------
e
The energy
f
The force
v
The virial
"""
coord = np.array(coord)
charge = np.array(charge)
box = np.array(box)
nframes = charge.shape[0]
natoms = charge.shape[1]
coord = np.reshape(coord, [nframes * 3 * natoms])
charge = np.reshape(charge, [nframes * natoms])
box = np.reshape(box, [nframes * 9])
[energy, force, virial] \
= run_sess(self.sess, [self.t_energy, self.t_force, self.t_virial],
feed_dict = {
self.t_coord: coord,
self.t_charge: charge,
self.t_box: box,
self.t_nloc: [natoms],
})
return energy, force, virial | [
"def",
"eval",
"(",
"self",
",",
"coord",
":",
"np",
".",
"ndarray",
",",
"charge",
":",
"np",
".",
"ndarray",
",",
"box",
":",
"np",
".",
"ndarray",
")",
"->",
"Tuple",
"[",
"np",
".",
"ndarray",
",",
"np",
".",
"ndarray",
",",
"np",
".",
"ndarray",
"]",
":",
"coord",
"=",
"np",
".",
"array",
"(",
"coord",
")",
"charge",
"=",
"np",
".",
"array",
"(",
"charge",
")",
"box",
"=",
"np",
".",
"array",
"(",
"box",
")",
"nframes",
"=",
"charge",
".",
"shape",
"[",
"0",
"]",
"natoms",
"=",
"charge",
".",
"shape",
"[",
"1",
"]",
"coord",
"=",
"np",
".",
"reshape",
"(",
"coord",
",",
"[",
"nframes",
"*",
"3",
"*",
"natoms",
"]",
")",
"charge",
"=",
"np",
".",
"reshape",
"(",
"charge",
",",
"[",
"nframes",
"*",
"natoms",
"]",
")",
"box",
"=",
"np",
".",
"reshape",
"(",
"box",
",",
"[",
"nframes",
"*",
"9",
"]",
")",
"[",
"energy",
",",
"force",
",",
"virial",
"]",
"=",
"run_sess",
"(",
"self",
".",
"sess",
",",
"[",
"self",
".",
"t_energy",
",",
"self",
".",
"t_force",
",",
"self",
".",
"t_virial",
"]",
",",
"feed_dict",
"=",
"{",
"self",
".",
"t_coord",
":",
"coord",
",",
"self",
".",
"t_charge",
":",
"charge",
",",
"self",
".",
"t_box",
":",
"box",
",",
"self",
".",
"t_nloc",
":",
"[",
"natoms",
"]",
",",
"}",
")",
"return",
"energy",
",",
"force",
",",
"virial"
] | https://github.com/deepmodeling/deepmd-kit/blob/159e45d248b0429844fb6a8cb3b3a201987c8d79/deepmd/infer/ewald_recp.py#L47-L91 |
|
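The part worth pinning down above is the flattening contract before the feed_dict: per-frame arrays are reshaped to 1-D before being fed to the TensorFlow placeholders. A NumPy sketch of the expected shapes, runnable without TensorFlow or a trained graph:

```python
import numpy as np

nframes, natoms = 2, 5
coord = np.zeros((nframes, natoms, 3))       # atomic coordinates
charge = np.zeros((nframes, natoms))         # atomic charges
box = np.tile(np.eye(3), (nframes, 1, 1))    # 3x3 cell per frame

print(coord.reshape(nframes * 3 * natoms).shape)   # (30,)
print(charge.reshape(nframes * natoms).shape)      # (10,)
print(box.reshape(nframes * 9).shape)              # (18,)
```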
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/train/serialization.py | python | _fill_param_into_net | (net, parameter_list) | Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback. | Fills parameter_list into net. | [
"Fills",
"parameter_list",
"into",
"net",
"."
] | def _fill_param_into_net(net, parameter_list):
"""
Fills parameter_list into net.
Args:
net (Cell): train network.
parameter_list (list): parameters list from ge callback.
"""
parameter_dict = {}
for each_param in parameter_list:
param_name = each_param["name"]
if isinstance(each_param["data"], Parameter):
each_param["data"].init_data()
np_val = each_param["data"].asnumpy()
if np_val.shape == (1,):
parameter_dict[param_name] = Parameter(np_val, name=param_name)
elif np_val.shape == ():
parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
name=param_name)
else:
parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
load_param_into_net(net, parameter_dict) | [
"def",
"_fill_param_into_net",
"(",
"net",
",",
"parameter_list",
")",
":",
"parameter_dict",
"=",
"{",
"}",
"for",
"each_param",
"in",
"parameter_list",
":",
"param_name",
"=",
"each_param",
"[",
"\"name\"",
"]",
"if",
"isinstance",
"(",
"each_param",
"[",
"\"data\"",
"]",
",",
"Parameter",
")",
":",
"each_param",
"[",
"\"data\"",
"]",
".",
"init_data",
"(",
")",
"np_val",
"=",
"each_param",
"[",
"\"data\"",
"]",
".",
"asnumpy",
"(",
")",
"if",
"np_val",
".",
"shape",
"==",
"(",
"1",
",",
")",
":",
"parameter_dict",
"[",
"param_name",
"]",
"=",
"Parameter",
"(",
"np_val",
",",
"name",
"=",
"param_name",
")",
"elif",
"np_val",
".",
"shape",
"==",
"(",
")",
":",
"parameter_dict",
"[",
"param_name",
"]",
"=",
"Parameter",
"(",
"Tensor",
"(",
"np_val",
".",
"tolist",
"(",
")",
",",
"mstype",
".",
"pytype_to_dtype",
"(",
"np_val",
".",
"dtype",
")",
")",
",",
"name",
"=",
"param_name",
")",
"else",
":",
"parameter_dict",
"[",
"param_name",
"]",
"=",
"Parameter",
"(",
"Tensor",
"(",
"np_val",
")",
",",
"name",
"=",
"param_name",
")",
"load_param_into_net",
"(",
"net",
",",
"parameter_dict",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/train/serialization.py#L710-L732 |
||
SoarGroup/Soar | a1c5e249499137a27da60533c72969eef3b8ab6b | scons/scons-local-4.1.0/SCons/Script/Main.py | python | _scons_internal_error | () | Handle all errors but user errors. Print out a message telling
the user what to do in this case and print a normal trace. | Handle all errors but user errors. Print out a message telling
the user what to do in this case and print a normal trace. | [
"Handle",
"all",
"errors",
"but",
"user",
"errors",
".",
"Print",
"out",
"a",
"message",
"telling",
"the",
"user",
"what",
"to",
"do",
"in",
"this",
"case",
"and",
"print",
"a",
"normal",
"trace",
"."
] | def _scons_internal_error():
"""Handle all errors but user errors. Print out a message telling
the user what to do in this case and print a normal trace.
"""
print('internal error')
traceback.print_exc()
sys.exit(2) | [
"def",
"_scons_internal_error",
"(",
")",
":",
"print",
"(",
"'internal error'",
")",
"traceback",
".",
"print_exc",
"(",
")",
"sys",
".",
"exit",
"(",
"2",
")"
] | https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Script/Main.py#L604-L610 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | ProcessEvent.GetExitCode | (*args, **kwargs) | return _misc_.ProcessEvent_GetExitCode(*args, **kwargs) | GetExitCode(self) -> int | GetExitCode(self) -> int | [
"GetExitCode",
"(",
"self",
")",
"-",
">",
"int"
] | def GetExitCode(*args, **kwargs):
"""GetExitCode(self) -> int"""
return _misc_.ProcessEvent_GetExitCode(*args, **kwargs) | [
"def",
"GetExitCode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"ProcessEvent_GetExitCode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L2078-L2080 |
|
alrightchiu/SecondRound | 56f7603ed87b6db2326fcc7a67ec015e820d57e4 | fabfile.py | python | publish | () | Publish to production via rsync | Publish to production via rsync | [
"Publish",
"to",
"production",
"via",
"rsync"
] | def publish():
"""Publish to production via rsync"""
local('pelican -s publishconf.py')
project.rsync_project(
remote_dir=dest_path,
exclude=".DS_Store",
local_dir=DEPLOY_PATH.rstrip('/') + '/',
delete=True,
extra_opts='-c',
) | [
"def",
"publish",
"(",
")",
":",
"local",
"(",
"'pelican -s publishconf.py'",
")",
"project",
".",
"rsync_project",
"(",
"remote_dir",
"=",
"dest_path",
",",
"exclude",
"=",
"\".DS_Store\"",
",",
"local_dir",
"=",
"DEPLOY_PATH",
".",
"rstrip",
"(",
"'/'",
")",
"+",
"'/'",
",",
"delete",
"=",
"True",
",",
"extra_opts",
"=",
"'-c'",
",",
")"
] | https://github.com/alrightchiu/SecondRound/blob/56f7603ed87b6db2326fcc7a67ec015e820d57e4/fabfile.py#L79-L88 |
||
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | Wrapping/Python/paraview/simple.py | python | GetActiveView | () | return active_objects.view | Returns the active view. | Returns the active view. | [
"Returns",
"the",
"active",
"view",
"."
] | def GetActiveView():
"""Returns the active view."""
return active_objects.view | [
"def",
"GetActiveView",
"(",
")",
":",
"return",
"active_objects",
".",
"view"
] | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Wrapping/Python/paraview/simple.py#L1157-L1159 |
|
microsoft/clang | 86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5 | bindings/python/clang/cindex.py | python | TranslationUnit.diagnostics | (self) | return DiagIterator(self) | Return an iterable (and indexable) object containing the diagnostics. | Return an iterable (and indexable) object containing the diagnostics. | [
"Return",
"an",
"iterable",
"(",
"and",
"indexable",
")",
"object",
"containing",
"the",
"diagnostics",
"."
] | def diagnostics(self):
"""
Return an iterable (and indexable) object containing the diagnostics.
"""
class DiagIterator:
def __init__(self, tu):
self.tu = tu
def __len__(self):
return int(conf.lib.clang_getNumDiagnostics(self.tu))
def __getitem__(self, key):
diag = conf.lib.clang_getDiagnostic(self.tu, key)
if not diag:
raise IndexError
return Diagnostic(diag)
return DiagIterator(self) | [
"def",
"diagnostics",
"(",
"self",
")",
":",
"class",
"DiagIterator",
":",
"def",
"__init__",
"(",
"self",
",",
"tu",
")",
":",
"self",
".",
"tu",
"=",
"tu",
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"int",
"(",
"conf",
".",
"lib",
".",
"clang_getNumDiagnostics",
"(",
"self",
".",
"tu",
")",
")",
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"diag",
"=",
"conf",
".",
"lib",
".",
"clang_getDiagnostic",
"(",
"self",
".",
"tu",
",",
"key",
")",
"if",
"not",
"diag",
":",
"raise",
"IndexError",
"return",
"Diagnostic",
"(",
"diag",
")",
"return",
"DiagIterator",
"(",
"self",
")"
] | https://github.com/microsoft/clang/blob/86d4513d3e0daa4d5a29b0b1de7c854ca15f9fe5/bindings/python/clang/cindex.py#L2944-L2961 |
|
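A short sketch of consuming the `diagnostics` property from the record above (the source file name is hypothetical):
>>> import clang.cindex
>>> index = clang.cindex.Index.create()
>>> tu = index.parse('example.c')
>>> for diag in tu.diagnostics:          # iterable and indexable
...     print(diag.severity, diag.spelling)
>>> tu.diagnostics[0]                    # raises IndexError if out of range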
CanalTP/navitia | cb84ce9859070187e708818b058e6a7e0b7f891b | source/jormungandr/jormungandr/scenarios/journey_filter.py | python | shared_section_generator | (journey) | Definition of journeys with a shared section:
- same stop point of departure and arrival
- same number of sections in the journey | Definition of journeys with a shared section:
- same stop point of departure and arrival
- same number of sections in the journey | [
"Definition",
"of",
"journeys",
"with",
"a",
"shared",
"section",
":",
"-",
"same",
"stop",
"point",
"of",
"departure",
"and",
"arrival",
"-",
"same",
"number",
"of",
"sections",
"in",
"the",
"journey"
] | def shared_section_generator(journey):
"""
Definition of journeys with a shared section:
- same stop point of departure and arrival
- same number of sections in the journey
"""
# Early return: test if the journeys have the same number of sections
yield len(journey.sections)
# Compare each section of the journey with the criteria in the function description
for s in journey.sections:
if s.type == response_pb2.PUBLIC_TRANSPORT:
yield "origin:{}/dest:{}".format(s.origin.uri, s.destination.uri) | [
"def",
"shared_section_generator",
"(",
"journey",
")",
":",
"# Early return: test if the journeys have the same number of sections",
"yield",
"len",
"(",
"journey",
".",
"sections",
")",
"# Compare each section of the journey with the criteria in the function description",
"for",
"s",
"in",
"journey",
".",
"sections",
":",
"if",
"s",
".",
"type",
"==",
"response_pb2",
".",
"PUBLIC_TRANSPORT",
":",
"yield",
"\"origin:{}/dest:{}\"",
".",
"format",
"(",
"s",
".",
"origin",
".",
"uri",
",",
"s",
".",
"destination",
".",
"uri",
")"
] | https://github.com/CanalTP/navitia/blob/cb84ce9859070187e708818b058e6a7e0b7f891b/source/jormungandr/jormungandr/scenarios/journey_filter.py#L520-L533 |
||
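One hedged way to use the generator above: two journeys share sections when the streams of keys they yield are identical (assumes `j1` and `j2` are populated `response_pb2.Journey` messages):
>>> from itertools import zip_longest
>>> def have_shared_sections(j1, j2):
...     return all(a == b for a, b in zip_longest(
...         shared_section_generator(j1), shared_section_generator(j2)))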
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/parsing_ops.py | python | _process_raw_parameters | (names, dense_defaults, sparse_keys, sparse_types,
dense_keys, dense_types, dense_shapes) | return (names, dense_defaults_vec, sparse_keys, sparse_types, dense_keys,
dense_shapes_as_proto, dense_shapes) | Process raw parameters to params used by `gen_parsing_ops`.
Args:
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
Returns:
Tuple of `names`, `dense_defaults_vec`, `sparse_keys`, `sparse_types`,
`dense_keys`, `dense_shapes`.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up. | Process raw parameters to params used by `gen_parsing_ops`. | [
"Process",
"raw",
"parameters",
"to",
"params",
"used",
"by",
"gen_parsing_ops",
"."
] | def _process_raw_parameters(names, dense_defaults, sparse_keys, sparse_types,
dense_keys, dense_types, dense_shapes):
"""Process raw parameters to params used by `gen_parsing_ops`.
Args:
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
Returns:
Tuple of `names`, `dense_defaults_vec`, `sparse_keys`, `sparse_types`,
`dense_keys`, `dense_shapes`.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
names = [] if names is None else names
dense_defaults = collections.OrderedDict(
) if dense_defaults is None else dense_defaults
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = ([[]] * len(dense_keys)
if dense_shapes is None else dense_shapes)
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d" %
(len(dense_shapes), num_dense))
if len(dense_types) != num_dense:
raise ValueError("len(dense_types) != len(num_dense): %d vs. %d" %
(len(dense_types), num_dense))
if len(sparse_types) != num_sparse:
raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d" %
(len(sparse_types), num_sparse))
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s" %
set(dense_keys).intersection(set(sparse_keys)))
# Convert dense_shapes to TensorShape object.
dense_shapes = [tensor_shape.as_shape(shape) for shape in dense_shapes]
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
dense_shape = dense_shapes[i]
if (dense_shape.ndims is not None and dense_shape.ndims > 0 and
dense_shape.dims[0].value is None):
# Variable stride dense shape, the default value should be a
# scalar padding value
if default_value is None:
default_value = ops.convert_to_tensor(
"" if dense_types[i] == dtypes.string else 0, dtype=dense_types[i])
else:
# Reshape to a scalar to ensure user gets an error if they
# provide a tensor that's not intended to be a padding value
# (0 or 2+ elements).
key_name = "padding_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, [])
else:
if default_value is None:
default_value = constant_op.constant([], dtype=dense_types[i])
elif not isinstance(default_value, ops.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = ops.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name)
default_value = array_ops.reshape(default_value, dense_shape)
dense_defaults_vec.append(default_value)
# Finally, convert dense_shapes to TensorShapeProto
dense_shapes_as_proto = [shape.as_proto() for shape in dense_shapes]
return (names, dense_defaults_vec, sparse_keys, sparse_types, dense_keys,
dense_shapes_as_proto, dense_shapes) | [
"def",
"_process_raw_parameters",
"(",
"names",
",",
"dense_defaults",
",",
"sparse_keys",
",",
"sparse_types",
",",
"dense_keys",
",",
"dense_types",
",",
"dense_shapes",
")",
":",
"names",
"=",
"[",
"]",
"if",
"names",
"is",
"None",
"else",
"names",
"dense_defaults",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"if",
"dense_defaults",
"is",
"None",
"else",
"dense_defaults",
"sparse_keys",
"=",
"[",
"]",
"if",
"sparse_keys",
"is",
"None",
"else",
"sparse_keys",
"sparse_types",
"=",
"[",
"]",
"if",
"sparse_types",
"is",
"None",
"else",
"sparse_types",
"dense_keys",
"=",
"[",
"]",
"if",
"dense_keys",
"is",
"None",
"else",
"dense_keys",
"dense_types",
"=",
"[",
"]",
"if",
"dense_types",
"is",
"None",
"else",
"dense_types",
"dense_shapes",
"=",
"(",
"[",
"[",
"]",
"]",
"*",
"len",
"(",
"dense_keys",
")",
"if",
"dense_shapes",
"is",
"None",
"else",
"dense_shapes",
")",
"num_dense",
"=",
"len",
"(",
"dense_keys",
")",
"num_sparse",
"=",
"len",
"(",
"sparse_keys",
")",
"if",
"len",
"(",
"dense_shapes",
")",
"!=",
"num_dense",
":",
"raise",
"ValueError",
"(",
"\"len(dense_shapes) != len(dense_keys): %d vs. %d\"",
"%",
"(",
"len",
"(",
"dense_shapes",
")",
",",
"num_dense",
")",
")",
"if",
"len",
"(",
"dense_types",
")",
"!=",
"num_dense",
":",
"raise",
"ValueError",
"(",
"\"len(dense_types) != len(num_dense): %d vs. %d\"",
"%",
"(",
"len",
"(",
"dense_types",
")",
",",
"num_dense",
")",
")",
"if",
"len",
"(",
"sparse_types",
")",
"!=",
"num_sparse",
":",
"raise",
"ValueError",
"(",
"\"len(sparse_types) != len(sparse_keys): %d vs. %d\"",
"%",
"(",
"len",
"(",
"sparse_types",
")",
",",
"num_sparse",
")",
")",
"if",
"num_dense",
"+",
"num_sparse",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"Must provide at least one sparse key or dense key\"",
")",
"if",
"not",
"set",
"(",
"dense_keys",
")",
".",
"isdisjoint",
"(",
"set",
"(",
"sparse_keys",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Dense and sparse keys must not intersect; intersection: %s\"",
"%",
"set",
"(",
"dense_keys",
")",
".",
"intersection",
"(",
"set",
"(",
"sparse_keys",
")",
")",
")",
"# Convert dense_shapes to TensorShape object.",
"dense_shapes",
"=",
"[",
"tensor_shape",
".",
"as_shape",
"(",
"shape",
")",
"for",
"shape",
"in",
"dense_shapes",
"]",
"dense_defaults_vec",
"=",
"[",
"]",
"for",
"i",
",",
"key",
"in",
"enumerate",
"(",
"dense_keys",
")",
":",
"default_value",
"=",
"dense_defaults",
".",
"get",
"(",
"key",
")",
"dense_shape",
"=",
"dense_shapes",
"[",
"i",
"]",
"if",
"(",
"dense_shape",
".",
"ndims",
"is",
"not",
"None",
"and",
"dense_shape",
".",
"ndims",
">",
"0",
"and",
"dense_shape",
".",
"dims",
"[",
"0",
"]",
".",
"value",
"is",
"None",
")",
":",
"# Variable stride dense shape, the default value should be a",
"# scalar padding value",
"if",
"default_value",
"is",
"None",
":",
"default_value",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"\"\"",
"if",
"dense_types",
"[",
"i",
"]",
"==",
"dtypes",
".",
"string",
"else",
"0",
",",
"dtype",
"=",
"dense_types",
"[",
"i",
"]",
")",
"else",
":",
"# Reshape to a scalar to ensure user gets an error if they",
"# provide a tensor that's not intended to be a padding value",
"# (0 or 2+ elements).",
"key_name",
"=",
"\"padding_\"",
"+",
"re",
".",
"sub",
"(",
"\"[^A-Za-z0-9_.\\\\-/]\"",
",",
"\"_\"",
",",
"key",
")",
"default_value",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"default_value",
",",
"dtype",
"=",
"dense_types",
"[",
"i",
"]",
",",
"name",
"=",
"key_name",
")",
"default_value",
"=",
"array_ops",
".",
"reshape",
"(",
"default_value",
",",
"[",
"]",
")",
"else",
":",
"if",
"default_value",
"is",
"None",
":",
"default_value",
"=",
"constant_op",
".",
"constant",
"(",
"[",
"]",
",",
"dtype",
"=",
"dense_types",
"[",
"i",
"]",
")",
"elif",
"not",
"isinstance",
"(",
"default_value",
",",
"ops",
".",
"Tensor",
")",
":",
"key_name",
"=",
"\"key_\"",
"+",
"re",
".",
"sub",
"(",
"\"[^A-Za-z0-9_.\\\\-/]\"",
",",
"\"_\"",
",",
"key",
")",
"default_value",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"default_value",
",",
"dtype",
"=",
"dense_types",
"[",
"i",
"]",
",",
"name",
"=",
"key_name",
")",
"default_value",
"=",
"array_ops",
".",
"reshape",
"(",
"default_value",
",",
"dense_shape",
")",
"dense_defaults_vec",
".",
"append",
"(",
"default_value",
")",
"# Finally, convert dense_shapes to TensorShapeProto",
"dense_shapes_as_proto",
"=",
"[",
"shape",
".",
"as_proto",
"(",
")",
"for",
"shape",
"in",
"dense_shapes",
"]",
"return",
"(",
"names",
",",
"dense_defaults_vec",
",",
"sparse_keys",
",",
"sparse_types",
",",
"dense_keys",
",",
"dense_shapes_as_proto",
",",
"dense_shapes",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/parsing_ops.py#L879-L982 |
|
facebook/mysql-5.6 | 65a650660ec7b4d627d1b738f397252ff4706207 | arcanist/lint/cpp_linter/cpplint.py | python | UpdateIncludeState | (filename, include_state, io=codecs) | return True | Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise. | Fill up the include_state with new includes found from the file. | [
"Fill",
"up",
"the",
"include_state",
"with",
"new",
"includes",
"found",
"from",
"the",
"file",
"."
] | def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True | [
"def",
"UpdateIncludeState",
"(",
"filename",
",",
"include_state",
",",
"io",
"=",
"codecs",
")",
":",
"headerfile",
"=",
"None",
"try",
":",
"headerfile",
"=",
"io",
".",
"open",
"(",
"filename",
",",
"'r'",
",",
"'utf8'",
",",
"'replace'",
")",
"except",
"IOError",
":",
"return",
"False",
"linenum",
"=",
"0",
"for",
"line",
"in",
"headerfile",
":",
"linenum",
"+=",
"1",
"clean_line",
"=",
"CleanseComments",
"(",
"line",
")",
"match",
"=",
"_RE_PATTERN_INCLUDE",
".",
"search",
"(",
"clean_line",
")",
"if",
"match",
":",
"include",
"=",
"match",
".",
"group",
"(",
"2",
")",
"# The value formatting is cute, but not really used right now.",
"# What matters here is that the key is in include_state.",
"include_state",
".",
"setdefault",
"(",
"include",
",",
"'%s:%d'",
"%",
"(",
"filename",
",",
"linenum",
")",
")",
"return",
"True"
] | https://github.com/facebook/mysql-5.6/blob/65a650660ec7b4d627d1b738f397252ff4706207/arcanist/lint/cpp_linter/cpplint.py#L4359-L4385 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/boto/boto/s3/key.py | python | Key.get_file | (self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None) | Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: string
:param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket. | Retrieves a file from an S3 Key | [
"Retrieves",
"a",
"file",
"from",
"an",
"S3",
"Key"
] | def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: string
:param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None) | [
"def",
"get_file",
"(",
"self",
",",
"fp",
",",
"headers",
"=",
"None",
",",
"cb",
"=",
"None",
",",
"num_cb",
"=",
"10",
",",
"torrent",
"=",
"False",
",",
"version_id",
"=",
"None",
",",
"override_num_retries",
"=",
"None",
",",
"response_headers",
"=",
"None",
")",
":",
"self",
".",
"_get_file_internal",
"(",
"fp",
",",
"headers",
"=",
"headers",
",",
"cb",
"=",
"cb",
",",
"num_cb",
"=",
"num_cb",
",",
"torrent",
"=",
"torrent",
",",
"version_id",
"=",
"version_id",
",",
"override_num_retries",
"=",
"override_num_retries",
",",
"response_headers",
"=",
"response_headers",
",",
"hash_algs",
"=",
"None",
",",
"query_args",
"=",
"None",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/s3/key.py#L1430-L1482 |
||
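A minimal sketch of `get_file` from the record above (bucket, key and local file names are hypothetical):
>>> import boto
>>> bucket = boto.connect_s3().get_bucket('my-bucket')
>>> key = bucket.get_key('my-key')
>>> with open('local-copy', 'wb') as fp:
...     key.get_file(fp)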
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/perf/measurements/media.py | python | Media.results_are_the_same_on_every_page | (self) | return False | Results can vary from page to page based on media events taking place. | Results can vary from page to page based on media events taking place. | [
"Results",
"can",
"vary",
"from",
"page",
"to",
"page",
"based",
"on",
"media",
"events",
"taking",
"place",
"."
] | def results_are_the_same_on_every_page(self):
"""Results can vary from page to page based on media events taking place."""
return False | [
"def",
"results_are_the_same_on_every_page",
"(",
"self",
")",
":",
"return",
"False"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/perf/measurements/media.py#L28-L30 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pyio.py | python | FileIO.writable | (self) | return self._writable | True if file was opened in a write mode. | True if file was opened in a write mode. | [
"True",
"if",
"file",
"was",
"opened",
"in",
"a",
"write",
"mode",
"."
] | def writable(self):
"""True if file was opened in a write mode."""
self._checkClosed()
return self._writable | [
"def",
"writable",
"(",
"self",
")",
":",
"self",
".",
"_checkClosed",
"(",
")",
"return",
"self",
".",
"_writable"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pyio.py#L1728-L1731 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/toml/encoder.py | python | dumps | (o, encoder=None) | return retval | Stringifies input dict as toml
Args:
o: Object to dump into toml
encoder: The ``TomlEncoder`` to use for constructing the output string
Returns:
String containing the toml corresponding to dict
Examples:
```python
>>> import toml
>>> output = {
... 'a': "I'm a string",
... 'b': ["I'm", "a", "list"],
... 'c': 2400
... }
>>> toml.dumps(output)
'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
``` | Stringifies input dict as toml | [
"Stringifies",
"input",
"dict",
"as",
"toml"
] | def dumps(o, encoder=None):
"""Stringifies input dict as toml
Args:
o: Object to dump into toml
encoder: The ``TomlEncoder`` to use for constructing the output string
Returns:
String containing the toml corresponding to dict
Examples:
```python
>>> import toml
>>> output = {
... 'a': "I'm a string",
... 'b': ["I'm", "a", "list"],
... 'c': 2400
... }
>>> toml.dumps(output)
'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
```
"""
retval = ""
if encoder is None:
encoder = TomlEncoder(o.__class__)
addtoretval, sections = encoder.dump_sections(o, "")
retval += addtoretval
outer_objs = [id(o)]
while sections:
section_ids = [id(section) for section in sections.values()]
for outer_obj in outer_objs:
if outer_obj in section_ids:
raise ValueError("Circular reference detected")
outer_objs += section_ids
newsections = encoder.get_empty_table()
for section in sections:
addtoretval, addtosections = encoder.dump_sections(
sections[section], section)
if addtoretval or (not addtoretval and not addtosections):
if retval and retval[-2:] != "\n\n":
retval += "\n"
retval += "[" + section + "]\n"
if addtoretval:
retval += addtoretval
for s in addtosections:
newsections[section + "." + s] = addtosections[s]
sections = newsections
return retval | [
"def",
"dumps",
"(",
"o",
",",
"encoder",
"=",
"None",
")",
":",
"retval",
"=",
"\"\"",
"if",
"encoder",
"is",
"None",
":",
"encoder",
"=",
"TomlEncoder",
"(",
"o",
".",
"__class__",
")",
"addtoretval",
",",
"sections",
"=",
"encoder",
".",
"dump_sections",
"(",
"o",
",",
"\"\"",
")",
"retval",
"+=",
"addtoretval",
"outer_objs",
"=",
"[",
"id",
"(",
"o",
")",
"]",
"while",
"sections",
":",
"section_ids",
"=",
"[",
"id",
"(",
"section",
")",
"for",
"section",
"in",
"sections",
".",
"values",
"(",
")",
"]",
"for",
"outer_obj",
"in",
"outer_objs",
":",
"if",
"outer_obj",
"in",
"section_ids",
":",
"raise",
"ValueError",
"(",
"\"Circular reference detected\"",
")",
"outer_objs",
"+=",
"section_ids",
"newsections",
"=",
"encoder",
".",
"get_empty_table",
"(",
")",
"for",
"section",
"in",
"sections",
":",
"addtoretval",
",",
"addtosections",
"=",
"encoder",
".",
"dump_sections",
"(",
"sections",
"[",
"section",
"]",
",",
"section",
")",
"if",
"addtoretval",
"or",
"(",
"not",
"addtoretval",
"and",
"not",
"addtosections",
")",
":",
"if",
"retval",
"and",
"retval",
"[",
"-",
"2",
":",
"]",
"!=",
"\"\\n\\n\"",
":",
"retval",
"+=",
"\"\\n\"",
"retval",
"+=",
"\"[\"",
"+",
"section",
"+",
"\"]\\n\"",
"if",
"addtoretval",
":",
"retval",
"+=",
"addtoretval",
"for",
"s",
"in",
"addtosections",
":",
"newsections",
"[",
"section",
"+",
"\".\"",
"+",
"s",
"]",
"=",
"addtosections",
"[",
"s",
"]",
"sections",
"=",
"newsections",
"return",
"retval"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/toml/encoder.py#L34-L83 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/query.py | python | CustomRun.entry_ok | (self) | return None if cli_args is None else (cli_args, restart) | Return apparently valid (cli_args, restart) or None | Return apparently valid (cli_args, restart) or None | [
"Return",
"apparently",
"valid",
"(",
"cli_args",
"restart",
")",
"or",
"None"
] | def entry_ok(self):
"Return apparently valid (cli_args, restart) or None"
cli_args = self.cli_args_ok()
restart = self.restartvar.get()
return None if cli_args is None else (cli_args, restart) | [
"def",
"entry_ok",
"(",
"self",
")",
":",
"cli_args",
"=",
"self",
".",
"cli_args_ok",
"(",
")",
"restart",
"=",
"self",
".",
"restartvar",
".",
"get",
"(",
")",
"return",
"None",
"if",
"cli_args",
"is",
"None",
"else",
"(",
"cli_args",
",",
"restart",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/query.py#L377-L381 |
|
apple/swift-lldb | d74be846ef3e62de946df343e8c234bde93a8912 | scripts/Python/static-binding/lldb.py | python | SBError.SetError | (self, err, type) | return _lldb.SBError_SetError(self, err, type) | SetError(SBError self, uint32_t err, lldb::ErrorType type) | SetError(SBError self, uint32_t err, lldb::ErrorType type) | [
"SetError",
"(",
"SBError",
"self",
"uint32_t",
"err",
"lldb",
"::",
"ErrorType",
"type",
")"
] | def SetError(self, err, type):
"""SetError(SBError self, uint32_t err, lldb::ErrorType type)"""
return _lldb.SBError_SetError(self, err, type) | [
"def",
"SetError",
"(",
"self",
",",
"err",
",",
"type",
")",
":",
"return",
"_lldb",
".",
"SBError_SetError",
"(",
"self",
",",
"err",
",",
"type",
")"
] | https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L4607-L4609 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/deep_memory_profiler/lib/policy.py | python | Policy.find_rule | (self, component_name) | return None | Finds a rule whose name is |component_name|. | Finds a rule whose name is |component_name|. | [
"Finds",
"a",
"rule",
"whose",
"name",
"is",
"|component_name|",
"."
] | def find_rule(self, component_name):
"""Finds a rule whose name is |component_name|. """
for rule in self._rules:
if rule.name == component_name:
return rule
return None | [
"def",
"find_rule",
"(",
"self",
",",
"component_name",
")",
":",
"for",
"rule",
"in",
"self",
".",
"_rules",
":",
"if",
"rule",
".",
"name",
"==",
"component_name",
":",
"return",
"rule",
"return",
"None"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/deep_memory_profiler/lib/policy.py#L128-L133 |
|
cvxpy/cvxpy | 5165b4fb750dfd237de8659383ef24b4b2e33aaf | cvxpy/atoms/matrix_frac.py | python | MatrixFrac.is_atom_convex | (self) | return True | Is the atom convex? | Is the atom convex? | [
"Is",
"the",
"atom",
"convex?"
] | def is_atom_convex(self) -> bool:
"""Is the atom convex?
"""
return True | [
"def",
"is_atom_convex",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"True"
] | https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/matrix_frac.py#L111-L114 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/callconv.py | python | BaseCallConv._get_arg_packer | (self, argtypes) | return self.context.get_arg_packer(argtypes) | Get an argument packer for the given argument types. | Get an argument packer for the given argument types. | [
"Get",
"an",
"argument",
"packer",
"for",
"the",
"given",
"argument",
"types",
"."
] | def _get_arg_packer(self, argtypes):
"""
Get an argument packer for the given argument types.
"""
return self.context.get_arg_packer(argtypes) | [
"def",
"_get_arg_packer",
"(",
"self",
",",
"argtypes",
")",
":",
"return",
"self",
".",
"context",
".",
"get_arg_packer",
"(",
"argtypes",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/callconv.py#L154-L158 |
|
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/yaml/__init__.py | python | load | (stream, Loader=Loader) | Parse the first YAML document in a stream
and produce the corresponding Python object. | Parse the first YAML document in a stream
and produce the corresponding Python object. | [
"Parse",
"the",
"first",
"YAML",
"document",
"in",
"a",
"stream",
"and",
"produce",
"the",
"corresponding",
"Python",
"object",
"."
] | def load(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
loader = Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose() | [
"def",
"load",
"(",
"stream",
",",
"Loader",
"=",
"Loader",
")",
":",
"loader",
"=",
"Loader",
"(",
"stream",
")",
"try",
":",
"return",
"loader",
".",
"get_single_data",
"(",
")",
"finally",
":",
"loader",
".",
"dispose",
"(",
")"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/yaml/__init__.py#L64-L73 |
||
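A small usage sketch for `load`; passing an explicit `Loader` such as `SafeLoader` is the safer choice for untrusted input:
>>> import yaml
>>> yaml.load("a: 1\nb: [2, 3]", Loader=yaml.SafeLoader)
{'a': 1, 'b': [2, 3]}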
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/poplib.py | python | POP3.uidl | (self, which=None) | return self._longcmd('UIDL') | Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets] | Return message digest (unique id) list. | [
"Return",
"message",
"digest",
"(",
"unique",
"id",
")",
"list",
"."
] | def uidl(self, which=None):
"""Return message digest (unique id) list.
If 'which', result contains unique id for that message
in the form 'response mesgnum uid', otherwise result is
the list ['response', ['mesgnum uid', ...], octets]
"""
if which is not None:
return self._shortcmd('UIDL %s' % which)
return self._longcmd('UIDL') | [
"def",
"uidl",
"(",
"self",
",",
"which",
"=",
"None",
")",
":",
"if",
"which",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_shortcmd",
"(",
"'UIDL %s'",
"%",
"which",
")",
"return",
"self",
".",
"_longcmd",
"(",
"'UIDL'",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/poplib.py#L308-L317 |
|
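A hedged sketch of both call forms of `uidl` (host and credentials are hypothetical):
>>> import poplib
>>> pop = poplib.POP3('mail.example.com')
>>> pop.user('me')
>>> pop.pass_('secret')
>>> pop.uidl()         # ('+OK ...', ['1 uid1', '2 uid2', ...], octets)
>>> pop.uidl(which=1)  # '+OK 1 uid1'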
PX4/PX4-Autopilot | 0b9f60a0370be53d683352c63fd92db3d6586e18 | Tools/px4airframes/srcparser.py | python | ParameterGroup.AddParameter | (self, param) | Add parameter to the group | Add parameter to the group | [
"Add",
"parameter",
"to",
"the",
"group"
] | def AddParameter(self, param):
"""
Add parameter to the group
"""
self.params.append(param) | [
"def",
"AddParameter",
"(",
"self",
",",
"param",
")",
":",
"self",
".",
"params",
".",
"append",
"(",
"param",
")"
] | https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/Tools/px4airframes/srcparser.py#L15-L19 |
||
taichi-dev/taichi | 973c04d6ba40f34e9e3bd5a28ae0ee0802f136a6 | python/taichi/lang/field.py | python | Field.get_field_members | (self) | return self.vars | Gets field members.
Returns:
List[Expr]: Field members. | Gets field members. | [
"Gets",
"field",
"members",
"."
] | def get_field_members(self):
"""Gets field members.
Returns:
List[Expr]: Field members.
"""
return self.vars | [
"def",
"get_field_members",
"(",
"self",
")",
":",
"return",
"self",
".",
"vars"
] | https://github.com/taichi-dev/taichi/blob/973c04d6ba40f34e9e3bd5a28ae0ee0802f136a6/python/taichi/lang/field.py#L69-L75 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/s3fs/core.py | python | S3FileSystem.merge | (self, path, filelist, **kwargs) | Create single S3 file from list of S3 files
Uses multi-part, no data is downloaded. The original files are
not deleted.
Parameters
----------
path : str
The final file to produce
filelist : list of str
The paths, in order, to assemble into the final file. | Create single S3 file from list of S3 files | [
"Create",
"single",
"S3",
"file",
"from",
"list",
"of",
"S3",
"files"
] | def merge(self, path, filelist, **kwargs):
""" Create single S3 file from list of S3 files
Uses multi-part, no data is downloaded. The original files are
not deleted.
Parameters
----------
path : str
The final file to produce
filelist : list of str
The paths, in order, to assemble into the final file.
"""
bucket, key, version_id = self.split_path(path)
if version_id:
raise ValueError("Cannot write to an explicit versioned file!")
mpu = self._call_s3(
self.s3.create_multipart_upload,
kwargs,
Bucket=bucket,
Key=key
)
# TODO: Make this support versions?
out = [self._call_s3(
self.s3.upload_part_copy,
kwargs,
Bucket=bucket, Key=key, UploadId=mpu['UploadId'],
CopySource=f, PartNumber=i + 1)
for (i, f) in enumerate(filelist)]
parts = [{'PartNumber': i + 1, 'ETag': o['CopyPartResult']['ETag']} for
(i, o) in enumerate(out)]
part_info = {'Parts': parts}
self.s3.complete_multipart_upload(Bucket=bucket, Key=key,
UploadId=mpu['UploadId'],
MultipartUpload=part_info)
self.invalidate_cache(path) | [
"def",
"merge",
"(",
"self",
",",
"path",
",",
"filelist",
",",
"*",
"*",
"kwargs",
")",
":",
"bucket",
",",
"key",
",",
"version_id",
"=",
"self",
".",
"split_path",
"(",
"path",
")",
"if",
"version_id",
":",
"raise",
"ValueError",
"(",
"\"Cannot write to an explicit versioned file!\"",
")",
"mpu",
"=",
"self",
".",
"_call_s3",
"(",
"self",
".",
"s3",
".",
"create_multipart_upload",
",",
"kwargs",
",",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
")",
"# TODO: Make this support versions?",
"out",
"=",
"[",
"self",
".",
"_call_s3",
"(",
"self",
".",
"s3",
".",
"upload_part_copy",
",",
"kwargs",
",",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
",",
"UploadId",
"=",
"mpu",
"[",
"'UploadId'",
"]",
",",
"CopySource",
"=",
"f",
",",
"PartNumber",
"=",
"i",
"+",
"1",
")",
"for",
"(",
"i",
",",
"f",
")",
"in",
"enumerate",
"(",
"filelist",
")",
"]",
"parts",
"=",
"[",
"{",
"'PartNumber'",
":",
"i",
"+",
"1",
",",
"'ETag'",
":",
"o",
"[",
"'CopyPartResult'",
"]",
"[",
"'ETag'",
"]",
"}",
"for",
"(",
"i",
",",
"o",
")",
"in",
"enumerate",
"(",
"out",
")",
"]",
"part_info",
"=",
"{",
"'Parts'",
":",
"parts",
"}",
"self",
".",
"s3",
".",
"complete_multipart_upload",
"(",
"Bucket",
"=",
"bucket",
",",
"Key",
"=",
"key",
",",
"UploadId",
"=",
"mpu",
"[",
"'UploadId'",
"]",
",",
"MultipartUpload",
"=",
"part_info",
")",
"self",
".",
"invalidate_cache",
"(",
"path",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/s3fs/core.py#L827-L862 |
||
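A minimal sketch of `merge` from the record above (paths are hypothetical); note that S3 multipart copy requires every part except the last to be at least 5 MB:
>>> import s3fs
>>> fs = s3fs.S3FileSystem()
>>> fs.merge('my-bucket/combined.bin',
...          ['my-bucket/part-0.bin', 'my-bucket/part-1.bin'])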
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_gdi.py | python | Overlay.Reset | (*args, **kwargs) | return _gdi_.Overlay_Reset(*args, **kwargs) | Reset(self) | Reset(self) | [
"Reset",
"(",
"self",
")"
] | def Reset(*args, **kwargs):
"""Reset(self)"""
return _gdi_.Overlay_Reset(*args, **kwargs) | [
"def",
"Reset",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"Overlay_Reset",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L6875-L6877 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/summary/summary.py | python | _compat_summary_scope | (name, family) | Handles `family` argument for v2 op invocation in v1. | Handles `family` argument for v2 op invocation in v1. | [
"Handles",
"family",
"argument",
"for",
"v2",
"op",
"invocation",
"in",
"v1",
"."
] | def _compat_summary_scope(name, family):
"""Handles `family` argument for v2 op invocation in v1."""
# Get a new summary tag name with the `family` arg.
with _summary_op_util.summary_scope(name, family) as (tag, _):
# Reset the root name scope with an empty summary_scope.
with _summary_op_util.summary_scope(name='', family=None):
yield tag | [
"def",
"_compat_summary_scope",
"(",
"name",
",",
"family",
")",
":",
"# Get a new summary tag name with the `family` arg.",
"with",
"_summary_op_util",
".",
"summary_scope",
"(",
"name",
",",
"family",
")",
"as",
"(",
"tag",
",",
"_",
")",
":",
"# Reset the root name scope with an empty summary_scope.",
"with",
"_summary_op_util",
".",
"summary_scope",
"(",
"name",
"=",
"''",
",",
"family",
"=",
"None",
")",
":",
"yield",
"tag"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/summary/summary.py#L847-L853 |
||
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/rosdep2/rospkg_loader.py | python | RosPkgLoader.get_loadable_views | (self) | return list(self._rosstack.list()) + [DEFAULT_VIEW_KEY] | 'Views' map to ROS stack names. | 'Views' map to ROS stack names. | [
"Views",
"map",
"to",
"ROS",
"stack",
"names",
"."
] | def get_loadable_views(self):
"""
'Views' map to ROS stack names.
"""
return list(self._rosstack.list()) + [DEFAULT_VIEW_KEY] | [
"def",
"get_loadable_views",
"(",
"self",
")",
":",
"return",
"list",
"(",
"self",
".",
"_rosstack",
".",
"list",
"(",
")",
")",
"+",
"[",
"DEFAULT_VIEW_KEY",
"]"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/rosdep2/rospkg_loader.py#L106-L110 |
|
rapidsai/cudf | d5b2448fc69f17509304d594f029d0df56984962 | python/cudf/cudf/core/series.py | python | TimedeltaProperties.days | (self) | return self._get_td_field("days") | Number of days.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.days
0 141
1 14
2 13000
3 0
4 37
dtype: int64 | Number of days. | [
"Number",
"of",
"days",
"."
] | def days(self):
"""
Number of days.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.days
0 141
1 14
2 13000
3 0
4 37
dtype: int64
"""
return self._get_td_field("days") | [
"def",
"days",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_td_field",
"(",
"\"days\"",
")"
] | https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/series.py#L4620-L4648 |
|
lawy623/SVS | b7c7ae367c82a4797ff4a896a2ff304f02e7f724 | caffe/scripts/cpp_lint.py | python | _NestingState.InnermostClass | (self) | return None | Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise. | Get class info on the top of the stack. | [
"Get",
"class",
"info",
"on",
"the",
"top",
"of",
"the",
"stack",
"."
] | def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None | [
"def",
"InnermostClass",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"stack",
")",
",",
"0",
",",
"-",
"1",
")",
":",
"classinfo",
"=",
"self",
".",
"stack",
"[",
"i",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"classinfo",
",",
"_ClassInfo",
")",
":",
"return",
"classinfo",
"return",
"None"
] | https://github.com/lawy623/SVS/blob/b7c7ae367c82a4797ff4a896a2ff304f02e7f724/caffe/scripts/cpp_lint.py#L2160-L2170 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/utils.py | python | urldefragauth | (url) | return urlunparse((scheme, netloc, path, params, query, '')) | Given a url remove the fragment and the authentication part.
:rtype: str | Given a url remove the fragment and the authentication part. | [
"Given",
"a",
"url",
"remove",
"the",
"fragment",
"and",
"the",
"authentication",
"part",
"."
] | def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, '')) | [
"def",
"urldefragauth",
"(",
"url",
")",
":",
"scheme",
",",
"netloc",
",",
"path",
",",
"params",
",",
"query",
",",
"fragment",
"=",
"urlparse",
"(",
"url",
")",
"# see func:`prepend_scheme_if_needed`",
"if",
"not",
"netloc",
":",
"netloc",
",",
"path",
"=",
"path",
",",
"netloc",
"netloc",
"=",
"netloc",
".",
"rsplit",
"(",
"'@'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"return",
"urlunparse",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"params",
",",
"query",
",",
"''",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/utils.py#L953-L967 |
|
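A quick sketch of what `urldefragauth` strips:
>>> from requests.utils import urldefragauth
>>> urldefragauth('https://user:pass@example.com/path?q=1#frag')
'https://example.com/path?q=1'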
OSGeo/gdal | 3748fc4ba4fba727492774b2b908a2130c864a83 | swig/python/osgeo/osr.py | python | OSRCRSInfo_auth_name_get | (*args) | return _osr.OSRCRSInfo_auth_name_get(*args) | r"""OSRCRSInfo_auth_name_get(CRSInfo crsInfo) -> char const * | r"""OSRCRSInfo_auth_name_get(CRSInfo crsInfo) -> char const * | [
"r",
"OSRCRSInfo_auth_name_get",
"(",
"CRSInfo",
"crsInfo",
")",
"-",
">",
"char",
"const",
"*"
] | def OSRCRSInfo_auth_name_get(*args):
r"""OSRCRSInfo_auth_name_get(CRSInfo crsInfo) -> char const *"""
return _osr.OSRCRSInfo_auth_name_get(*args) | [
"def",
"OSRCRSInfo_auth_name_get",
"(",
"*",
"args",
")",
":",
"return",
"_osr",
".",
"OSRCRSInfo_auth_name_get",
"(",
"*",
"args",
")"
] | https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/osr.py#L1000-L1002 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/image_ops_impl.py | python | _is_tensor | (x) | return isinstance(x, (ops.Tensor, variables.Variable)) | Returns `True` if `x` is a symbolic tensor-like object.
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`. | Returns `True` if `x` is a symbolic tensor-like object. | [
"Returns",
"True",
"if",
"x",
"is",
"a",
"symbolic",
"tensor",
"-",
"like",
"object",
"."
] | def _is_tensor(x):
"""Returns `True` if `x` is a symbolic tensor-like object.
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (ops.Tensor, variables.Variable)) | [
"def",
"_is_tensor",
"(",
"x",
")",
":",
"return",
"isinstance",
"(",
"x",
",",
"(",
"ops",
".",
"Tensor",
",",
"variables",
".",
"Variable",
")",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/image_ops_impl.py#L83-L92 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/http/client.py | python | HTTPConnection.set_tunnel | (self, host, port=None, headers=None) | Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
the endpoint passed to `set_tunnel`. This is done by sending an HTTP
CONNECT request to the proxy server when the connection is established.
This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request. | Set up host and port for HTTP CONNECT tunnelling. | [
"Set",
"up",
"host",
"and",
"port",
"for",
"HTTP",
"CONNECT",
"tunnelling",
"."
] | def set_tunnel(self, host, port=None, headers=None):
"""Set up host and port for HTTP CONNECT tunnelling.
In a connection that uses HTTP CONNECT tunneling, the host passed to the
constructor is used as a proxy server that relays all communication to
the endpoint passed to `set_tunnel`. This is done by sending an HTTP
CONNECT request to the proxy server when the connection is established.
This method must be called before the HTTP connection has been
established.
The headers argument should be a mapping of extra HTTP headers to send
with the CONNECT request.
"""
if self.sock:
raise RuntimeError("Can't set up tunnel for established connection")
self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
if headers:
self._tunnel_headers = headers
else:
self._tunnel_headers.clear() | [
"def",
"set_tunnel",
"(",
"self",
",",
"host",
",",
"port",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"if",
"self",
".",
"sock",
":",
"raise",
"RuntimeError",
"(",
"\"Can't set up tunnel for established connection\"",
")",
"self",
".",
"_tunnel_host",
",",
"self",
".",
"_tunnel_port",
"=",
"self",
".",
"_get_hostport",
"(",
"host",
",",
"port",
")",
"if",
"headers",
":",
"self",
".",
"_tunnel_headers",
"=",
"headers",
"else",
":",
"self",
".",
"_tunnel_headers",
".",
"clear",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/http/client.py#L865-L887 |
||
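A minimal tunnelling sketch (proxy and target host names are hypothetical):
>>> import http.client
>>> conn = http.client.HTTPSConnection('proxy.example.com', 8080)
>>> conn.set_tunnel('www.example.com')
>>> conn.request('HEAD', '/')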
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/utils.py | python | get_include | () | return d | Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
... | Return the directory that contains the NumPy \\*.h header files. | [
"Return",
"the",
"directory",
"that",
"contains",
"the",
"NumPy",
"\\\\",
"*",
".",
"h",
"header",
"files",
"."
] | def get_include():
"""
Return the directory that contains the NumPy \\*.h header files.
Extension modules that need to compile against NumPy should use this
function to locate the appropriate include directory.
Notes
-----
When using ``distutils``, for example in ``setup.py``.
::
import numpy as np
...
Extension('extension_name', ...
include_dirs=[np.get_include()])
...
"""
import numpy
if numpy.show_config is None:
# running from numpy source directory
d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
else:
# using installed numpy core headers
import numpy.core as core
d = os.path.join(os.path.dirname(core.__file__), 'include')
return d | [
"def",
"get_include",
"(",
")",
":",
"import",
"numpy",
"if",
"numpy",
".",
"show_config",
"is",
"None",
":",
"# running from numpy source directory",
"d",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"numpy",
".",
"__file__",
")",
",",
"'core'",
",",
"'include'",
")",
"else",
":",
"# using installed numpy core headers",
"import",
"numpy",
".",
"core",
"as",
"core",
"d",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"core",
".",
"__file__",
")",
",",
"'include'",
")",
"return",
"d"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/lib/utils.py#L23-L50 |
|
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | config/expandlibs.py | python | isDynamicLib | (path) | return os.path.splitext(path)[1] == conf.DLL_SUFFIX or os.path.basename(path) == 'XUL' | Returns whether the given path points to a dynamic library, that is,
ends with DLL_SUFFIX. | Returns whether the given path points to a dynamic library, that is,
ends with DLL_SUFFIX. | [
"Returns",
"whether",
"the",
"given",
"path",
"points",
"to",
"a",
"dynamic",
"library",
"that",
"is",
"ends",
"with",
"DLL_SUFFIX",
"."
] | def isDynamicLib(path):
'''Returns whether the given path points to a dynamic library, that is,
ends with DLL_SUFFIX.'''
# On mac, the xul library is named XUL, instead of libxul.dylib. Assume any
# file by that name is a dynamic library.
return os.path.splitext(path)[1] == conf.DLL_SUFFIX or os.path.basename(path) == 'XUL' | [
"def",
"isDynamicLib",
"(",
"path",
")",
":",
"# On mac, the xul library is named XUL, instead of libxul.dylib. Assume any",
"# file by that name is a dynamic library.",
"return",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
"==",
"conf",
".",
"DLL_SUFFIX",
"or",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
"==",
"'XUL'"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/config/expandlibs.py#L71-L76 |
|
fifengine/fifengine | 4b62c42e85bec19893cef8e63e6855927cff2c47 | engine/python/fife/extensions/fife_utils.py | python | getUserDataDirectory | (vendor, appname) | | return dir | Gets the proper location to save configuration and data files, depending on the OS.
Windows: %APPDATA%\vendor\appname
Mac: ~/Library/Application Support/vendor/appname
Linux/Unix/Other: ~/.vendor/appname
See:
Brian Vanderburg II @ http://mail.python.org/pipermail/python-list/2008-May/660779.html | Gets the proper location to save configuration and data files, depending on depending on OS. | [
"Gets",
"the",
"proper",
"location",
"to",
"save",
"configuration",
"and",
"data",
"files",
"depending",
"on",
"depending",
"on",
"OS",
"."
] | def getUserDataDirectory(vendor, appname):
""" Gets the proper location to save configuration and data files, depending on depending on OS.
Windows: %APPDATA%\vendor\appname
Mac: ~/Library/Application Support/vendor/appname
Linux/Unix/Other: ~/.vendor/appname
See:
Brian Vanderburg II @ http://mail.python.org/pipermail/python-list/2008-May/660779.html
"""
dir = None
# WINDOWS
if os.name == "nt":
# Try env APPDATA or USERPROFILE or HOMEDRIVE/HOMEPATH
if "APPDATA" in os.environ:
dir = os.environ["APPDATA"]
if ((dir is None) or (not os.path.isdir(dir))) and ("USERPROFILE" in os.environ):
dir = os.environ["USERPROFILE"]
if os.path.isdir(os.path.join(dir, "Application Data")):
dir = os.path.join(dir, "Application Data")
if ((dir is None) or (not os.path.isdir(dir))) and ("HOMEDRIVE" in os.environ) and ("HOMEPATH" in os.environ):
dir = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"]
if os.path.isdir(os.path.join(dir, "Application Data")):
dir = os.path.join(dir, "Application Data")
if (dir is None) or (not os.path.isdir(dir)):
dir = os.path.expanduser("~")
# On windows, add vendor and app name
dir = os.path.join(dir, vendor, appname)
# Mac
elif os.name == "mac": # ?? may not be entirely correct
dir = os.path.expanduser("~")
dir = os.path.join(dir, "Library", "Application Support")
dir = os.path.join(dir, vendor, appname)
# Unix/Linux/all others
if dir is None:
dir = os.path.expanduser("~")
dir = os.path.join(dir, "."+vendor, appname)
# Create vendor/appname folder if it doesn't exist
if not os.path.isdir(dir):
os.makedirs(dir)
return dir | [
"def",
"getUserDataDirectory",
"(",
"vendor",
",",
"appname",
")",
":",
"dir",
"=",
"None",
"# WINDOWS",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"# Try env APPDATA or USERPROFILE or HOMEDRIVE/HOMEPATH",
"if",
"\"APPDATA\"",
"in",
"os",
".",
"environ",
":",
"dir",
"=",
"os",
".",
"environ",
"[",
"\"APPDATA\"",
"]",
"if",
"(",
"(",
"dir",
"is",
"None",
")",
"or",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
")",
")",
"and",
"(",
"\"USERPROFILE\"",
"in",
"os",
".",
"environ",
")",
":",
"dir",
"=",
"os",
".",
"environ",
"[",
"\"USERPROFILE\"",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\"Application Data\"",
")",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\"Application Data\"",
")",
"if",
"(",
"(",
"dir",
"is",
"None",
")",
"or",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
")",
")",
"and",
"(",
"\"HOMEDRIVE\"",
"in",
"os",
".",
"environ",
")",
"and",
"(",
"\"HOMEPATH\"",
"in",
"os",
".",
"environ",
")",
":",
"dir",
"=",
"os",
".",
"environ",
"[",
"\"HOMEDRIVE\"",
"]",
"+",
"os",
".",
"environ",
"[",
"\"HOMEPATH\"",
"]",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\"Application Data\"",
")",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\"Application Data\"",
")",
"if",
"(",
"dir",
"is",
"None",
")",
"or",
"(",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"# On windows, add vendor and app name",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"vendor",
",",
"appname",
")",
"# Mac",
"elif",
"os",
".",
"name",
"==",
"\"mac\"",
":",
"# ?? may not be entirely correct",
"dir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\"Library\"",
",",
"\"Application Support\"",
")",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"vendor",
",",
"appname",
")",
"# Unix/Linux/all others",
"if",
"dir",
"is",
"None",
":",
"dir",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir",
",",
"\".\"",
"+",
"vendor",
",",
"appname",
")",
"# Create vendor/appname folder if it doesn't exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
":",
"os",
".",
"makedirs",
"(",
"dir",
")",
"return",
"dir"
] | https://github.com/fifengine/fifengine/blob/4b62c42e85bec19893cef8e63e6855927cff2c47/engine/python/fife/extensions/fife_utils.py#L50-L100 |
|
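A minimal usage sketch (vendor and app names are made up; note the call creates the directory as a side effect):

settings_dir = getUserDataDirectory('fife', 'my_game')   # hypothetical names
print(settings_dir)   # e.g. ~/.fife/my_game on Linux, %APPDATA%\fife\my_game on Windows
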
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/vcs/git.py | python | Git.has_commit | (cls, location, rev)

def has_commit(cls, location, rev):
    """
    Check if rev is a commit that is available in the local repository.
    """
    try:
        cls.run_command(
            ['rev-parse', '-q', '--verify', "sha^" + rev],
            cwd=location,
            log_failed_cmd=False,
        )
    except InstallationError:
        return False
    else:
        return True

https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/vcs/git.py#L342-L355

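Git.has_commit is pip-internal, so the sketch below is illustrative only; it assumes the usual @classmethod decoration on has_commit, which the dump does not show:

from pip._internal.vcs.git import Git    # internal API; may change between pip versions

# Returns True only when the exact revision object exists in the local clone.
if Git.has_commit('/path/to/checkout', 'f85344403c1c'):   # hypothetical path and rev
    print('revision already fetched')
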
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/vis/ipython/widgets.py | python | KlamptWidget.hide | (self,name,value=False)

def hide(self,name,value=False):
    """Changes the visibility status of a certain named target"""
    target_name = name
    if name in self._extras:
        type,data = self._extras[name]
        if type == 'Config':
            target_name = data
        elif type == 'Configs' or type == 'Trajectory':
            self.beginRpc(strict=False)
            for subitem in data:
                self._do_rpc({'type':'set_visible','object':subitem,'value':value})
            self.endRpc(strict=False)
            return
    self._do_rpc({'type':'set_visible','object':target_name,'value':value})

https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/vis/ipython/widgets.py#L325-L338

zufuliu/notepad2 | 680bb88661147936c7ae062da1dae4231486d3c1 | scintilla/scripts/FileGenerator.py | python | ReadFileAsList | (path)

def ReadFileAsList(path):
    """Read all the lines in the file and return as a list of strings without line ends.
    """
    with open(path, "r", encoding="utf-8") as f:
        return [l.rstrip('\n') for l in f]

https://github.com/zufuliu/notepad2/blob/680bb88661147936c7ae062da1dae4231486d3c1/scintilla/scripts/FileGenerator.py#L193-L197

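A one-line usage sketch (the file name is hypothetical):

lines = ReadFileAsList('Scintilla.iface')   # hypothetical input file
print(len(lines), repr(lines[0]) if lines else '(empty file)')
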
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py | python | xmlNode.docCopyNode | (self, doc, extended) | return __tmp

def docCopyNode(self, doc, extended):
    """Do a copy of the node to a given document. """
    if doc is None: doc__o = None
    else: doc__o = doc._o
    ret = libxml2mod.xmlDocCopyNode(self._o, doc__o, extended)
    if ret is None:raise treeError('xmlDocCopyNode() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp

https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2class.py#L2400-L2407

LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/tools/jinja2/environment.py | python | Template.is_up_to_date | (self) | return self._uptodate()

def is_up_to_date(self):
    """If this variable is `False` there is a newer version available."""
    if self._uptodate is None:
        return True
    return self._uptodate()

https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/jinja2/environment.py#L1118-L1122

Illumina/strelka | d7377443b62319f7c7bd70c241c4b2df3459e29a | src/python/lib/configureOptions.py | python | ConfigureWorkflowOptions.addWorkflowGroupOptions | (self,group)

def addWorkflowGroupOptions(self,group) :
    """
    Add options to OptionsGroup object which specify
    parameters which commonly change from run to run
    """
    pass

https://github.com/Illumina/strelka/blob/d7377443b62319f7c7bd70c241c4b2df3459e29a/src/python/lib/configureOptions.py#L53-L58

wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_windows.py | python | PreviewCanvas.SetPreview | (*args, **kwargs) | return _windows_.PreviewCanvas_SetPreview(*args, **kwargs)

def SetPreview(*args, **kwargs):
    """SetPreview(self, wxPrintPreviewBase preview)"""
    return _windows_.PreviewCanvas_SetPreview(*args, **kwargs)

https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_windows.py#L5462-L5464

Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/integrate/python/ops/odes.py | python | _dopri5 | (func, y0, t, rtol, atol, full_output=False, first_step=None, safety=0.9, ifactor=10.0, dfactor=0.2, max_num_steps=1000, name=None)

def _dopri5(func,
            y0,
            t,
            rtol,
            atol,
            full_output=False,
            first_step=None,
            safety=0.9,
            ifactor=10.0,
            dfactor=0.2,
            max_num_steps=1000,
            name=None):
  """Solve an ODE for `odeint` using method='dopri5'."""
  if first_step is None:
    # at some point, we might want to switch to picking the step size
    # automatically
    first_step = 1.0

  with ops.name_scope(name, 'dopri5', [
      y0, t, rtol, atol, safety, ifactor, dfactor, max_num_steps
  ]) as scope:

    first_step = ops.convert_to_tensor(
        first_step, dtype=t.dtype, name='first_step')
    safety = ops.convert_to_tensor(safety, dtype=t.dtype, name='safety')
    ifactor = ops.convert_to_tensor(ifactor, dtype=t.dtype, name='ifactor')
    dfactor = ops.convert_to_tensor(dfactor, dtype=t.dtype, name='dfactor')
    max_num_steps = ops.convert_to_tensor(
        max_num_steps, dtype=dtypes.int32, name='max_num_steps')

    def adaptive_runge_kutta_step(rk_state, history, n_steps):
      """Take an adaptive Runge-Kutta step to integrate the ODE."""
      y0, f0, _, t0, dt, interp_coeff = rk_state
      with ops.name_scope('assertions'):
        check_underflow = control_flow_ops.Assert(t0 + dt > t0,
                                                  ['underflow in dt', dt])
        check_max_num_steps = control_flow_ops.Assert(
            n_steps < max_num_steps, ['max_num_steps exceeded'])
        check_numerics = control_flow_ops.Assert(
            math_ops.reduce_all(math_ops.is_finite(abs(y0))),
            ['non-finite values in state `y`', y0])
      with ops.control_dependencies(
          [check_underflow, check_max_num_steps, check_numerics]):
        y1, f1, y1_error, k = _runge_kutta_step(func, y0, f0, t0, dt)

      with ops.name_scope('error_ratio'):
        # We use the same approach as the dopri5 fortran code.
        error_tol = atol + rtol * math_ops.maximum(abs(y0), abs(y1))
        tensor_error_ratio = _abs_square(y1_error) / _abs_square(error_tol)
        # Could also use reduce_maximum here.
        error_ratio = math_ops.sqrt(math_ops.reduce_mean(tensor_error_ratio))
        accept_step = error_ratio <= 1

      with ops.name_scope('update/rk_state'):
        # If we don't accept the step, the _RungeKuttaState will be useless
        # (covering a time-interval of size 0), but that's OK, because in such
        # cases we always immediately take another Runge-Kutta step.
        y_next = control_flow_ops.cond(accept_step, lambda: y1, lambda: y0)
        f_next = control_flow_ops.cond(accept_step, lambda: f1, lambda: f0)
        t_next = control_flow_ops.cond(accept_step, lambda: t0 + dt, lambda: t0)
        interp_coeff = control_flow_ops.cond(
            accept_step, lambda: _interp_fit_rk(y0, y1, k, dt),
            lambda: interp_coeff)
        dt_next = _optimal_step_size(dt, error_ratio, safety, ifactor, dfactor)
        rk_state = _RungeKuttaState(y_next, f_next, t0, t_next, dt_next,
                                    interp_coeff)

      with ops.name_scope('update/history'):
        history = _History(
            _ta_append(history.integrate_points, t0 + dt),
            _ta_append(history.error_ratio, error_ratio))
      return rk_state, history, n_steps + 1

    def interpolate(solution, history, rk_state, i):
      """Interpolate through the next time point, integrating as necessary."""
      with ops.name_scope('interpolate'):
        rk_state, history, _ = control_flow_ops.while_loop(
            lambda rk_state, *_: t[i] > rk_state.t1,
            adaptive_runge_kutta_step, (rk_state, history, 0),
            name='integrate_loop')
        y = _interp_evaluate(rk_state.interp_coeff, rk_state.t0, rk_state.t1,
                             t[i])
        solution = solution.write(i, y)
        return solution, history, rk_state, i + 1

    with _assert_increasing(t):
      num_times = array_ops.size(t)

    solution = tensor_array_ops.TensorArray(
        y0.dtype, size=num_times).write(0, y0)
    history = _History(
        integrate_points=tensor_array_ops.TensorArray(
            t.dtype, size=0, dynamic_size=True),
        error_ratio=tensor_array_ops.TensorArray(
            rtol.dtype, size=0, dynamic_size=True))
    rk_state = _RungeKuttaState(
        y0, func(y0, t[0]), t[0], t[0], first_step, interp_coeff=[y0] * 5)

    solution, history, _, _ = control_flow_ops.while_loop(
        lambda _, __, ___, i: i < num_times,
        interpolate, (solution, history, rk_state, 1),
        name='interpolate_loop')

    y = solution.stack(name=scope)
    y.set_shape(t.get_shape().concatenate(y0.get_shape()))
    if not full_output:
      return y
    else:
      integrate_points = history.integrate_points.stack()
      info_dict = {
          'num_func_evals': 6 * array_ops.size(integrate_points) + 1,
          'integrate_points': integrate_points,
          'error_ratio': history.error_ratio.stack()
      }
      return (y, info_dict)

https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/integrate/python/ops/odes.py#L303-L418

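The helper above is internal; in this vendored TF 1.x tree it sits behind tf.contrib.integrate.odeint, which dispatches to _dopri5 by default. A rough graph-mode usage sketch (the toy ODE and tolerances are illustrative assumptions, not values taken from this file):

import numpy as np
import tensorflow as tf                      # TF 1.x graph mode assumed
from tensorflow.contrib.integrate import odeint

# Toy problem: dy/dt = -y with y(0) = 1, solved on [0, 1].
func = lambda y, t: -y
t = np.linspace(0.0, 1.0, 11)
y = odeint(func, 1.0, t, rtol=1e-6, atol=1e-9)  # method defaults to 'dopri5'

with tf.Session() as sess:
    print(sess.run(y))                       # close to exp(-t)
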
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/optparse.py | python | Values._update_careful | (self, dict)

def _update_careful(self, dict):
    """
    Update the option values from an arbitrary dictionary, but only
    use keys from dict that already have a corresponding attribute
    in self. Any keys in dict without a corresponding attribute
    are silently ignored.
    """
    for attr in dir(self):
        if attr in dict:
            dval = dict[attr]
            if dval is not None:
                setattr(self, attr, dval)

https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/optparse.py#L855-L866

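A small behavior sketch with the standard library's optparse (note _update_careful is an underscore-private helper, called here directly just for illustration):

from optparse import Values

opts = Values({'verbose': False, 'output': None})
opts._update_careful({'verbose': True, 'unknown_key': 42})   # private helper, demo only
print(opts.verbose)                   # True  -> existing attribute was updated
print(hasattr(opts, 'unknown_key'))   # False -> key without a matching attribute ignored
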
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/py_vulcanize/third_party/rcssmin/_setup/py3/shell.py | python | frompath | (executable) | return None

def frompath(executable):
    """ Find executable in PATH """
    # Based on distutils.spawn.find_executable.
    path = _os.environ.get('PATH', '')
    paths = [
        _os.path.expanduser(item)
        for item in path.split(_os.pathsep)
    ]
    ext = _os.path.splitext(executable)[1]
    exts = ['']
    if _sys.platform == 'win32' or _os.name == 'os2':
        eext = ['.exe', '.bat', '.py']
        if ext not in eext:
            exts.extend(eext)

    for ext in exts:
        if not _os.path.isfile(executable + ext):
            for path in paths:
                fname = _os.path.join(path, executable + ext)
                if _os.path.isfile(fname):
                    # the file exists, we have a shot at spawn working
                    return fname
        else:
            return executable + ext

    return None

https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/py_vulcanize/third_party/rcssmin/_setup/py3/shell.py#L326-L351

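Usage sketch; the module imports os and sys under the aliases _os and _sys, which the snippet mirrors (the tool names are arbitrary examples):

import os as _os
import sys as _sys

# With frompath() defined as above:
print(frompath('git'))           # e.g. '/usr/bin/git' on a typical Linux box, else None
print(frompath('no-such-tool'))  # None: not found on PATH
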
NASA-SW-VnV/ikos | 71325dfb94737332542caa708d7537752021522d | analyzer/python/ikos/analyzer.py | python | clang_ikos_flags | ()

def clang_ikos_flags():
    ''' Clang flags for ikos '''
    return [
        # enable clang warnings
        '-Wall',
        # disable source code fortification
        '-U_FORTIFY_SOURCE',
        '-D_FORTIFY_SOURCE=0',
        # flag for intrinsic.h
        '-D__IKOS__',
        # compile in debug mode
        '-g',
        # disable optimizations
        '-O0',
        # disable the 'optnone' attribute
        # see https://bugs.llvm.org/show_bug.cgi?id=35950#c10
        '-Xclang',
        '-disable-O0-optnone',
    ]

https://github.com/NASA-SW-VnV/ikos/blob/71325dfb94737332542caa708d7537752021522d/analyzer/python/ikos/analyzer.py#L649-L667

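How such a flag list is typically spliced into a clang invocation — an illustrative sketch under assumed file names, not ikos's actual command builder:

import subprocess

# Compile C source to LLVM bitcode with the ikos-friendly flags.
cmd = ['clang', '-c', '-emit-llvm'] + clang_ikos_flags() + ['-o', 'test.bc', 'test.c']
subprocess.check_call(cmd)   # 'test.c' / 'test.bc' are hypothetical names
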
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/distributed/elastic/utils/logging.py | python | get_logger | (name: Optional[str] = None) | return _setup_logger(name or _derive_module_name(depth=2))

def get_logger(name: Optional[str] = None):
    """
    Util function to set up a simple logger that writes
    into stderr. The loglevel is fetched from the LOGLEVEL
    env. variable or WARNING as default. The function will use the
    module name of the caller if no name is provided.

    Args:
        name: Name of the logger. If no name provided, the name will
              be derived from the call stack.
    """
    # Derive the name of the caller, if none provided
    # Use depth=2 since this function takes up one level in the call stack
    return _setup_logger(name or _derive_module_name(depth=2))

https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/elastic/utils/logging.py#L18-L32

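Usage sketch:

import logging

log = get_logger()                 # logger name derived from the calling module
log.warning('checkpoint saved')    # visible at the default WARNING level

named = get_logger('trainer')      # explicit name
named.setLevel(logging.INFO)
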
lyxok1/Tiny-DSOD | 94d15450699bea0dd3720e75e2d273e476174fba | scripts/cpp_lint.py | python | IsCppString | (line) | return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1

def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1

https://github.com/lyxok1/Tiny-DSOD/blob/94d15450699bea0dd3720e75e2d273e476174fba/scripts/cpp_lint.py#L1045-L1059

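A few hand-checked calls against the counting rule above:

print(IsCppString('const char* s = "unterminated'))  # True: a quote is still open
print(IsCppString('const char* s = "closed";'))      # False: quotes are balanced
print(IsCppString('putc(\'"\', f);'))                # False: '"' is a character constant
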
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/_vendor/six.py | python | _SixMetaPathImporter.is_package | (self, fullname) | return hasattr(self.__get_module(fullname), "__path__") | Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451) | Return true, if the named module is a package. | [
"Return",
"true",
"if",
"the",
"named",
"module",
"is",
"a",
"package",
"."
] | def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__") | [
"def",
"is_package",
"(",
"self",
",",
"fullname",
")",
":",
"return",
"hasattr",
"(",
"self",
".",
"__get_module",
"(",
"fullname",
")",
",",
"\"__path__\"",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/_vendor/six.py#L209-L216 |
|
apache/incubator-weex | 5c25f0b59f7ac90703c363e7261f60bd06356dbe | weex_core/tools/cpplint.py | python | PrintCategories | ()

def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
  sys.exit(0)

https://github.com/apache/incubator-weex/blob/5c25f0b59f7ac90703c363e7261f60bd06356dbe/weex_core/tools/cpplint.py#L6136-L6142

benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/debug/lib/debug_data.py | python | DebugDumpDir.core_metadata | (self) | return output[0] if len(output) == 1 else output

def core_metadata(self):
  """Metadata about the `Session.run()` call from the core runtime.

  Of the three counters available in the return value, `global_step` is
  supplied by the caller of the debugged `Session.run()`, while
  `session_run_index` and `executor_step_index` are determined by the state
  of the core runtime, automatically. For the same fetch list, feed keys and
  debug tensor watch options, the same executor will be used and
  `executor_step_index` should increase by one at a time. However, runs with
  different fetch lists, feed keys and debug_tensor watch options that all
  share the same `Session` object can lead to gaps in `session_run_index`.

  Returns:
    If core metadata are loaded, a `namedtuple` with the fields:
      `global_step`: A global step count supplied by the caller of
        `Session.run()`. It is optional to the caller. If the caller did not
        supply this parameter, its value will be -1.
      `session_run_index`: A sorted index for Run() calls to the underlying
        TensorFlow `Session` object.
      `executor_step_index`: A counter for invocations of a given runtime
        executor. The same executor is re-used for the same fetched tensors,
        target nodes, input feed keys and debug tensor watch options.
      `input_names`: Names of the input (feed) Tensors.
      `output_names`: Names of the output (fetched) Tensors.
      `target_nodes`: Names of the target nodes.
    If the core metadata have not been loaded, `None`.
    If more than one core metadata files exist, return a list of the
      `nametuple` described above.
  """
  output = self._core_metadata
  return output[0] if len(output) == 1 else output

https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/debug/lib/debug_data.py#L687-L718

apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/ndarray/numpy/_op.py | python | polyval | (p, x)

def polyval(p, x):
    """
    Evaluate a polynomial at specific values.
    If p is of length N, this function returns the value:
    p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
    If x is a sequence, then p(x) is returned for each element of x.
    If x is another polynomial then the composite polynomial p(x(t)) is returned.

    Parameters
    ----------
    p : ndarray
        1D array of polynomial coefficients (including coefficients equal to zero)
        from highest degree to the constant term.
    x : ndarray
        An array of numbers, at which to evaluate p.

    Returns
    -------
    values : ndarray
        Result array of polynomials

    Notes
    -----
    This function differs from the original `numpy.polyval
    <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
    the following way(s):
    - Does not support poly1d.
    - X should be ndarray type even if it contains only one element.

    Examples
    --------
    >>> p = np.array([3, 0, 1])
    array([3., 0., 1.])
    >>> x = np.array([5])
    array([5.])
    >>> np.polyval(p, x)  # 3 * 5**2 + 0 * 5**1 + 1
    array([76.])
    >>> x = np.array([5, 4])
    array([5., 4.])
    >>> np.polyval(p, x)
    array([76., 49.])
    """
    from ...numpy import ndarray
    if isinstance(p, numeric_types) and isinstance(x, numeric_types):
        return _np.polyval(p, x)
    elif isinstance(p, ndarray) and isinstance(x, ndarray):
        return _api_internal.polyval(p, x)
    else:
        raise TypeError('type not supported')

https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/ndarray/numpy/_op.py#L9286-L9334

ros/geometry2 | c0cb44e5315abc6067d7640cf58487e61d8d680a | tf2_ros/src/tf2_ros/buffer_interface.py | python | BufferInterface.can_transform | (self, target_frame, source_frame, time, timeout=rospy.Duration(0.0))

def can_transform(self, target_frame, source_frame, time, timeout=rospy.Duration(0.0)):
    """
    Check if a transform from the source frame to the target frame is possible.

    Must be implemented by a subclass of BufferInterface.

    :param target_frame: Name of the frame to transform into.
    :param source_frame: Name of the input frame.
    :param time: The time at which to get the transform. (0 will get the latest)
    :param timeout: (Optional) Time to wait for the target frame to become available.
    :return: True if the transform is possible, false otherwise.
    :rtype: bool
    """
    raise NotImplementedException()

https://github.com/ros/geometry2/blob/c0cb44e5315abc6067d7640cf58487e61d8d680a/tf2_ros/src/tf2_ros/buffer_interface.py#L135-L148

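The method above is the abstract interface; concrete buffers such as tf2_ros.Buffer implement it. A hedged sketch of typical use (assumes a running ROS master and node):

import rospy
import tf2_ros

rospy.init_node('tf_demo')               # assumes a ROS master is running
buf = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(buf)

if buf.can_transform('base_link', 'odom', rospy.Time(0), rospy.Duration(1.0)):
    transform = buf.lookup_transform('base_link', 'odom', rospy.Time(0))
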
apache/qpid-proton | 6bcdfebb55ea3554bc29b1901422532db331a591 | python/proton/_transport.py | python | Transport.connection | (self) | return _endpoints.Connection.wrap(pn_transport_connection(self._impl))

def connection(self) -> 'Connection':
    """The connection bound to this transport."""
    from . import _endpoints
    return _endpoints.Connection.wrap(pn_transport_connection(self._impl))

https://github.com/apache/qpid-proton/blob/6bcdfebb55ea3554bc29b1901422532db331a591/python/proton/_transport.py#L501-L504

apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/numpy/multiarray.py | python | dot | (a, b, out=None) | return _mx_nd_np.dot(a, b, out=out)

def dot(a, b, out=None):
    """
    Dot product of two arrays. Specifically,

    * If both `a` and `b` are 1-D arrays, it is inner product of vectors

    * If both `a` and `b` are 2-D arrays, it is matrix multiplication,

    * If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
      and using ``np.multiply(a, b)`` or ``a * b`` is preferred.

    * If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
      the last axis of `a` and `b`.

    * If `a` is an N-D array and `b` is a 2-D array, it is a
      sum product over the last axis of `a` and the second-to-last axis of `b`::

        dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k])

    Parameters
    ----------
    a : ndarray
        First argument.
    b : ndarray
        Second argument.
    out : ndarray, optional
        Output argument. It must have the same shape and type as the expected output.

    Returns
    -------
    output : ndarray
        Returns the dot product of `a` and `b`. If `a` and `b` are both
        scalars or both 1-D arrays then a scalar is returned; otherwise
        an array is returned.
        If `out` is given, then it is returned

    Examples
    --------
    >>> a = np.array(3)
    >>> b = np.array(4)
    >>> np.dot(a, b)
    array(12.)

    For 2-D arrays it is the matrix product:

    >>> a = np.array([[1, 0], [0, 1]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.dot(a, b)
    array([[4., 1.],
           [2., 2.]])

    >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
    >>> b = np.arange(5*6)[::-1].reshape((6,5))
    >>> np.dot(a, b)[2,3,2,2]
    array(29884.)
    >>> np.sum(a[2,3,2,:] * b[:,2])
    array(29884.)
    """
    return _mx_nd_np.dot(a, b, out=out)

https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/numpy/multiarray.py#L12712-L12771

panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/directtools/DirectLights.py | python | DirectLights.allOn | (self)

def allOn(self):
    """
    Turn on all DIRECT lights
    """
    for light in self.lightDict.values():
        self.setOn(light)
    # Make sure there is a default material
    render.setMaterial(Material())

https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/directtools/DirectLights.py#L99-L106

wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/multiprocessing/pool.py | python | Pool._maintain_pool | (self)

def _maintain_pool(self):
    """Clean up any exited workers and start replacements for them.
    """
    if self._join_exited_workers():
        self._repopulate_pool()

https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/multiprocessing/pool.py#L225-L229

CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/pkg_resources/_vendor/pyparsing.py | python | ParseResults.asList | ( self )

def asList( self ):
    """
    Returns the parse results as a nested list of matching tokens, all converted to strings.

    Example::
        patt = OneOrMore(Word(alphas))
        result = patt.parseString("sldkj lsdkj sldkj")
        # even though the result prints in string-like form, it is actually a pyparsing ParseResults
        print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

        # Use asList() to create an actual list
        result_list = result.asList()
        print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
    """
    return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]

https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pkg_resources/_vendor/pyparsing.py#L704-L718