Column schema (type and observed value-length range per column): nwo, string, 5-86 chars; sha, string, 40 chars; path, string, 4-189 chars; language, string, 1 distinct value ("python"); identifier, string, 1-94 chars; parameters, string, 2-4.03k chars; argument_list, string, 1 distinct value; return_statement, string, 0-11.5k chars; docstring, string, 1-33.2k chars; docstring_summary, string, 0-5.15k chars; docstring_tokens, sequence; function, string, 34-151k chars; function_tokens, sequence; url, string, 90-278 chars.

nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/keras/layers/merge.py | python | maximum | (inputs, **kwargs) | return Maximum(**kwargs)(inputs) | Functional interface to compute maximum (element-wise) list of `inputs`.
This is equivalent to the `tf.keras.layers.Maximum` layer.
For example:
```python
input1 = tf.keras.layers.Input(shape=(16,))
x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)
input2 = tf.keras.layers.Input(shape=(32,))
x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)
max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)
out = tf.keras.layers.Dense(4)(max_inp)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
```
Args:
inputs: A list of input tensors (at least 2) of same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor (of same shape as input tensor) with the element-wise
maximum of the inputs.
Raises:
ValueError: If input tensors are of different shape. | Functional interface to compute maximum (element-wise) list of `inputs`. | [
"Functional",
"interface",
"to",
"compute",
"maximum",
"(",
"element",
"-",
"wise",
")",
"list",
"of",
"inputs",
"."
] | def maximum(inputs, **kwargs):
"""Functional interface to compute maximum (element-wise) list of `inputs`.
This is equivalent to the `tf.keras.layers.Maximum` layer.
For example:
```python
input1 = tf.keras.layers.Input(shape=(16,))
x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)
input2 = tf.keras.layers.Input(shape=(32,))
x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)
max_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)
out = tf.keras.layers.Dense(4)(max_inp)
model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
```
Args:
inputs: A list of input tensors (at least 2) of same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor (of same shape as input tensor) with the element-wise
maximum of the inputs.
Raises:
ValueError: If input tensors are of different shape.
"""
return Maximum(**kwargs)(inputs) | [
"def",
"maximum",
"(",
"inputs",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Maximum",
"(",
"*",
"*",
"kwargs",
")",
"(",
"inputs",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/layers/merge.py#L868-L896 |
|
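A quick, hedged sketch (not part of the dataset row): the functional `maximum` above and the `Maximum` layer class are interchangeable; the tensor values here are arbitrary.

```python
import tensorflow as tf

a = tf.constant([[1., 5.], [3., 2.]])
b = tf.constant([[4., 0.], [3., 9.]])

# Functional interface: builds a Maximum layer under the hood.
print(tf.keras.layers.maximum([a, b]).numpy())    # [[4. 5.] [3. 9.]]
# Equivalent explicit layer call.
print(tf.keras.layers.Maximum()([a, b]).numpy())  # [[4. 5.] [3. 9.]]
```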
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/utils/packaging.py | python | get_requires_python | (dist) | return requires_python | Return the "Requires-Python" metadata for a distribution, or None
if not present. | Return the "Requires-Python" metadata for a distribution, or None
if not present. | [
"Return",
"the",
"Requires",
"-",
"Python",
"metadata",
"for",
"a",
"distribution",
"or",
"None",
"if",
"not",
"present",
"."
] | def get_requires_python(dist):
# type: (pkg_resources.Distribution) -> Optional[str]
"""
Return the "Requires-Python" metadata for a distribution, or None
if not present.
"""
pkg_info_dict = get_metadata(dist)
requires_python = pkg_info_dict.get('Requires-Python')
if requires_python is not None:
# Convert to a str to satisfy the type checker, since requires_python
# can be a Header object.
requires_python = str(requires_python)
return requires_python | [
"def",
"get_requires_python",
"(",
"dist",
")",
":",
"# type: (pkg_resources.Distribution) -> Optional[str]",
"pkg_info_dict",
"=",
"get_metadata",
"(",
"dist",
")",
"requires_python",
"=",
"pkg_info_dict",
".",
"get",
"(",
"'Requires-Python'",
")",
"if",
"requires_python",
"is",
"not",
"None",
":",
"# Convert to a str to satisfy the type checker, since requires_python",
"# can be a Header object.",
"requires_python",
"=",
"str",
"(",
"requires_python",
")",
"return",
"requires_python"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/utils/packaging.py#L70-L84 |
|
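For context, a standalone sketch of what pip's helper above computes, using only public `pkg_resources` APIs; the metadata file name varies by install format, hence the fallback.

```python
import email.parser
import pkg_resources

dist = pkg_resources.get_distribution('pip')  # any installed distribution
name = 'METADATA' if dist.has_metadata('METADATA') else 'PKG-INFO'
headers = email.parser.Parser().parsestr(dist.get_metadata(name))
# None when the distribution declares no Requires-Python.
print(headers.get('Requires-Python'))
```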
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/pydoc.py | python | HTMLDoc.bigsection | (self, title, *args) | return self.section(title, *args) | Format a section with a big heading. | Format a section with a big heading. | [
"Format",
"a",
"section",
"with",
"a",
"big",
"heading",
"."
] | def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args) | [
"def",
"bigsection",
"(",
"self",
",",
"title",
",",
"*",
"args",
")",
":",
"title",
"=",
"'<big><strong>%s</strong></big>'",
"%",
"title",
"return",
"self",
".",
"section",
"(",
"title",
",",
"*",
"args",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/pydoc.py#L467-L470 |
|
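A minimal sketch against the Python 2.7 `pydoc` this row is drawn from, where `section()` takes a title, foreground/background colours, and HTML contents (newer Pythons changed the signature).

```python
import pydoc

html = pydoc.HTMLDoc()
# bigsection only enlarges/bolds the title, then delegates to section().
print(html.bigsection('Example Section', '#ffffff', '#7799ee',
                      '<p>section body</p>'))
```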
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/training/input.py | python | _store_sparse_tensors_join | (tensor_list_list, enqueue_many, keep_input) | return (stored_list_list, sparse_info_list) | Store SparseTensors for feeding into batch_join, etc. | Store SparseTensors for feeding into batch_join, etc. | [
"Store",
"SparseTensors",
"for",
"feeding",
"into",
"batch_join",
"etc",
"."
] | def _store_sparse_tensors_join(tensor_list_list, enqueue_many, keep_input):
"""Store SparseTensors for feeding into batch_join, etc."""
(s0, sparse_info_list) = _store_sparse_tensors(
tensor_list_list[0], enqueue_many, keep_input)
stored_list_list = [s0]
for tensor_list in tensor_list_list[1:]:
s, sparse_info_candidate = _store_sparse_tensors(
tensor_list, enqueue_many, keep_input,
[st.map_op for st in sparse_info_list])
if sparse_info_list != sparse_info_candidate:
raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
% (tensor_list_list[0], tensor_list))
sparse_info_list = [
info.merge_with(candidate)
for (info, candidate) in zip(sparse_info_list, sparse_info_candidate)]
stored_list_list.append(s)
return (stored_list_list, sparse_info_list) | [
"def",
"_store_sparse_tensors_join",
"(",
"tensor_list_list",
",",
"enqueue_many",
",",
"keep_input",
")",
":",
"(",
"s0",
",",
"sparse_info_list",
")",
"=",
"_store_sparse_tensors",
"(",
"tensor_list_list",
"[",
"0",
"]",
",",
"enqueue_many",
",",
"keep_input",
")",
"stored_list_list",
"=",
"[",
"s0",
"]",
"for",
"tensor_list",
"in",
"tensor_list_list",
"[",
"1",
":",
"]",
":",
"s",
",",
"sparse_info_candidate",
"=",
"_store_sparse_tensors",
"(",
"tensor_list",
",",
"enqueue_many",
",",
"keep_input",
",",
"[",
"st",
".",
"map_op",
"for",
"st",
"in",
"sparse_info_list",
"]",
")",
"if",
"sparse_info_list",
"!=",
"sparse_info_candidate",
":",
"raise",
"ValueError",
"(",
"\"Inconsistent SparseTensors list: %s vs. %s\"",
"%",
"(",
"tensor_list_list",
"[",
"0",
"]",
",",
"tensor_list",
")",
")",
"sparse_info_list",
"=",
"[",
"info",
".",
"merge_with",
"(",
"candidate",
")",
"for",
"(",
"info",
",",
"candidate",
")",
"in",
"zip",
"(",
"sparse_info_list",
",",
"sparse_info_candidate",
")",
"]",
"stored_list_list",
".",
"append",
"(",
"s",
")",
"return",
"(",
"stored_list_list",
",",
"sparse_info_list",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/training/input.py#L575-L592 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | deps/src/libxml2-2.9.1/python/libxml2.py | python | xmlDoc.htmlSaveFile | (self, filename) | return ret | Dump an HTML document to a file. If @filename is "-" the
stdout file is used. | Dump an HTML document to a file. If | [
"Dump",
"an",
"HTML",
"document",
"to",
"a",
"file",
".",
"If"
] | def htmlSaveFile(self, filename):
"""Dump an HTML document to a file. If @filename is "-" the
stdout file is used. """
ret = libxml2mod.htmlSaveFile(filename, self._o)
return ret | [
"def",
"htmlSaveFile",
"(",
"self",
",",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlSaveFile",
"(",
"filename",
",",
"self",
".",
"_o",
")",
"return",
"ret"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L4047-L4051 |
|
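A short sketch, assuming the `libxml2` Python bindings are installed; `"-"` selects stdout, as the docstring notes.

```python
import libxml2

doc = libxml2.htmlParseDoc('<html><body><p>hi</p></body></html>', 'UTF-8')
doc.htmlSaveFile('-')  # serialize the parsed HTML to stdout
doc.freeDoc()          # the bindings do not free documents automatically
```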
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Editor/Python/windows/Lib/site-packages/pkg_resources/__init__.py | python | run_script | (dist_spec, script_name) | Locate distribution `dist_spec` and run its `script_name` script | Locate distribution `dist_spec` and run its `script_name` script | [
"Locate",
"distribution",
"dist_spec",
"and",
"run",
"its",
"script_name",
"script"
] | def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns) | [
"def",
"run_script",
"(",
"dist_spec",
",",
"script_name",
")",
":",
"ns",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_globals",
"name",
"=",
"ns",
"[",
"'__name__'",
"]",
"ns",
".",
"clear",
"(",
")",
"ns",
"[",
"'__name__'",
"]",
"=",
"name",
"require",
"(",
"dist_spec",
")",
"[",
"0",
"]",
".",
"run_script",
"(",
"script_name",
",",
"ns",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pkg_resources/__init__.py#L463-L469 |
||
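A hedged usage sketch: both names below are hypothetical, and the target distribution must declare old-style setuptools `scripts` for `run_script` to locate.

```python
import pkg_resources

# Runs 'example-cli' from 'example-pkg' (hypothetical names) in a namespace
# whose __name__ is borrowed from the caller, mimicking top-level execution.
pkg_resources.run_script('example-pkg>=1.0', 'example-cli')
```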
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/contrib/slim/python/slim/evaluation.py | python | evaluation_loop | (master,
checkpoint_dir,
logdir,
num_evals=1,
eval_op=None,
eval_op_feed_dict=None,
final_op=None,
final_op_feed_dict=None,
summary_op=_USE_DEFAULT,
summary_op_feed_dict=None,
variables_to_restore=None,
eval_interval_secs=60,
max_number_of_evaluations=None,
session_config=None) | Runs TF-Slim's Evaluation Loop.
Args:
master: The BNS address of the TensorFlow master.
checkpoint_dir: The directory where checkpoints are stored.
logdir: The directory where the TensorFlow summaries are written to.
num_evals: The number of times to run `eval_op`.
eval_op: An operation run `num_evals` times.
eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
final_op: An operation to execute after all of the `eval_op` executions. The
value of `final_op` is returned.
final_op_feed_dict: A feed dictionary to use when executing `final_op`.
summary_op: The summary_op to evaluate after running TF-Slim's metric ops. By
default the summary_op is set to tf.merge_all_summaries().
summary_op_feed_dict: An optional feed dictionary to use when running the
`summary_op`.
variables_to_restore: A list of TensorFlow variables to restore during
evaluation. If the argument is left as `None` then
slim.variables.GetVariablesToRestore() is used.
eval_interval_secs: The minimum number of seconds between evaluations.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as 'None', the evaluation continues indefinitely.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used. | Runs TF-Slim's Evaluation Loop. | [
"Runs",
"TF",
"-",
"Slim",
"s",
"Evaluation",
"Loop",
"."
] | def evaluation_loop(master,
checkpoint_dir,
logdir,
num_evals=1,
eval_op=None,
eval_op_feed_dict=None,
final_op=None,
final_op_feed_dict=None,
summary_op=_USE_DEFAULT,
summary_op_feed_dict=None,
variables_to_restore=None,
eval_interval_secs=60,
max_number_of_evaluations=None,
session_config=None):
"""Runs TF-Slim's Evaluation Loop.
Args:
master: The BNS address of the TensorFlow master.
checkpoint_dir: The directory where checkpoints are stored.
logdir: The directory where the TensorFlow summaries are written to.
num_evals: The number of times to run `eval_op`.
eval_op: An operation run `num_evals` times.
eval_op_feed_dict: The feed dictionary to use when executing the `eval_op`.
final_op: An operation to execute after all of the `eval_op` executions. The
value of `final_op` is returned.
final_op_feed_dict: A feed dictionary to use when executing `final_op`.
summary_op: The summary_op to evaluate after running TF-Slim's metric ops. By
default the summary_op is set to tf.merge_all_summaries().
summary_op_feed_dict: An optional feed dictionary to use when running the
`summary_op`.
variables_to_restore: A list of TensorFlow variables to restore during
evaluation. If the argument is left as `None` then
slim.variables.GetVariablesToRestore() is used.
eval_interval_secs: The minimum number of seconds between evaluations.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as 'None', the evaluation continues indefinitely.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
"""
if summary_op == _USE_DEFAULT:
summary_op = logging_ops.merge_all_summaries()
global_step = variables.get_or_create_global_step()
init_op = control_flow_ops.group(tf_variables.initialize_all_variables(),
tf_variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables())
saver = tf_saver.Saver(variables_to_restore or
variables.get_variables_to_restore())
summary_writer = summary_io.SummaryWriter(logdir)
sv = supervisor.Supervisor(graph=ops.get_default_graph(),
logdir=logdir,
init_op=init_op,
summary_op=None,
summary_writer=None,
global_step=None,
saver=saver)
last_checkpoint = None
number_of_evaluations = 0
while True:
last_checkpoint = wait_for_new_checkpoint(checkpoint_dir, last_checkpoint)
start = time.time()
logging.info('Starting evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
with sv.managed_session(
master, start_standard_services=False, config=session_config) as sess:
sv.saver.restore(sess, last_checkpoint)
sv.start_queue_runners(sess)
evaluation(sess,
num_evals=num_evals,
eval_op=eval_op,
eval_op_feed_dict=eval_op_feed_dict,
final_op=final_op,
final_op_feed_dict=final_op_feed_dict,
summary_op=summary_op,
summary_op_feed_dict=summary_op_feed_dict,
summary_writer=summary_writer,
global_step=global_step)
logging.info('Finished evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
number_of_evaluations += 1
if (max_number_of_evaluations and
number_of_evaluations >= max_number_of_evaluations):
logging.info('Reached max_number_of_evaluations=%s. Exit',
max_number_of_evaluations)
break
time_to_next_eval = start + eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval) | [
"def",
"evaluation_loop",
"(",
"master",
",",
"checkpoint_dir",
",",
"logdir",
",",
"num_evals",
"=",
"1",
",",
"eval_op",
"=",
"None",
",",
"eval_op_feed_dict",
"=",
"None",
",",
"final_op",
"=",
"None",
",",
"final_op_feed_dict",
"=",
"None",
",",
"summary_op",
"=",
"_USE_DEFAULT",
",",
"summary_op_feed_dict",
"=",
"None",
",",
"variables_to_restore",
"=",
"None",
",",
"eval_interval_secs",
"=",
"60",
",",
"max_number_of_evaluations",
"=",
"None",
",",
"session_config",
"=",
"None",
")",
":",
"if",
"summary_op",
"==",
"_USE_DEFAULT",
":",
"summary_op",
"=",
"logging_ops",
".",
"merge_all_summaries",
"(",
")",
"global_step",
"=",
"variables",
".",
"get_or_create_global_step",
"(",
")",
"init_op",
"=",
"control_flow_ops",
".",
"group",
"(",
"tf_variables",
".",
"initialize_all_variables",
"(",
")",
",",
"tf_variables",
".",
"initialize_local_variables",
"(",
")",
",",
"data_flow_ops",
".",
"initialize_all_tables",
"(",
")",
")",
"saver",
"=",
"tf_saver",
".",
"Saver",
"(",
"variables_to_restore",
"or",
"variables",
".",
"get_variables_to_restore",
"(",
")",
")",
"summary_writer",
"=",
"summary_io",
".",
"SummaryWriter",
"(",
"logdir",
")",
"sv",
"=",
"supervisor",
".",
"Supervisor",
"(",
"graph",
"=",
"ops",
".",
"get_default_graph",
"(",
")",
",",
"logdir",
"=",
"logdir",
",",
"init_op",
"=",
"init_op",
",",
"summary_op",
"=",
"None",
",",
"summary_writer",
"=",
"None",
",",
"global_step",
"=",
"None",
",",
"saver",
"=",
"saver",
")",
"last_checkpoint",
"=",
"None",
"number_of_evaluations",
"=",
"0",
"while",
"True",
":",
"last_checkpoint",
"=",
"wait_for_new_checkpoint",
"(",
"checkpoint_dir",
",",
"last_checkpoint",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"logging",
".",
"info",
"(",
"'Starting evaluation at '",
"+",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d-%H:%M:%S'",
",",
"time",
".",
"gmtime",
"(",
")",
")",
")",
"with",
"sv",
".",
"managed_session",
"(",
"master",
",",
"start_standard_services",
"=",
"False",
",",
"config",
"=",
"session_config",
")",
"as",
"sess",
":",
"sv",
".",
"saver",
".",
"restore",
"(",
"sess",
",",
"last_checkpoint",
")",
"sv",
".",
"start_queue_runners",
"(",
"sess",
")",
"evaluation",
"(",
"sess",
",",
"num_evals",
"=",
"num_evals",
",",
"eval_op",
"=",
"eval_op",
",",
"eval_op_feed_dict",
"=",
"eval_op_feed_dict",
",",
"final_op",
"=",
"final_op",
",",
"final_op_feed_dict",
"=",
"final_op_feed_dict",
",",
"summary_op",
"=",
"summary_op",
",",
"summary_op_feed_dict",
"=",
"summary_op_feed_dict",
",",
"summary_writer",
"=",
"summary_writer",
",",
"global_step",
"=",
"global_step",
")",
"logging",
".",
"info",
"(",
"'Finished evaluation at '",
"+",
"time",
".",
"strftime",
"(",
"'%Y-%m-%d-%H:%M:%S'",
",",
"time",
".",
"gmtime",
"(",
")",
")",
")",
"number_of_evaluations",
"+=",
"1",
"if",
"(",
"max_number_of_evaluations",
"and",
"number_of_evaluations",
">=",
"max_number_of_evaluations",
")",
":",
"logging",
".",
"info",
"(",
"'Reached max_number_of_evaluations=%s. Exit'",
",",
"max_number_of_evaluations",
")",
"break",
"time_to_next_eval",
"=",
"start",
"+",
"eval_interval_secs",
"-",
"time",
".",
"time",
"(",
")",
"if",
"time_to_next_eval",
">",
"0",
":",
"time",
".",
"sleep",
"(",
"time_to_next_eval",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/slim/python/slim/evaluation.py#L244-L339 |
||
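A sketch of typical TF-Slim usage of the loop above (TF 1.x-era API; the paths are placeholders and the constant tensors stand in for a real eval graph).

```python
import tensorflow as tf
slim = tf.contrib.slim

# Stand-ins for real model outputs.
labels = tf.constant([0, 1, 1])
predictions = tf.constant([0, 1, 0])

names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'accuracy': slim.metrics.streaming_accuracy(predictions, labels),
})

slim.evaluation.evaluation_loop(
    master='',
    checkpoint_dir='/tmp/train_logs',  # placeholder paths
    logdir='/tmp/eval_logs',
    num_evals=1,
    eval_op=list(names_to_updates.values()),
    eval_interval_secs=60)             # wake up for each new checkpoint
```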
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/ops/while_v2.py | python | _get_intermediates | (func_graph) | return intermediates | Returns all tensors in `func_graph` that should be accumulated. | Returns all tensors in `func_graph` that should be accumulated. | [
"Returns",
"all",
"tensors",
"in",
"func_graph",
"that",
"should",
"be",
"accumulated",
"."
] | def _get_intermediates(func_graph):
"""Returns all tensors in `func_graph` that should be accumulated."""
# We currently accumulate output tensors of most ops in the function and rely
# on the pruning pass to get rid of the unused accumulators at runtime.
# However, this can bloat the GraphDef and make debugging harder so we perform
# some optimizations.
#
# Optimization we currently perform:
# 1. We do not accumulate tensors which already have an accumulator
# in the loop body.
# 2. We do not accumulate outputs of Identity nodes. When building the
# FuncGraph, we add an Identity node for each output (see
# `AutomaticControlDependencies.mark_as_return`). Accumulating outputs
# of all these nodes bloats the GraphDef quite a bit so we remove those.
# Since the gradient of an Identity node does not rely on its forward op's
# input this is safe to do.
#
# Other possible optimizations:
# 1. Only accumulate tensors that will be required by the backward pass.
# This will require running the gradient pass and hence would increase the
# graph building time for the forward pass.
# 2. Do not accumulate Const nodes created inside the loop body.
# 3. Do not accumulate loop vars that are returned as-is just like captured
# tensors.
intermediates = []
reverse_captures = dict((v.ref(), k) for k, v in func_graph.captures)
for op in func_graph.get_operations():
if op.type == "Identity":
continue
# Accumulating mutexes can cause deadlock.
if op.type == "MutexLock":
continue
for o in op.outputs:
if (o is not func_graph.inputs[0] and # Loop counter.
o.dtype != dtypes.resource and # Do not accumulate resource tensors.
_get_accumulator(o) is None and # Has existing accumulator.
o.ref() not in reverse_captures
): # Captured value, hence loop invariant.
intermediates.append(o)
return intermediates | [
"def",
"_get_intermediates",
"(",
"func_graph",
")",
":",
"# We currently accumulate output tensors of most ops in the function and rely",
"# on the pruning pass to get rid of the unused accumulators at runtime.",
"# However, this can bloat the GraphDef and make debugging harder so we perform",
"# some optimizations.",
"#",
"# Optimization we currently perform:",
"# 1. We do not accumulate tensors which already have an accumulator",
"# in the loop body.",
"# 2. We do not accumulate outputs of Identity nodes. When building the",
"# FuncGraph, we add an Identity node for each output (see",
"# `AutomaticControlDependencies.mark_as_return`). Accumulating outputs",
"# of all these nodes bloats the GraphDef quite a bit so we remove those.",
"# Since the gradient of an Identity node does not rely on its forward op's",
"# input this is safe to do.",
"#",
"# Other possible optimizations:",
"# 1. Only accumulate tensors that will be required by the backward pass.",
"# This will require running the gradient pass and hence would increase the",
"# graph building time for the forward pass.",
"# 2. Do not accumulate Const nodes created inside the loop body.",
"# 3. Do not accumulate loop vars that are returned as-is just like captured",
"# tensors.",
"intermediates",
"=",
"[",
"]",
"reverse_captures",
"=",
"dict",
"(",
"(",
"v",
".",
"ref",
"(",
")",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"func_graph",
".",
"captures",
")",
"for",
"op",
"in",
"func_graph",
".",
"get_operations",
"(",
")",
":",
"if",
"op",
".",
"type",
"==",
"\"Identity\"",
":",
"continue",
"# Accumulating mutexes can cause deadlock.",
"if",
"op",
".",
"type",
"==",
"\"MutexLock\"",
":",
"continue",
"for",
"o",
"in",
"op",
".",
"outputs",
":",
"if",
"(",
"o",
"is",
"not",
"func_graph",
".",
"inputs",
"[",
"0",
"]",
"and",
"# Loop counter.",
"o",
".",
"dtype",
"!=",
"dtypes",
".",
"resource",
"and",
"# Do not accumulate resource tensors.",
"_get_accumulator",
"(",
"o",
")",
"is",
"None",
"and",
"# Has existing accumulator.",
"o",
".",
"ref",
"(",
")",
"not",
"in",
"reverse_captures",
")",
":",
"# Captured value, hence loop invariant.",
"intermediates",
".",
"append",
"(",
"o",
")",
"return",
"intermediates"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/while_v2.py#L502-L542 |
|
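The accumulators selected above are what make gradients of `while_v2` loops possible; a small end-to-end check in TF 2.x, where `tf.while_loop` lowers to while_v2.

```python
import tensorflow as tf

x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    # v = x ** 3 after three iterations.
    _, v = tf.while_loop(lambda i, v: i < 3,
                         lambda i, v: (i + 1, v * x),
                         [tf.constant(0), tf.constant(1.0)])
print(tape.gradient(v, x))  # 3 * x**2 = 12.0
```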
wujian16/Cornell-MOE | df299d1be882d2af9796d7a68b3f9505cac7a53e | moe/optimal_learning/python/python_version/optimization.py | python | NullOptimizer.__init__ | (self, domain, optimizable, *args, **kwargs) | Construct a NullOptimizer.
:param domain: the domain that this optimizer operates over
:type domain: interfaces.domain_interface.DomainInterface subclass
:param optimizable: object representing the objective function being optimized
:type optimizable: interfaces.optimization_interface.OptimizableInterface subclass | Construct a NullOptimizer. | [
"Construct",
"a",
"NullOptimizer",
"."
] | def __init__(self, domain, optimizable, *args, **kwargs):
"""Construct a NullOptimizer.
:param domain: the domain that this optimizer operates over
:type domain: interfaces.domain_interface.DomainInterface subclass
:param optimizable: object representing the objective function being optimized
:type optimizable: interfaces.optimization_interface.OptimizableInterface subclass
"""
self.domain = domain
self.objective_function = optimizable | [
"def",
"__init__",
"(",
"self",
",",
"domain",
",",
"optimizable",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"domain",
"=",
"domain",
"self",
".",
"objective_function",
"=",
"optimizable"
] | https://github.com/wujian16/Cornell-MOE/blob/df299d1be882d2af9796d7a68b3f9505cac7a53e/moe/optimal_learning/python/python_version/optimization.py#L375-L385 |
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/linalg_grad.py | python | _TransposeTridiagonalMatrix | (diags) | return array_ops.stack([superdiag, diag, subdiag], axis=-2) | Transposes a tridiagonal matrix.
Args:
diags: the diagonals of the input matrix in the compact form (see
linalg_ops.tridiagonal_solve).
Returns:
Diagonals of the transposed matrix in the compact form. | Transposes a tridiagonal matrix. | [
"Transposes",
"a",
"tridiagonal",
"matrix",
"."
] | def _TransposeTridiagonalMatrix(diags):
"""Transposes a tridiagonal matrix.
Args:
diags: the diagonals of the input matrix in the compact form (see
linalg_ops.tridiagonal_solve).
Returns:
Diagonals of the transposed matrix in the compact form.
"""
diag = diags[..., 1, :]
if diags.shape.is_fully_defined():
# For fully defined tensor we can concat with a tensor of zeros, which is
# faster than using array_ops.pad().
zeros = array_ops.zeros(list(diags.shape[:-2]) + [1], dtype=diags.dtype)
superdiag = array_ops.concat((diags[..., 2, 1:], zeros), axis=-1)
subdiag = array_ops.concat((zeros, diags[..., 0, :-1]), axis=-1)
else:
rank = array_ops.rank(diags)
zeros = array_ops.zeros((rank - 2, 2), dtype=dtypes.int32)
superdiag_pad = array_ops.concat((zeros, array_ops.constant([[0, 1]])),
axis=0)
superdiag = array_ops.pad(diags[..., 2, 1:], superdiag_pad)
subdiag_pad = array_ops.concat((zeros, array_ops.constant([[1, 0]])),
axis=0)
subdiag = array_ops.pad(diags[..., 0, :-1], subdiag_pad)
return array_ops.stack([superdiag, diag, subdiag], axis=-2) | [
"def",
"_TransposeTridiagonalMatrix",
"(",
"diags",
")",
":",
"diag",
"=",
"diags",
"[",
"...",
",",
"1",
",",
":",
"]",
"if",
"diags",
".",
"shape",
".",
"is_fully_defined",
"(",
")",
":",
"# For fully defined tensor we can concat with a tensor of zeros, which is",
"# faster than using array_ops.pad().",
"zeros",
"=",
"array_ops",
".",
"zeros",
"(",
"list",
"(",
"diags",
".",
"shape",
"[",
":",
"-",
"2",
"]",
")",
"+",
"[",
"1",
"]",
",",
"dtype",
"=",
"diags",
".",
"dtype",
")",
"superdiag",
"=",
"array_ops",
".",
"concat",
"(",
"(",
"diags",
"[",
"...",
",",
"2",
",",
"1",
":",
"]",
",",
"zeros",
")",
",",
"axis",
"=",
"-",
"1",
")",
"subdiag",
"=",
"array_ops",
".",
"concat",
"(",
"(",
"zeros",
",",
"diags",
"[",
"...",
",",
"0",
",",
":",
"-",
"1",
"]",
")",
",",
"axis",
"=",
"-",
"1",
")",
"else",
":",
"rank",
"=",
"array_ops",
".",
"rank",
"(",
"diags",
")",
"zeros",
"=",
"array_ops",
".",
"zeros",
"(",
"(",
"rank",
"-",
"2",
",",
"2",
")",
",",
"dtype",
"=",
"dtypes",
".",
"int32",
")",
"superdiag_pad",
"=",
"array_ops",
".",
"concat",
"(",
"(",
"zeros",
",",
"array_ops",
".",
"constant",
"(",
"[",
"[",
"0",
",",
"1",
"]",
"]",
")",
")",
",",
"axis",
"=",
"0",
")",
"superdiag",
"=",
"array_ops",
".",
"pad",
"(",
"diags",
"[",
"...",
",",
"2",
",",
"1",
":",
"]",
",",
"superdiag_pad",
")",
"subdiag_pad",
"=",
"array_ops",
".",
"concat",
"(",
"(",
"zeros",
",",
"array_ops",
".",
"constant",
"(",
"[",
"[",
"1",
",",
"0",
"]",
"]",
")",
")",
",",
"axis",
"=",
"0",
")",
"subdiag",
"=",
"array_ops",
".",
"pad",
"(",
"diags",
"[",
"...",
",",
"0",
",",
":",
"-",
"1",
"]",
",",
"subdiag_pad",
")",
"return",
"array_ops",
".",
"stack",
"(",
"[",
"superdiag",
",",
"diag",
",",
"subdiag",
"]",
",",
"axis",
"=",
"-",
"2",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/linalg_grad.py#L527-L555 |
|
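A NumPy sketch of the compact-format bookkeeping above (row 0 = superdiagonal, row 1 = main diagonal, row 2 = subdiagonal, as in `tf.linalg.tridiagonal_solve`); the values are arbitrary.

```python
import numpy as np

diags = np.array([[1., 2., 0.],   # superdiagonal (last slot unused)
                  [4., 5., 6.],   # main diagonal
                  [0., 7., 8.]])  # subdiagonal (first slot unused)
A = (np.diag(diags[1]) + np.diag(diags[0, :-1], 1)
     + np.diag(diags[2, 1:], -1))

# Transpose in compact form: the old subdiagonal shifts left into the
# superdiagonal row and vice versa -- the same concat/pad dance as above.
t = np.stack([np.append(diags[2, 1:], 0.),
              diags[1],
              np.insert(diags[0, :-1], 0, 0.)])
At = np.diag(t[1]) + np.diag(t[0, :-1], 1) + np.diag(t[2, 1:], -1)
assert np.allclose(At, A.T)
```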
OAID/Tengine | 66b2c22ad129d25e2fc6de3b22a608bb54dd90db | tools/optimize/nanodet_m-opt.py | python | optimize_add_softmax | (nodes) | return nodes | add additional softmax node in the end of all distance prediction branches
Args:
nodes: the graph.node of ONNX model
Returns:
optimized graph nodes(inplace) | add additional softmax node in the end of all distance prediction branches
Args:
nodes: the graph.node of ONNX model
Returns:
optimized graph nodes(inplace) | [
"add",
"additional",
"softmax",
"node",
"in",
"the",
"end",
"of",
"all",
"distance",
"prediction",
"branches",
"Args",
":",
"nodes",
":",
"the",
"graph",
".",
"node",
"of",
"ONNX",
"model",
"Returns",
":",
"optimized",
"graph",
"nodes",
"(",
"inplace",
")"
] | def optimize_add_softmax(nodes):
"""
add additional softmax node in the end of all distance prediction branches
Args:
nodes: the graph.node of ONNX model
Returns:
optimized graph nodes(inplace)
"""
for n in nodes:
if 'Transpose' == n.op_type and "dis_pred_stride_" in n.output[0]:
## add additional softmax node
_input = n.output[0]
_output = _input.replace("dis_pred_stride_", "dis_sm_stride_")
n_sm = onnx.helper.make_node('Softmax', inputs=[_input], outputs=[_output], axis=-1)
nodes.append(n_sm)
return nodes | [
"def",
"optimize_add_softmax",
"(",
"nodes",
")",
":",
"for",
"n",
"in",
"nodes",
":",
"if",
"'Transpose'",
"==",
"n",
".",
"op_type",
"and",
"\"dis_pred_stride_\"",
"in",
"n",
".",
"output",
"[",
"0",
"]",
":",
"## add additional softmax node",
"_input",
"=",
"n",
".",
"output",
"[",
"0",
"]",
"_output",
"=",
"_input",
".",
"replace",
"(",
"\"dis_pred_stride_\"",
",",
"\"dis_sm_stride_\"",
")",
"n_sm",
"=",
"onnx",
".",
"helper",
".",
"make_node",
"(",
"'Softmax'",
",",
"inputs",
"=",
"[",
"_input",
"]",
",",
"outputs",
"=",
"[",
"_output",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"nodes",
".",
"append",
"(",
"n_sm",
")",
"return",
"nodes"
] | https://github.com/OAID/Tengine/blob/66b2c22ad129d25e2fc6de3b22a608bb54dd90db/tools/optimize/nanodet_m-opt.py#L125-L140 |
|
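A minimal check of the pass above; the tensor names are hypothetical but follow the `dis_pred_stride_` pattern it matches on.

```python
import onnx

n = onnx.helper.make_node('Transpose', inputs=['x'],
                          outputs=['dis_pred_stride_8'])
nodes = optimize_add_softmax([n])
print([m.op_type for m in nodes])  # ['Transpose', 'Softmax']
print(nodes[1].output[0])          # 'dis_sm_stride_8'
```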
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/nn/probability/distribution/_utils/custom_ops.py | python | exp_generic | (input_x) | return exp(input_x) | Exp op on Ascend doesn't support int types.
Fix this with casting the type. | Exp op on Ascend doesn't support int types.
Fix this with casting the type. | [
"Exp",
"op",
"on",
"Ascend",
"doesn",
"t",
"support",
"int",
"types",
".",
"Fix",
"this",
"with",
"casting",
"the",
"type",
"."
] | def exp_generic(input_x):
"""
Exp op on Ascend doesn't support int types.
Fix this with casting the type.
"""
exp = P.Exp()
cast = P.Cast()
dtype = P.DType()
checktype = P.IsSubClass()
if not checktype(dtype(input_x), mstype.float_):
input_x = cast(input_x, mstype.float32)
return exp(input_x) | [
"def",
"exp_generic",
"(",
"input_x",
")",
":",
"exp",
"=",
"P",
".",
"Exp",
"(",
")",
"cast",
"=",
"P",
".",
"Cast",
"(",
")",
"dtype",
"=",
"P",
".",
"DType",
"(",
")",
"checktype",
"=",
"P",
".",
"IsSubClass",
"(",
")",
"if",
"not",
"checktype",
"(",
"dtype",
"(",
"input_x",
")",
",",
"mstype",
".",
"float_",
")",
":",
"input_x",
"=",
"cast",
"(",
"input_x",
",",
"mstype",
".",
"float32",
")",
"return",
"exp",
"(",
"input_x",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/probability/distribution/_utils/custom_ops.py#L21-L33 |
|
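A quick sketch, assuming the module-level imports above (`P`, `mstype`) are in scope; the int32 input is upcast to float32 before `Exp`.

```python
import mindspore as ms
from mindspore import Tensor

x = Tensor([0, 1, 2], ms.int32)
print(exp_generic(x))  # ~[1.0, 2.718, 7.389], computed in float32
```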
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | PlatformInformation.GetOperatingSystemIdName | (*args, **kwargs) | return _misc_.PlatformInformation_GetOperatingSystemIdName(*args, **kwargs) | GetOperatingSystemIdName(self) -> String | GetOperatingSystemIdName(self) -> String | [
"GetOperatingSystemIdName",
"(",
"self",
")",
"-",
">",
"String"
] | def GetOperatingSystemIdName(*args, **kwargs):
"""GetOperatingSystemIdName(self) -> String"""
return _misc_.PlatformInformation_GetOperatingSystemIdName(*args, **kwargs) | [
"def",
"GetOperatingSystemIdName",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"PlatformInformation_GetOperatingSystemIdName",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L1113-L1115 |
|
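Usage sketch for classic wxPython; the printed name depends on the host OS.

```python
import wx

app = wx.App(False)  # some wx queries expect a live App object
info = wx.PlatformInformation()
print(info.GetOperatingSystemIdName())  # e.g. 'Apple Mac OS X'
```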
krishauser/Klampt | 972cc83ea5befac3f653c1ba20f80155768ad519 | Python/klampt/robotsim.py | python | VolumeGrid.setValues | (self, np_array3: Vector) | return _robotsim.VolumeGrid_setValues(self, np_array3) | r"""
Args:
np_array3 (:obj:`3D Numpy array of floats`) | r"""
Args:
np_array3 (:obj:`3D Numpy array of floats`) | [
"r",
"Args",
":",
"np_array3",
"(",
":",
"obj",
":",
"3D",
"Numpy",
"array",
"of",
"floats",
")"
] | def setValues(self, np_array3: Vector) ->None:
r"""
Args:
np_array3 (:obj:`3D Numpy array of floats`)
"""
return _robotsim.VolumeGrid_setValues(self, np_array3) | [
"def",
"setValues",
"(",
"self",
",",
"np_array3",
":",
"Vector",
")",
"->",
"None",
":",
"return",
"_robotsim",
".",
"VolumeGrid_setValues",
"(",
"self",
",",
"np_array3",
")"
] | https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/robotsim.py#L1771-L1776 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | samples/pySketch/pySketch.py | python | DrawingObject.getData | (self) | return [self.position.x, self.position.y,
self.size.width, self.size.height,
self.penColour.Red(),
self.penColour.Green(),
self.penColour.Blue(),
self.fillColour.Red(),
self.fillColour.Green(),
self.fillColour.Blue(),
self.lineSize] | Return a copy of the object's internal data.
This is used to save this DrawingObject to disk. | Return a copy of the object's internal data. | [
"Return",
"a",
"copy",
"of",
"the",
"object",
"s",
"internal",
"data",
"."
] | def getData(self):
""" Return a copy of the object's internal data.
This is used to save this DrawingObject to disk.
"""
return [self.position.x, self.position.y,
self.size.width, self.size.height,
self.penColour.Red(),
self.penColour.Green(),
self.penColour.Blue(),
self.fillColour.Red(),
self.fillColour.Green(),
self.fillColour.Blue(),
self.lineSize] | [
"def",
"getData",
"(",
"self",
")",
":",
"return",
"[",
"self",
".",
"position",
".",
"x",
",",
"self",
".",
"position",
".",
"y",
",",
"self",
".",
"size",
".",
"width",
",",
"self",
".",
"size",
".",
"height",
",",
"self",
".",
"penColour",
".",
"Red",
"(",
")",
",",
"self",
".",
"penColour",
".",
"Green",
"(",
")",
",",
"self",
".",
"penColour",
".",
"Blue",
"(",
")",
",",
"self",
".",
"fillColour",
".",
"Red",
"(",
")",
",",
"self",
".",
"fillColour",
".",
"Green",
"(",
")",
",",
"self",
".",
"fillColour",
".",
"Blue",
"(",
")",
",",
"self",
".",
"lineSize",
"]"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/pySketch/pySketch.py#L2209-L2222 |
|
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozbuild/jar.py | python | JarMaker.finalizeJar | (self, jarPath, chromebasepath, register, doZip=True) | Helper method to write out the chrome registration entries to
jarfile.manifest or chrome.manifest, or both.
The actual file processing is done in updateManifest. | Helper method to write out the chrome registration entries to
jarfile.manifest or chrome.manifest, or both. | [
"Helper",
"method",
"to",
"write",
"out",
"the",
"chrome",
"registration",
"entries",
"to",
"jarfile",
".",
"manifest",
"or",
"chrome",
".",
"manifest",
"or",
"both",
"."
] | def finalizeJar(self, jarPath, chromebasepath, register, doZip=True):
'''Helper method to write out the chrome registration entries to
jarfile.manifest or chrome.manifest, or both.
The actual file processing is done in updateManifest.
'''
# rewrite the manifest, if entries given
if not register:
return
chromeManifest = os.path.join(os.path.dirname(jarPath), '..',
'chrome.manifest')
if self.useJarfileManifest:
self.updateManifest(jarPath + '.manifest',
chromebasepath.format(''), register)
addEntriesToListFile(chromeManifest,
['manifest chrome/{0}.manifest'.format(os.path.basename(jarPath))])
if self.useChromeManifest:
self.updateManifest(chromeManifest,
chromebasepath.format('chrome/'),
register)
# If requested, add a root chrome manifest entry (assumed to be in the parent directory
# of chromeManifest) with the application specific id. In cases where we're building
# lang packs, the root manifest must know about application sub directories.
if self.rootManifestAppId:
rootChromeManifest = \
os.path.join(os.path.normpath(os.path.dirname(chromeManifest)),
'..', 'chrome.manifest')
rootChromeManifest = os.path.normpath(rootChromeManifest)
chromeDir = \
os.path.basename(os.path.dirname(os.path.normpath(chromeManifest)))
logging.info("adding '%s' entry to root chrome manifest appid=%s"
% (chromeDir, self.rootManifestAppId))
addEntriesToListFile(rootChromeManifest,
['manifest %s/chrome.manifest application=%s'
% (chromeDir,
self.rootManifestAppId)]) | [
"def",
"finalizeJar",
"(",
"self",
",",
"jarPath",
",",
"chromebasepath",
",",
"register",
",",
"doZip",
"=",
"True",
")",
":",
"# rewrite the manifest, if entries given",
"if",
"not",
"register",
":",
"return",
"chromeManifest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"jarPath",
")",
",",
"'..'",
",",
"'chrome.manifest'",
")",
"if",
"self",
".",
"useJarfileManifest",
":",
"self",
".",
"updateManifest",
"(",
"jarPath",
"+",
"'.manifest'",
",",
"chromebasepath",
".",
"format",
"(",
"''",
")",
",",
"register",
")",
"addEntriesToListFile",
"(",
"chromeManifest",
",",
"[",
"'manifest chrome/{0}.manifest'",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"jarPath",
")",
")",
"]",
")",
"if",
"self",
".",
"useChromeManifest",
":",
"self",
".",
"updateManifest",
"(",
"chromeManifest",
",",
"chromebasepath",
".",
"format",
"(",
"'chrome/'",
")",
",",
"register",
")",
"# If requested, add a root chrome manifest entry (assumed to be in the parent directory",
"# of chromeManifest) with the application specific id. In cases where we're building",
"# lang packs, the root manifest must know about application sub directories.",
"if",
"self",
".",
"rootManifestAppId",
":",
"rootChromeManifest",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"chromeManifest",
")",
")",
",",
"'..'",
",",
"'chrome.manifest'",
")",
"rootChromeManifest",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"rootChromeManifest",
")",
"chromeDir",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"chromeManifest",
")",
")",
")",
"logging",
".",
"info",
"(",
"\"adding '%s' entry to root chrome manifest appid=%s\"",
"%",
"(",
"chromeDir",
",",
"self",
".",
"rootManifestAppId",
")",
")",
"addEntriesToListFile",
"(",
"rootChromeManifest",
",",
"[",
"'manifest %s/chrome.manifest application=%s'",
"%",
"(",
"chromeDir",
",",
"self",
".",
"rootManifestAppId",
")",
"]",
")"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozbuild/jar.py#L153-L193 |
||
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/executables.py | python | is_executable | (path) | return get_type(path) != UNKNOWN | Return whether a given file path points to an executable or a library,
where an executable or library is identified by:
- the file extension on OS/2 and WINNT
- the file signature on OS/X and ELF systems (GNU/Linux, Android, BSD,
Solaris)
As this function is intended for use to choose between the ExecutableFile
and File classes in FileFinder, and choosing ExecutableFile only matters
on OS/2, OS/X, ELF and WINNT (in GCC build) systems, we don't bother
detecting other kind of executables. | Return whether a given file path points to an executable or a library,
where an executable or library is identified by:
- the file extension on OS/2 and WINNT
- the file signature on OS/X and ELF systems (GNU/Linux, Android, BSD,
Solaris) | [
"Return",
"whether",
"a",
"given",
"file",
"path",
"points",
"to",
"an",
"executable",
"or",
"a",
"library",
"where",
"an",
"executable",
"or",
"library",
"is",
"identified",
"by",
":",
"-",
"the",
"file",
"extension",
"on",
"OS",
"/",
"2",
"and",
"WINNT",
"-",
"the",
"file",
"signature",
"on",
"OS",
"/",
"X",
"and",
"ELF",
"systems",
"(",
"GNU",
"/",
"Linux",
"Android",
"BSD",
"Solaris",
")"
] | def is_executable(path):
'''
Return whether a given file path points to an executable or a library,
where an executable or library is identified by:
- the file extension on OS/2 and WINNT
- the file signature on OS/X and ELF systems (GNU/Linux, Android, BSD,
Solaris)
As this function is intended for use to choose between the ExecutableFile
and File classes in FileFinder, and choosing ExecutableFile only matters
on OS/2, OS/X, ELF and WINNT (in GCC build) systems, we don't bother
detecting other kind of executables.
'''
from buildconfig import substs
if not os.path.exists(path):
return False
if substs['OS_ARCH'] == 'WINNT':
return path.lower().endswith((substs['DLL_SUFFIX'],
substs['BIN_SUFFIX']))
return get_type(path) != UNKNOWN | [
"def",
"is_executable",
"(",
"path",
")",
":",
"from",
"buildconfig",
"import",
"substs",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"False",
"if",
"substs",
"[",
"'OS_ARCH'",
"]",
"==",
"'WINNT'",
":",
"return",
"path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"(",
"substs",
"[",
"'DLL_SUFFIX'",
"]",
",",
"substs",
"[",
"'BIN_SUFFIX'",
"]",
")",
")",
"return",
"get_type",
"(",
"path",
")",
"!=",
"UNKNOWN"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/executables.py#L57-L78 |
|
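Illustration only: `is_executable` imports the generated `buildconfig` module, so it is callable solely inside a mozilla-central build environment.

```python
import sys

# Inside a Mozilla objdir's Python:
print(is_executable(sys.executable))  # True: ELF/Mach-O (or .exe on WINNT)
print(is_executable('no-such-file'))  # False: path does not exist
```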
rapidsai/cudf | d5b2448fc69f17509304d594f029d0df56984962 | python/dask_cudf/dask_cudf/_version.py | python | render_git_describe_long | (pieces) | return rendered | TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix) | TAG-DISTANCE-gHEX[-dirty]. | [
"TAG",
"-",
"DISTANCE",
"-",
"gHEX",
"[",
"-",
"dirty",
"]",
"."
] | def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered | [
"def",
"render_git_describe_long",
"(",
"pieces",
")",
":",
"if",
"pieces",
"[",
"\"closest-tag\"",
"]",
":",
"rendered",
"=",
"pieces",
"[",
"\"closest-tag\"",
"]",
"rendered",
"+=",
"\"-%d-g%s\"",
"%",
"(",
"pieces",
"[",
"\"distance\"",
"]",
",",
"pieces",
"[",
"\"short\"",
"]",
")",
"else",
":",
"# exception #1",
"rendered",
"=",
"pieces",
"[",
"\"short\"",
"]",
"if",
"pieces",
"[",
"\"dirty\"",
"]",
":",
"rendered",
"+=",
"\"-dirty\"",
"return",
"rendered"
] | https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/dask_cudf/dask_cudf/_version.py#L456-L473 |
|
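The `pieces` dict normally comes from versioneer's git probing; hand-built inputs exercise both branches of the renderer.

```python
print(render_git_describe_long(
    {'closest-tag': 'v21.06', 'distance': 3, 'short': 'abc1234',
     'dirty': True}))   # v21.06-3-gabc1234-dirty
print(render_git_describe_long(
    {'closest-tag': None, 'distance': 7, 'short': 'abc1234',
     'dirty': False}))  # abc1234  (exception #1: no tags)
```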
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_gdi.py | python | GraphicsRenderer.CreateBrush | (*args, **kwargs) | return _gdi_.GraphicsRenderer_CreateBrush(*args, **kwargs) | CreateBrush(self, Brush brush) -> GraphicsBrush | CreateBrush(self, Brush brush) -> GraphicsBrush | [
"CreateBrush",
"(",
"self",
"Brush",
"brush",
")",
"-",
">",
"GraphicsBrush"
] | def CreateBrush(*args, **kwargs):
"""CreateBrush(self, Brush brush) -> GraphicsBrush"""
return _gdi_.GraphicsRenderer_CreateBrush(*args, **kwargs) | [
"def",
"CreateBrush",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"GraphicsRenderer_CreateBrush",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_gdi.py#L6604-L6606 |
|
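Sketch for classic wxPython; graphics renderers generally require a live `wx.App`.

```python
import wx

app = wx.App(False)
renderer = wx.GraphicsRenderer.GetDefaultRenderer()
gbrush = renderer.CreateBrush(wx.Brush(wx.Colour(255, 0, 0)))
```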
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/tensor_forest/client/random_forest.py | python | EveryCheckpointPreSaveListener.__init__ | (self, op) | Initializes the object.
Args:
op: An op to run before each checkpoint save. | Initializes the object. | [
"Initializes",
"the",
"object",
"."
] | def __init__(self, op):
"""Initializes the object.
Args:
op: An op to run before each checkpoint save.
"""
self._op = op | [
"def",
"__init__",
"(",
"self",
",",
"op",
")",
":",
"self",
".",
"_op",
"=",
"op"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/tensor_forest/client/random_forest.py#L131-L137 |
||
google/syzygy | 8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5 | third_party/numpy/files/numpy/ctypeslib.py | python | ndpointer | (dtype=None, ndim=None, shape=None, flags=None) | return klass | Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP | Array-checking restype/argtypes. | [
"Array",
"-",
"checking",
"restype",
"/",
"argtypes",
"."
] | def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
... #doctest: +SKIP
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
... #doctest: +SKIP
"""
if dtype is not None:
dtype = _dtype(dtype)
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except:
raise TypeError, "invalid flags specification"
num = _num_fromflags(flags)
try:
return _pointer_type_cache[(dtype, ndim, shape, num)]
except KeyError:
pass
if dtype is None:
name = 'any'
elif dtype.names:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
try:
strshape = [str(x) for x in shape]
except TypeError:
strshape = [str(shape)]
shape = (shape,)
shape = tuple(shape)
name += "_"+"x".join(strshape)
if flags is not None:
name += "_"+"_".join(flags)
else:
flags = []
klass = type("ndpointer_%s"%name, (_ndptr,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
_pointer_type_cache[dtype] = klass
return klass | [
"def",
"ndpointer",
"(",
"dtype",
"=",
"None",
",",
"ndim",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"flags",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"not",
"None",
":",
"dtype",
"=",
"_dtype",
"(",
"dtype",
")",
"num",
"=",
"None",
"if",
"flags",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"flags",
",",
"str",
")",
":",
"flags",
"=",
"flags",
".",
"split",
"(",
"','",
")",
"elif",
"isinstance",
"(",
"flags",
",",
"(",
"int",
",",
"integer",
")",
")",
":",
"num",
"=",
"flags",
"flags",
"=",
"_flags_fromnum",
"(",
"num",
")",
"elif",
"isinstance",
"(",
"flags",
",",
"flagsobj",
")",
":",
"num",
"=",
"flags",
".",
"num",
"flags",
"=",
"_flags_fromnum",
"(",
"num",
")",
"if",
"num",
"is",
"None",
":",
"try",
":",
"flags",
"=",
"[",
"x",
".",
"strip",
"(",
")",
".",
"upper",
"(",
")",
"for",
"x",
"in",
"flags",
"]",
"except",
":",
"raise",
"TypeError",
",",
"\"invalid flags specification\"",
"num",
"=",
"_num_fromflags",
"(",
"flags",
")",
"try",
":",
"return",
"_pointer_type_cache",
"[",
"(",
"dtype",
",",
"ndim",
",",
"shape",
",",
"num",
")",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"dtype",
"is",
"None",
":",
"name",
"=",
"'any'",
"elif",
"dtype",
".",
"names",
":",
"name",
"=",
"str",
"(",
"id",
"(",
"dtype",
")",
")",
"else",
":",
"name",
"=",
"dtype",
".",
"str",
"if",
"ndim",
"is",
"not",
"None",
":",
"name",
"+=",
"\"_%dd\"",
"%",
"ndim",
"if",
"shape",
"is",
"not",
"None",
":",
"try",
":",
"strshape",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"shape",
"]",
"except",
"TypeError",
":",
"strshape",
"=",
"[",
"str",
"(",
"shape",
")",
"]",
"shape",
"=",
"(",
"shape",
",",
")",
"shape",
"=",
"tuple",
"(",
"shape",
")",
"name",
"+=",
"\"_\"",
"+",
"\"x\"",
".",
"join",
"(",
"strshape",
")",
"if",
"flags",
"is",
"not",
"None",
":",
"name",
"+=",
"\"_\"",
"+",
"\"_\"",
".",
"join",
"(",
"flags",
")",
"else",
":",
"flags",
"=",
"[",
"]",
"klass",
"=",
"type",
"(",
"\"ndpointer_%s\"",
"%",
"name",
",",
"(",
"_ndptr",
",",
")",
",",
"{",
"\"_dtype_\"",
":",
"dtype",
",",
"\"_shape_\"",
":",
"shape",
",",
"\"_ndim_\"",
":",
"ndim",
",",
"\"_flags_\"",
":",
"num",
"}",
")",
"_pointer_type_cache",
"[",
"dtype",
"]",
"=",
"klass",
"return",
"klass"
] | https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/ctypeslib.py#L186-L286 |
|
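A small demonstration of the returned type object; the shared-library line is commented out because the library name is hypothetical, but `from_param` can be exercised directly.

```python
import numpy as np
from numpy.ctypeslib import ndpointer

arr_1d_f64 = ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')
# import ctypes; ctypes.CDLL('./libsome.so').somefunc.argtypes = [arr_1d_f64]

a = np.arange(4, dtype=np.float64)
arr_1d_f64.from_param(a)  # passes all checks
try:
    arr_1d_f64.from_param(a.astype(np.float32))
except TypeError as e:
    print(e)  # wrong dtype is rejected at call time
```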
eventql/eventql | 7ca0dbb2e683b525620ea30dc40540a22d5eb227 | deps/3rdparty/spidermonkey/mozjs/python/requests/requests/sessions.py | python | Session.close | (self) | Closes all adapters and as such the session | Closes all adapters and as such the session | [
"Closes",
"all",
"adapters",
"and",
"as",
"such",
"the",
"session"
] | def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close() | [
"def",
"close",
"(",
"self",
")",
":",
"for",
"v",
"in",
"self",
".",
"adapters",
".",
"values",
"(",
")",
":",
"v",
".",
"close",
"(",
")"
] | https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/requests/requests/sessions.py#L648-L651 |
||
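Prefer the context-manager form, which calls `close()` (and thus each adapter's `close()`) automatically.

```python
import requests

with requests.Session() as s:
    s.get('https://example.org')
# On exit, s.close() has released every pooled adapter connection.
```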
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TNEGraph.GetMxNId | (self) | return _snap.TNEGraph_GetMxNId(self) | GetMxNId(TNEGraph self) -> int
Parameters:
self: TNEGraph const * | GetMxNId(TNEGraph self) -> int | [
"GetMxNId",
"(",
"TNEGraph",
"self",
")",
"-",
">",
"int"
] | def GetMxNId(self):
"""
GetMxNId(TNEGraph self) -> int
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_GetMxNId(self) | [
"def",
"GetMxNId",
"(",
"self",
")",
":",
"return",
"_snap",
".",
"TNEGraph_GetMxNId",
"(",
"self",
")"
] | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L4468-L4476 |
|
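Sketch with the SNAP Python bindings; `GetMxNId` returns a value strictly greater than any current node id, convenient for sizing id-indexed arrays.

```python
import snap

g = snap.TNEGraph.New()
for i in range(5):
    g.AddNode(i)
print(g.GetMxNId())  # 5, with node ids 0..4
```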
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/aui.py | python | AuiToolBarItem.SetActive | (*args, **kwargs) | return _aui.AuiToolBarItem_SetActive(*args, **kwargs) | SetActive(self, bool b) | SetActive(self, bool b) | [
"SetActive",
"(",
"self",
"bool",
"b",
")"
] | def SetActive(*args, **kwargs):
"""SetActive(self, bool b)"""
return _aui.AuiToolBarItem_SetActive(*args, **kwargs) | [
"def",
"SetActive",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiToolBarItem_SetActive",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/aui.py#L1841-L1843 |
|
isl-org/Open3D | 79aec3ddde6a571ce2f28e4096477e52ec465244 | python/open3d/ml/tf/python/layers/neighbor_search.py | python | FixedRadiusSearch.call | (self,
points,
queries,
radius,
points_row_splits=None,
queries_row_splits=None,
hash_table_size_factor=1 / 64,
hash_table=None) | return result | This function computes the neighbors within a fixed radius for each query point.
Arguments:
points: The 3D positions of the input points. It can be a RaggedTensor.
*This argument must be given as a positional argument!*
queries: The 3D positions of the query points. It can be a RaggedTensor.
radius: A scalar with the neighborhood radius
points_row_splits: Optional 1D vector with the row splits information
if points is batched. This vector is [0, num_points] if there is
only 1 batch item.
queries_row_splits: Optional 1D vector with the row splits information
if queries is batched. This vector is [0, num_queries] if there is
only 1 batch item.
hash_table_size_factor: Scalar. The size of the hash table as fraction
of points.
hash_table: A precomputed hash table generated with build_spatial_hash_table().
This input can be used to explicitly force the reuse of a hash table in special
cases and is usually not needed.
Note that the hash table must have been generated with the same 'points' array.
Returns:
3 Tensors in the following order
neighbors_index
The compact list of indices of the neighbors. The corresponding query point
can be inferred from the 'neighbor_count_row_splits' vector.
neighbors_row_splits
The exclusive prefix sum of the neighbor count for the query points including
the total neighbor count as the last element. The size of this array is the
number of queries + 1.
neighbors_distance
Stores the distance to each neighbor if 'return_distances' is True.
Note that the distances are squared if metric is L2.
This is a zero length Tensor if 'return_distances' is False. | This function computes the neighbors within a fixed radius for each query point. | [
"This",
"function",
"computes",
"the",
"neighbors",
"within",
"a",
"fixed",
"radius",
"for",
"each",
"query",
"point",
"."
] | def call(self,
points,
queries,
radius,
points_row_splits=None,
queries_row_splits=None,
hash_table_size_factor=1 / 64,
hash_table=None):
"""This function computes the neighbors within a fixed radius for each query point.
Arguments:
points: The 3D positions of the input points. It can be a RaggedTensor.
*This argument must be given as a positional argument!*
queries: The 3D positions of the query points. It can be a RaggedTensor.
radius: A scalar with the neighborhood radius
points_row_splits: Optional 1D vector with the row splits information
if points is batched. This vector is [0, num_points] if there is
only 1 batch item.
queries_row_splits: Optional 1D vector with the row splits information
if queries is batched. This vector is [0, num_queries] if there is
only 1 batch item.
hash_table_size_factor: Scalar. The size of the hash table as fraction
of points.
hash_table: A precomputed hash table generated with build_spatial_hash_table().
This input can be used to explicitly force the reuse of a hash table in special
cases and is usually not needed.
Note that the hash table must have been generated with the same 'points' array.
Returns:
3 Tensors in the following order
neighbors_index
The compact list of indices of the neighbors. The corresponding query point
can be inferred from the 'neighbors_row_splits' vector.
neighbors_row_splits
The exclusive prefix sum of the neighbor count for the query points including
the total neighbor count as the last element. The size of this array is the
number of queries + 1.
neighbors_distance
Stores the distance to each neighbor if 'return_distances' is True.
Note that the distances are squared if metric is L2.
This is a zero length Tensor if 'return_distances' is False.
"""
if isinstance(points, tf.RaggedTensor):
points_row_splits = points.row_splits
points = points.values
if isinstance(queries, tf.RaggedTensor):
queries_row_splits = queries.row_splits
queries = queries.values
if points_row_splits is None:
points_row_splits = tf.cast(tf.stack([0, tf.shape(points)[0]]),
dtype=tf.int64)
if queries_row_splits is None:
queries_row_splits = tf.cast(tf.stack([0, tf.shape(queries)[0]]),
dtype=tf.int64)
if hash_table is None:
table = ops.build_spatial_hash_table(
max_hash_table_size=self.max_hash_table_size,
points=points,
radius=radius,
points_row_splits=points_row_splits,
hash_table_size_factor=hash_table_size_factor)
else:
table = hash_table
result = ops.fixed_radius_search(
ignore_query_point=self.ignore_query_point,
return_distances=self.return_distances,
metric=self.metric,
points=points,
queries=queries,
radius=radius,
points_row_splits=points_row_splits,
queries_row_splits=queries_row_splits,
hash_table_splits=table.hash_table_splits,
hash_table_index=table.hash_table_index,
hash_table_cell_splits=table.hash_table_cell_splits)
return result | [
"def",
"call",
"(",
"self",
",",
"points",
",",
"queries",
",",
"radius",
",",
"points_row_splits",
"=",
"None",
",",
"queries_row_splits",
"=",
"None",
",",
"hash_table_size_factor",
"=",
"1",
"/",
"64",
",",
"hash_table",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"points",
",",
"tf",
".",
"RaggedTensor",
")",
":",
"points_row_splits",
"=",
"points",
".",
"row_splits",
"points",
"=",
"points",
".",
"values",
"if",
"isinstance",
"(",
"queries",
",",
"tf",
".",
"RaggedTensor",
")",
":",
"queries_row_splits",
"=",
"queries",
".",
"row_splits",
"queries",
"=",
"queries",
".",
"values",
"if",
"points_row_splits",
"is",
"None",
":",
"points_row_splits",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"stack",
"(",
"[",
"0",
",",
"tf",
".",
"shape",
"(",
"points",
")",
"[",
"0",
"]",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
"if",
"queries_row_splits",
"is",
"None",
":",
"queries_row_splits",
"=",
"tf",
".",
"cast",
"(",
"tf",
".",
"stack",
"(",
"[",
"0",
",",
"tf",
".",
"shape",
"(",
"queries",
")",
"[",
"0",
"]",
"]",
")",
",",
"dtype",
"=",
"tf",
".",
"int64",
")",
"if",
"hash_table",
"is",
"None",
":",
"table",
"=",
"ops",
".",
"build_spatial_hash_table",
"(",
"max_hash_table_size",
"=",
"self",
".",
"max_hash_table_size",
",",
"points",
"=",
"points",
",",
"radius",
"=",
"radius",
",",
"points_row_splits",
"=",
"points_row_splits",
",",
"hash_table_size_factor",
"=",
"hash_table_size_factor",
")",
"else",
":",
"table",
"=",
"hash_table",
"result",
"=",
"ops",
".",
"fixed_radius_search",
"(",
"ignore_query_point",
"=",
"self",
".",
"ignore_query_point",
",",
"return_distances",
"=",
"self",
".",
"return_distances",
",",
"metric",
"=",
"self",
".",
"metric",
",",
"points",
"=",
"points",
",",
"queries",
"=",
"queries",
",",
"radius",
"=",
"radius",
",",
"points_row_splits",
"=",
"points_row_splits",
",",
"queries_row_splits",
"=",
"queries_row_splits",
",",
"hash_table_splits",
"=",
"table",
".",
"hash_table_splits",
",",
"hash_table_index",
"=",
"table",
".",
"hash_table_index",
",",
"hash_table_cell_splits",
"=",
"table",
".",
"hash_table_cell_splits",
")",
"return",
"result"
] | https://github.com/isl-org/Open3D/blob/79aec3ddde6a571ce2f28e4096477e52ec465244/python/open3d/ml/tf/python/layers/neighbor_search.py#L82-L168 |
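A minimal usage sketch for the layer above. The class and module path follow this entry's file path; the point counts, radius, and the `return_distances` flag are illustrative assumptions, and the result field names follow the docstring:

```python
import tensorflow as tf
import open3d.ml.tf as ml3d  # assumes an Open3D build with TensorFlow ops

points = tf.random.uniform([1024, 3])  # input point cloud (illustrative size)
queries = tf.random.uniform([32, 3])   # query positions

nsearch = ml3d.layers.FixedRadiusSearch(return_distances=True)
ans = nsearch(points, queries, radius=0.1)

# Neighbors of query i, using the row-splits convention from the docstring:
i = 0
start, end = ans.neighbors_row_splits[i], ans.neighbors_row_splits[i + 1]
print(ans.neighbors_index[start:end], ans.neighbors_distance[start:end])
```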
|
rdkit/rdkit | ede860ae316d12d8568daf5ee800921c3389c84e | rdkit/ML/Descriptors/CompoundDescriptors.py | python | CompoundDescriptorCalculator.CalcCompoundDescriptorsForComposition | (self, compos='', composList=None, propDict={}) | return res | calculates all simple descriptors for a given composition
**Arguments**
- compos: a string representation of the composition
- composList: a *composVect*
- propDict: a dictionary containing the properties of the composition
as a whole (e.g. structural variables, etc.)
The client must provide either _compos_ or _composList_. If both are
provided, _composList_ takes priority.
**Returns**
the list of descriptor values
**Notes**
- when _compos_ is provided, this uses _chemutils.SplitComposition_
to split the composition into its individual pieces | calculates all simple descriptors for a given composition | [
"calculates",
"all",
"simple",
"descriptors",
"for",
"a",
"given",
"composition"
] | def CalcCompoundDescriptorsForComposition(self, compos='', composList=None, propDict={}):
""" calculates all simple descriptors for a given composition
**Arguments**
- compos: a string representation of the composition
- composList: a *composVect*
- propDict: a dictionary containing the properties of the composition
as a whole (e.g. structural variables, etc.)
The client must provide either _compos_ or _composList_. If both are
provided, _composList_ takes priority.
**Returns**
the list of descriptor values
**Notes**
- when _compos_ is provided, this uses _chemutils.SplitComposition_
to split the composition into its individual pieces
"""
if composList is None:
composList = chemutils.SplitComposition(compos)
res = []
for cl in self.compoundList:
val = Parser.CalcSingleCompoundDescriptor(composList, cl[1:], self.atomDict, propDict)
res.append(val)
return res | [
"def",
"CalcCompoundDescriptorsForComposition",
"(",
"self",
",",
"compos",
"=",
"''",
",",
"composList",
"=",
"None",
",",
"propDict",
"=",
"{",
"}",
")",
":",
"if",
"composList",
"is",
"None",
":",
"composList",
"=",
"chemutils",
".",
"SplitComposition",
"(",
"compos",
")",
"res",
"=",
"[",
"]",
"for",
"cl",
"in",
"self",
".",
"compoundList",
":",
"val",
"=",
"Parser",
".",
"CalcSingleCompoundDescriptor",
"(",
"composList",
",",
"cl",
"[",
"1",
":",
"]",
",",
"self",
".",
"atomDict",
",",
"propDict",
")",
"res",
".",
"append",
"(",
"val",
")",
"return",
"res"
] | https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/ML/Descriptors/CompoundDescriptors.py#L315-L345 |
|
genn-team/genn | 75e1eb218cafa228bf36ae4613d1ce26e877b12c | pygenn/genn_groups.py | python | Group.push_var_to_device | (self, var_name) | Wrapper around GeNNModel.push_var_to_device
Args:
var_name -- string with the name of the variable | Wrapper around GeNNModel.push_var_to_device | [
"Wrapper",
"around",
"GeNNModel",
".",
"push_var_to_device"
] | def push_var_to_device(self, var_name):
"""Wrapper around GeNNModel.push_var_to_device
Args:
var_name -- string with the name of the variable
"""
self._model.push_var_to_device(self.name, var_name) | [
"def",
"push_var_to_device",
"(",
"self",
",",
"var_name",
")",
":",
"self",
".",
"_model",
".",
"push_var_to_device",
"(",
"self",
".",
"name",
",",
"var_name",
")"
] | https://github.com/genn-team/genn/blob/75e1eb218cafa228bf36ae4613d1ce26e877b12c/pygenn/genn_groups.py#L98-L104 |
||
Ifsttar/I-Simpa | 2283385f4cac769a92e265edabb9c79cb6c42d03 | currentRelease/ExperimentalScript/md_octave/__init__.py | python | MD_Octave.gettreelabel | (self) | return "MD Octave" | Return core label | Return core label | [
"Return",
"core",
"label"
] | def gettreelabel(self):
"""
Return core label
"""
return "MD Octave" | [
"def",
"gettreelabel",
"(",
"self",
")",
":",
"return",
"\"MD Octave\""
] | https://github.com/Ifsttar/I-Simpa/blob/2283385f4cac769a92e265edabb9c79cb6c42d03/currentRelease/ExperimentalScript/md_octave/__init__.py#L37-L41 |
|
deepmodeling/deepmd-kit | 159e45d248b0429844fb6a8cb3b3a201987c8d79 | deepmd/entrypoints/main.py | python | parse_args | (args: Optional[List[str]] = None) | return parsed_args | DeePMD-Kit commandline options argument parser.
Parameters
----------
args: List[str]
list of command line arguments; its main purpose is testing. The default
option None takes the arguments from sys.argv | DeePMD-Kit commandline options argument parser. | [
"DeePMD",
"-",
"Kit",
"commandline",
"options",
"argument",
"parser",
"."
] | def parse_args(args: Optional[List[str]] = None):
"""DeePMD-Kit commandline options argument parser.
Parameters
----------
args: List[str]
list of command line arguments; its main purpose is testing. The default
option None takes the arguments from sys.argv
"""
parser = argparse.ArgumentParser(
description="DeePMD-kit: A deep learning package for many-body potential energy"
" representation and molecular dynamics",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
subparsers = parser.add_subparsers(title="Valid subcommands", dest="command")
# * logging options parser *********************************************************
# with use of the parent argument this options will be added to every parser
parser_log = argparse.ArgumentParser(
add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser_log.add_argument(
"-v",
"--log-level",
choices=["DEBUG", "3", "INFO", "2", "WARNING", "1", "ERROR", "0"],
default="INFO",
help="set verbosity level by string or number, 0=ERROR, 1=WARNING, 2=INFO "
"and 3=DEBUG",
)
parser_log.add_argument(
"-l",
"--log-path",
type=str,
default=None,
help="set log file to log messages to disk, if not specified, the logs will "
"only be output to console",
)
# * mpi logging parser *************************************************************
parser_mpi_log = argparse.ArgumentParser(
add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser_mpi_log.add_argument(
"-m",
"--mpi-log",
type=str,
default="master",
choices=("master", "collect", "workers"),
help="Set the manner of logging when running with MPI. 'master' logs only on "
"main process, 'collect' broadcasts logs from workers to master and 'workers' "
"means each process will output its own log",
)
# * config script ******************************************************************
parser_cfig = subparsers.add_parser(
"config",
parents=[parser_log],
help="fast configuration of parameter file for smooth model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_cfig.add_argument(
"-o", "--output", type=str, default="input.json", help="the output json file"
)
# * transfer script ****************************************************************
parser_transfer = subparsers.add_parser(
"transfer", parents=[parser_log], help="pass parameters to another model"
)
parser_transfer.add_argument(
"-r",
"--raw-model",
default="raw_frozen_model.pb",
type=str,
help="the model receiving parameters",
)
parser_transfer.add_argument(
"-O",
"--old-model",
default="old_frozen_model.pb",
type=str,
help="the model providing parameters",
)
parser_transfer.add_argument(
"-o",
"--output",
default="frozen_model.pb",
type=str,
help="the model after passing parameters",
)
# * config parser ******************************************************************
parser_train = subparsers.add_parser(
"train",
parents=[parser_log, parser_mpi_log],
help="train a model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_train.add_argument(
"INPUT", help="the input parameter file in json or yaml format"
)
parser_train.add_argument(
"-i",
"--init-model",
type=str,
default=None,
help="Initialize the model by the provided checkpoint.",
)
parser_train.add_argument(
"-r",
"--restart",
type=str,
default=None,
help="Restart the training from the provided checkpoint.",
)
parser_train.add_argument(
"-o",
"--output",
type=str,
default="out.json",
help="The output file of the parameters used in training.",
)
parser_train.add_argument(
"-f",
"--init-frz-model",
type=str,
default=None,
help="Initialize the training from the frozen model.",
)
# * freeze script ******************************************************************
parser_frz = subparsers.add_parser(
"freeze",
parents=[parser_log],
help="freeze the model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_frz.add_argument(
"-c",
"--checkpoint-folder",
type=str,
default=".",
help="path to checkpoint folder",
)
parser_frz.add_argument(
"-o",
"--output",
type=str,
default="frozen_model.pb",
help="name of graph, will output to the checkpoint folder",
)
parser_frz.add_argument(
"-n",
"--node-names",
type=str,
default=None,
help="the frozen nodes, if not set, determined from the model type",
)
# * test script ********************************************************************
parser_tst = subparsers.add_parser(
"test",
parents=[parser_log],
help="test the model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_tst.add_argument(
"-m",
"--model",
default="frozen_model.pb",
type=str,
help="Frozen model file to import",
)
parser_tst.add_argument(
"-s",
"--system",
default=".",
type=str,
help="The system dir. Recursively detect systems in this directory",
)
parser_tst.add_argument(
"-S", "--set-prefix", default="set", type=str, help="The set prefix"
)
parser_tst.add_argument(
"-n", "--numb-test", default=100, type=int, help="The number of data for test"
)
parser_tst.add_argument(
"-r", "--rand-seed", type=int, default=None, help="The random seed"
)
parser_tst.add_argument(
"--shuffle-test", action="store_true", default=False, help="Shuffle test data"
)
parser_tst.add_argument(
"-d",
"--detail-file",
type=str,
default=None,
help="File where details of energy force and virial accuracy will be written",
)
parser_tst.add_argument(
"-a",
"--atomic",
action="store_true",
default=False,
help="Test the accuracy of atomic label, i.e. energy / tensor (dipole, polar)",
)
# * compress model *****************************************************************
# Compress a model, which includes tabulating the embedding-net.
# The table is composed of fifth-order polynomial coefficients and is assembled
# from two sub-tables. The first table takes the step(parameter) as its uniform
# step, while the second table takes 10 * step as its uniform step
# The range of the first table is automatically detected by deepmd-kit, while the
# second table ranges from the first table's upper boundary(upper) to the
# extrapolate(parameter) * upper.
parser_compress = subparsers.add_parser(
"compress",
parents=[parser_log, parser_mpi_log],
help="compress a model",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_compress.add_argument(
"-i",
"--input",
default="frozen_model.pb",
type=str,
help="The original frozen model, which will be compressed by the code",
)
parser_compress.add_argument(
"-o",
"--output",
default="frozen_model_compressed.pb",
type=str,
help="The compressed model",
)
parser_compress.add_argument(
"-s",
"--step",
default=0.01,
type=float,
help="Model compression uses fifth-order polynomials to interpolate the embedding-net. "
"It introduces two tables with different step size to store the parameters of the polynomials. "
"The first table covers the range of the training data, while the second table is an extrapolation of the training data. "
"The domain of each table is uniformly divided by a given step size. "
"And the step(parameter) denotes the step size of the first table and the second table will "
"use 10 * step as it's step size to save the memory. "
"Usually the value ranges from 0.1 to 0.001. "
"Smaller step means higher accuracy and bigger model size",
)
parser_compress.add_argument(
"-e",
"--extrapolate",
default=5,
type=int,
help="The domain range of the first table is automatically detected by the code: [d_low, d_up]. "
"While the second table ranges from the first table's upper boundary(d_up) to the extrapolate(parameter) * d_up: [d_up, extrapolate * d_up]",
)
parser_compress.add_argument(
"-f",
"--frequency",
default=-1,
type=int,
help="The frequency of tabulation overflow check(Whether the input environment "
"matrix overflow the first or second table range). "
"By default do not check the overflow",
)
parser_compress.add_argument(
"-c",
"--checkpoint-folder",
type=str,
default="model-compression",
help="path to checkpoint folder",
)
parser_compress.add_argument(
"-t",
"--training-script",
type=str,
default=None,
help="The training script of the input frozen model",
)
# * print docs script **************************************************************
parsers_doc = subparsers.add_parser(
"doc-train-input",
parents=[parser_log],
help="print the documentation (in rst format) of input training parameters.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parsers_doc.add_argument(
"--out-type",
default="rst",
type=str,
help="The output type"
)
# * make model deviation ***********************************************************
parser_model_devi = subparsers.add_parser(
"model-devi",
parents=[parser_log],
help="calculate model deviation",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser_model_devi.add_argument(
"-m",
"--models",
default=["graph.000.pb", "graph.001.pb", "graph.002.pb", "graph.003.pb"],
nargs="+",
type=str,
help="Frozen models file to import",
)
parser_model_devi.add_argument(
"-s",
"--system",
default=".",
type=str,
help="The system directory, not support recursive detection.",
)
parser_model_devi.add_argument(
"-S", "--set-prefix", default="set", type=str, help="The set prefix"
)
parser_model_devi.add_argument(
"-o",
"--output",
default="model_devi.out",
type=str,
help="The output file for results of model deviation"
)
parser_model_devi.add_argument(
"-f",
"--frequency",
default=1,
type=int,
help="The trajectory frequency of the system"
)
# * convert models
# supported: 1.2->2.0, 1.3->2.0
parser_transform = subparsers.add_parser(
'convert-from',
parents=[parser_log],
help='convert lower model version to supported version',
)
parser_transform.add_argument(
'FROM',
type = str,
choices = ['1.2', '1.3'],
help="The original model compatibility",
)
parser_transform.add_argument(
'-i',
"--input-model",
default = "frozen_model.pb",
type=str,
help = "the input model",
)
parser_transform.add_argument(
"-o",
"--output-model",
default = "convert_out.pb",
type=str,
help='the output model',
)
# --version
parser.add_argument('--version', action='version', version='DeePMD-kit v%s' % __version__)
parsed_args = parser.parse_args(args=args)
if parsed_args.command is None:
parser.print_help()
else:
parsed_args.log_level = get_ll(parsed_args.log_level)
return parsed_args | [
"def",
"parse_args",
"(",
"args",
":",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
"=",
"None",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"DeePMD-kit: A deep learning package for many-body potential energy\"",
"\" representation and molecular dynamics\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"subparsers",
"=",
"parser",
".",
"add_subparsers",
"(",
"title",
"=",
"\"Valid subcommands\"",
",",
"dest",
"=",
"\"command\"",
")",
"# * logging options parser *********************************************************",
"# with use of the parent argument this options will be added to every parser",
"parser_log",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"parser_log",
".",
"add_argument",
"(",
"\"-v\"",
",",
"\"--log-level\"",
",",
"choices",
"=",
"[",
"\"DEBUG\"",
",",
"\"3\"",
",",
"\"INFO\"",
",",
"\"2\"",
",",
"\"WARNING\"",
",",
"\"1\"",
",",
"\"ERROR\"",
",",
"\"0\"",
"]",
",",
"default",
"=",
"\"INFO\"",
",",
"help",
"=",
"\"set verbosity level by string or number, 0=ERROR, 1=WARNING, 2=INFO \"",
"\"and 3=DEBUG\"",
",",
")",
"parser_log",
".",
"add_argument",
"(",
"\"-l\"",
",",
"\"--log-path\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"set log file to log messages to disk, if not specified, the logs will \"",
"\"only be output to console\"",
",",
")",
"# * mpi logging parser *************************************************************",
"parser_mpi_log",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
")",
"parser_mpi_log",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--mpi-log\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"master\"",
",",
"choices",
"=",
"(",
"\"master\"",
",",
"\"collect\"",
",",
"\"workers\"",
")",
",",
"help",
"=",
"\"Set the manner of logging when running with MPI. 'master' logs only on \"",
"\"main process, 'collect' broadcasts logs from workers to master and 'workers' \"",
"\"means each process will output its own log\"",
",",
")",
"# * config script ******************************************************************",
"parser_cfig",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"config\"",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"\"fast configuration of parameter file for smooth model\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parser_cfig",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"input.json\"",
",",
"help",
"=",
"\"the output json file\"",
")",
"# * transfer script ****************************************************************",
"parser_transfer",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"transfer\"",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"\"pass parameters to another model\"",
")",
"parser_transfer",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--raw-model\"",
",",
"default",
"=",
"\"raw_frozen_model.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the model receiving parameters\"",
",",
")",
"parser_transfer",
".",
"add_argument",
"(",
"\"-O\"",
",",
"\"--old-model\"",
",",
"default",
"=",
"\"old_frozen_model.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the model providing parameters\"",
",",
")",
"parser_transfer",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"default",
"=",
"\"frozen_model.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the model after passing parameters\"",
",",
")",
"# * config parser ******************************************************************",
"parser_train",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"train\"",
",",
"parents",
"=",
"[",
"parser_log",
",",
"parser_mpi_log",
"]",
",",
"help",
"=",
"\"train a model\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parser_train",
".",
"add_argument",
"(",
"\"INPUT\"",
",",
"help",
"=",
"\"the input parameter file in json or yaml format\"",
")",
"parser_train",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--init-model\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Initialize the model by the provided checkpoint.\"",
",",
")",
"parser_train",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--restart\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Restart the training from the provided checkpoint.\"",
",",
")",
"parser_train",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"out.json\"",
",",
"help",
"=",
"\"The output file of the parameters used in training.\"",
",",
")",
"parser_train",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--init-frz-model\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"Initialize the training from the frozen model.\"",
",",
")",
"# * freeze script ******************************************************************",
"parser_frz",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"freeze\"",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"\"freeze the model\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parser_frz",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--checkpoint-folder\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\".\"",
",",
"help",
"=",
"\"path to checkpoint folder\"",
",",
")",
"parser_frz",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"frozen_model.pb\"",
",",
"help",
"=",
"\"name of graph, will output to the checkpoint folder\"",
",",
")",
"parser_frz",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--node-names\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"the frozen nodes, if not set, determined from the model type\"",
",",
")",
"# * test script ********************************************************************",
"parser_tst",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"test\"",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"\"test the model\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--model\"",
",",
"default",
"=",
"\"frozen_model.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Frozen model file to import\"",
",",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--system\"",
",",
"default",
"=",
"\".\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The system dir. Recursively detect systems in this directory\"",
",",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-S\"",
",",
"\"--set-prefix\"",
",",
"default",
"=",
"\"set\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The set prefix\"",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-n\"",
",",
"\"--numb-test\"",
",",
"default",
"=",
"100",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"The number of data for test\"",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-r\"",
",",
"\"--rand-seed\"",
",",
"type",
"=",
"int",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"The random seed\"",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"--shuffle-test\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Shuffle test data\"",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-d\"",
",",
"\"--detail-file\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"File where details of energy force and virial accuracy will be written\"",
",",
")",
"parser_tst",
".",
"add_argument",
"(",
"\"-a\"",
",",
"\"--atomic\"",
",",
"action",
"=",
"\"store_true\"",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Test the accuracy of atomic label, i.e. energy / tensor (dipole, polar)\"",
",",
")",
"# * compress model *****************************************************************",
"# Compress a model, which including tabulating the embedding-net.",
"# The table is composed of fifth-order polynomial coefficients and is assembled",
"# from two sub-tables. The first table takes the step(parameter) as it's uniform",
"# step, while the second table takes 10 * step as it\\s uniform step",
"# The range of the first table is automatically detected by deepmd-kit, while the",
"# second table ranges from the first table's upper boundary(upper) to the",
"# extrapolate(parameter) * upper.",
"parser_compress",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"compress\"",
",",
"parents",
"=",
"[",
"parser_log",
",",
"parser_mpi_log",
"]",
",",
"help",
"=",
"\"compress a model\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-i\"",
",",
"\"--input\"",
",",
"default",
"=",
"\"frozen_model.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The original frozen model, which will be compressed by the code\"",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"default",
"=",
"\"frozen_model_compressed.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The compressed model\"",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--step\"",
",",
"default",
"=",
"0.01",
",",
"type",
"=",
"float",
",",
"help",
"=",
"\"Model compression uses fifth-order polynomials to interpolate the embedding-net. \"",
"\"It introduces two tables with different step size to store the parameters of the polynomials. \"",
"\"The first table covers the range of the training data, while the second table is an extrapolation of the training data. \"",
"\"The domain of each table is uniformly divided by a given step size. \"",
"\"And the step(parameter) denotes the step size of the first table and the second table will \"",
"\"use 10 * step as it's step size to save the memory. \"",
"\"Usually the value ranges from 0.1 to 0.001. \"",
"\"Smaller step means higher accuracy and bigger model size\"",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-e\"",
",",
"\"--extrapolate\"",
",",
"default",
"=",
"5",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"The domain range of the first table is automatically detected by the code: [d_low, d_up]. \"",
"\"While the second table ranges from the first table's upper boundary(d_up) to the extrapolate(parameter) * d_up: [d_up, extrapolate * d_up]\"",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--frequency\"",
",",
"default",
"=",
"-",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"The frequency of tabulation overflow check(Whether the input environment \"",
"\"matrix overflow the first or second table range). \"",
"\"By default do not check the overflow\"",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-c\"",
",",
"\"--checkpoint-folder\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"\"model-compression\"",
",",
"help",
"=",
"\"path to checkpoint folder\"",
",",
")",
"parser_compress",
".",
"add_argument",
"(",
"\"-t\"",
",",
"\"--training-script\"",
",",
"type",
"=",
"str",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"The training script of the input frozen model\"",
",",
")",
"# * print docs script **************************************************************",
"parsers_doc",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"doc-train-input\"",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"\"print the documentation (in rst format) of input training parameters.\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parsers_doc",
".",
"add_argument",
"(",
"\"--out-type\"",
",",
"default",
"=",
"\"rst\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The output type\"",
")",
"# * make model deviation ***********************************************************",
"parser_model_devi",
"=",
"subparsers",
".",
"add_parser",
"(",
"\"model-devi\"",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"\"calculate model deviation\"",
",",
"formatter_class",
"=",
"argparse",
".",
"ArgumentDefaultsHelpFormatter",
",",
")",
"parser_model_devi",
".",
"add_argument",
"(",
"\"-m\"",
",",
"\"--models\"",
",",
"default",
"=",
"[",
"\"graph.000.pb\"",
",",
"\"graph.001.pb\"",
",",
"\"graph.002.pb\"",
",",
"\"graph.003.pb\"",
"]",
",",
"nargs",
"=",
"\"+\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"Frozen models file to import\"",
",",
")",
"parser_model_devi",
".",
"add_argument",
"(",
"\"-s\"",
",",
"\"--system\"",
",",
"default",
"=",
"\".\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The system directory, not support recursive detection.\"",
",",
")",
"parser_model_devi",
".",
"add_argument",
"(",
"\"-S\"",
",",
"\"--set-prefix\"",
",",
"default",
"=",
"\"set\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The set prefix\"",
")",
"parser_model_devi",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output\"",
",",
"default",
"=",
"\"model_devi.out\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"The output file for results of model deviation\"",
")",
"parser_model_devi",
".",
"add_argument",
"(",
"\"-f\"",
",",
"\"--frequency\"",
",",
"default",
"=",
"1",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"The trajectory frequency of the system\"",
")",
"# * convert models",
"# supported: 1.2->2.0, 1.3->2.0",
"parser_transform",
"=",
"subparsers",
".",
"add_parser",
"(",
"'convert-from'",
",",
"parents",
"=",
"[",
"parser_log",
"]",
",",
"help",
"=",
"'convert lower model version to supported version'",
",",
")",
"parser_transform",
".",
"add_argument",
"(",
"'FROM'",
",",
"type",
"=",
"str",
",",
"choices",
"=",
"[",
"'1.2'",
",",
"'1.3'",
"]",
",",
"help",
"=",
"\"The original model compatibility\"",
",",
")",
"parser_transform",
".",
"add_argument",
"(",
"'-i'",
",",
"\"--input-model\"",
",",
"default",
"=",
"\"frozen_model.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"\"the input model\"",
",",
")",
"parser_transform",
".",
"add_argument",
"(",
"\"-o\"",
",",
"\"--output-model\"",
",",
"default",
"=",
"\"convert_out.pb\"",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'the output model'",
",",
")",
"# --version",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"'DeePMD-kit v%s'",
"%",
"__version__",
")",
"parsed_args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"args",
")",
"if",
"parsed_args",
".",
"command",
"is",
"None",
":",
"parser",
".",
"print_help",
"(",
")",
"else",
":",
"parsed_args",
".",
"log_level",
"=",
"get_ll",
"(",
"parsed_args",
".",
"log_level",
")",
"return",
"parsed_args"
] | https://github.com/deepmodeling/deepmd-kit/blob/159e45d248b0429844fb6a8cb3b3a201987c8d79/deepmd/entrypoints/main.py#L46-L415 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | TextAttr.GetTextEffects | (*args, **kwargs) | return _controls_.TextAttr_GetTextEffects(*args, **kwargs) | GetTextEffects(self) -> int | GetTextEffects(self) -> int | [
"GetTextEffects",
"(",
"self",
")",
"-",
">",
"int"
] | def GetTextEffects(*args, **kwargs):
"""GetTextEffects(self) -> int"""
return _controls_.TextAttr_GetTextEffects(*args, **kwargs) | [
"def",
"GetTextEffects",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TextAttr_GetTextEffects",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L1756-L1758 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/setuptools/py2/pkg_resources/_vendor/pyparsing.py | python | ParserElement.copy | ( self ) | return cpy | Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") | Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") | [
"Make",
"a",
"copy",
"of",
"this",
"C",
"{",
"ParserElement",
"}",
".",
"Useful",
"for",
"defining",
"different",
"parse",
"actions",
"for",
"the",
"same",
"parsing",
"pattern",
"using",
"copies",
"of",
"the",
"original",
"parse",
"element",
".",
"Example",
"::",
"integer",
"=",
"Word",
"(",
"nums",
")",
".",
"setParseAction",
"(",
"lambda",
"toks",
":",
"int",
"(",
"toks",
"[",
"0",
"]",
"))",
"integerK",
"=",
"integer",
".",
"copy",
"()",
".",
"addParseAction",
"(",
"lambda",
"toks",
":",
"toks",
"[",
"0",
"]",
"*",
"1024",
")",
"+",
"Suppress",
"(",
"K",
")",
"integerM",
"=",
"integer",
".",
"copy",
"()",
".",
"addParseAction",
"(",
"lambda",
"toks",
":",
"toks",
"[",
"0",
"]",
"*",
"1024",
"*",
"1024",
")",
"+",
"Suppress",
"(",
"M",
")",
"print",
"(",
"OneOrMore",
"(",
"integerK",
"|",
"integerM",
"|",
"integer",
")",
".",
"parseString",
"(",
"5K",
"100",
"640K",
"256M",
"))",
"prints",
"::",
"[",
"5120",
"100",
"655360",
"268435456",
"]",
"Equivalent",
"form",
"of",
"C",
"{",
"expr",
".",
"copy",
"()",
"}",
"is",
"just",
"C",
"{",
"expr",
"()",
"}",
"::",
"integerM",
"=",
"integer",
"()",
".",
"addParseAction",
"(",
"lambda",
"toks",
":",
"toks",
"[",
"0",
"]",
"*",
"1024",
"*",
"1024",
")",
"+",
"Suppress",
"(",
"M",
")"
] | def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy | [
"def",
"copy",
"(",
"self",
")",
":",
"cpy",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"cpy",
".",
"parseAction",
"=",
"self",
".",
"parseAction",
"[",
":",
"]",
"cpy",
".",
"ignoreExprs",
"=",
"self",
".",
"ignoreExprs",
"[",
":",
"]",
"if",
"self",
".",
"copyDefaultWhiteChars",
":",
"cpy",
".",
"whiteChars",
"=",
"ParserElement",
".",
"DEFAULT_WHITE_CHARS",
"return",
"cpy"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/pkg_resources/_vendor/pyparsing.py#L1167-L1188 |
|
manticoresoftware/manticoresearch | f675d16267543d934ce84074f087d13496ec462c | api/sphinxapi.py | python | SphinxClient.SetFieldWeights | (self, weights) | Bind per-field weights by name; expects (name,field_weight) dictionary as argument. | Bind per-field weights by name; expects (name,field_weight) dictionary as argument. | [
"Bind",
"per",
"-",
"field",
"weights",
"by",
"name",
";",
"expects",
"(",
"name",
"field_weight",
")",
"dictionary",
"as",
"argument",
"."
] | def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in list(weights.items()):
assert(isinstance(key,str))
AssertUInt32 ( val )
self._fieldweights = weights | [
"def",
"SetFieldWeights",
"(",
"self",
",",
"weights",
")",
":",
"assert",
"(",
"isinstance",
"(",
"weights",
",",
"dict",
")",
")",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"weights",
".",
"items",
"(",
")",
")",
":",
"assert",
"(",
"isinstance",
"(",
"key",
",",
"str",
")",
")",
"AssertUInt32",
"(",
"val",
")",
"self",
".",
"_fieldweights",
"=",
"weights"
] | https://github.com/manticoresoftware/manticoresearch/blob/f675d16267543d934ce84074f087d13496ec462c/api/sphinxapi.py#L394-L402 |
||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/learn/python/learn/estimators/head.py | python | _MultiLabelHead._metrics | (self, eval_loss, predictions, labels, weights) | return metrics | Returns a dict of metrics keyed by name. | Returns a dict of metrics keyed by name. | [
"Returns",
"a",
"dict",
"of",
"metrics",
"keyed",
"by",
"name",
"."
] | def _metrics(self, eval_loss, predictions, labels, weights):
"""Returns a dict of metrics keyed by name."""
with ops.name_scope("metrics", values=(
[eval_loss, labels, weights] + list(six.itervalues(predictions)))):
classes = predictions[prediction_key.PredictionKey.CLASSES]
probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]
logits = predictions[prediction_key.PredictionKey.LOGITS]
metrics = {_summary_key(self.head_name, mkey.LOSS):
metrics_lib.streaming_mean(eval_loss)}
# TODO(b/29366811): This currently results in both an "accuracy" and an
# "accuracy/threshold_0.500000_mean" metric for binary classification.
metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (
metrics_lib.streaming_accuracy(classes, labels, weights))
metrics[_summary_key(self.head_name, mkey.AUC)] = _streaming_auc(
probabilities, labels, weights)
metrics[_summary_key(self.head_name, mkey.AUC_PR)] = _streaming_auc(
probabilities, labels, weights, curve="PR")
for class_id in self._metric_class_ids:
# TODO(ptucker): Add per-class accuracy, precision, recall.
metrics[_summary_key(
self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (
_predictions_streaming_mean(classes, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (
_indicator_labels_streaming_mean(labels, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (
_predictions_streaming_mean(probabilities, weights, class_id))
metrics[_summary_key(
self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (
_predictions_streaming_mean(logits, weights, class_id))
metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (
_streaming_auc(probabilities, labels, weights, class_id))
metrics[_summary_key(self.head_name, mkey.CLASS_AUC_PR % class_id)] = (
_streaming_auc(probabilities, labels, weights, class_id,
curve="PR"))
return metrics | [
"def",
"_metrics",
"(",
"self",
",",
"eval_loss",
",",
"predictions",
",",
"labels",
",",
"weights",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"\"metrics\"",
",",
"values",
"=",
"(",
"[",
"eval_loss",
",",
"labels",
",",
"weights",
"]",
"+",
"list",
"(",
"six",
".",
"itervalues",
"(",
"predictions",
")",
")",
")",
")",
":",
"classes",
"=",
"predictions",
"[",
"prediction_key",
".",
"PredictionKey",
".",
"CLASSES",
"]",
"probabilities",
"=",
"predictions",
"[",
"prediction_key",
".",
"PredictionKey",
".",
"PROBABILITIES",
"]",
"logits",
"=",
"predictions",
"[",
"prediction_key",
".",
"PredictionKey",
".",
"LOGITS",
"]",
"metrics",
"=",
"{",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"LOSS",
")",
":",
"metrics_lib",
".",
"streaming_mean",
"(",
"eval_loss",
")",
"}",
"# TODO(b/29366811): This currently results in both an \"accuracy\" and an",
"# \"accuracy/threshold_0.500000_mean\" metric for binary classification.",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"ACCURACY",
")",
"]",
"=",
"(",
"metrics_lib",
".",
"streaming_accuracy",
"(",
"classes",
",",
"labels",
",",
"weights",
")",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"AUC",
")",
"]",
"=",
"_streaming_auc",
"(",
"probabilities",
",",
"labels",
",",
"weights",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"AUC_PR",
")",
"]",
"=",
"_streaming_auc",
"(",
"probabilities",
",",
"labels",
",",
"weights",
",",
"curve",
"=",
"\"PR\"",
")",
"for",
"class_id",
"in",
"self",
".",
"_metric_class_ids",
":",
"# TODO(ptucker): Add per-class accuracy, precision, recall.",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"CLASS_PREDICTION_MEAN",
"%",
"class_id",
")",
"]",
"=",
"(",
"_predictions_streaming_mean",
"(",
"classes",
",",
"weights",
",",
"class_id",
")",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"CLASS_LABEL_MEAN",
"%",
"class_id",
")",
"]",
"=",
"(",
"_indicator_labels_streaming_mean",
"(",
"labels",
",",
"weights",
",",
"class_id",
")",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"CLASS_PROBABILITY_MEAN",
"%",
"class_id",
")",
"]",
"=",
"(",
"_predictions_streaming_mean",
"(",
"probabilities",
",",
"weights",
",",
"class_id",
")",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"CLASS_LOGITS_MEAN",
"%",
"class_id",
")",
"]",
"=",
"(",
"_predictions_streaming_mean",
"(",
"logits",
",",
"weights",
",",
"class_id",
")",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"CLASS_AUC",
"%",
"class_id",
")",
"]",
"=",
"(",
"_streaming_auc",
"(",
"probabilities",
",",
"labels",
",",
"weights",
",",
"class_id",
")",
")",
"metrics",
"[",
"_summary_key",
"(",
"self",
".",
"head_name",
",",
"mkey",
".",
"CLASS_AUC_PR",
"%",
"class_id",
")",
"]",
"=",
"(",
"_streaming_auc",
"(",
"probabilities",
",",
"labels",
",",
"weights",
",",
"class_id",
",",
"curve",
"=",
"\"PR\"",
")",
")",
"return",
"metrics"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/estimators/head.py#L1384-L1423 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/util/nest.py | python | flatten_up_to | (shallow_tree, input_tree) | return list(_yield_flat_up_to(shallow_tree, input_tree)) | Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`. | Flattens `input_tree` up to `shallow_tree`. | [
"Flattens",
"input_tree",
"up",
"to",
"shallow_tree",
"."
] | def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree)) | [
"def",
"flatten_up_to",
"(",
"shallow_tree",
",",
"input_tree",
")",
":",
"assert_shallow_structure",
"(",
"shallow_tree",
",",
"input_tree",
")",
"return",
"list",
"(",
"_yield_flat_up_to",
"(",
"shallow_tree",
",",
"input_tree",
")",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/util/nest.py#L460-L530 |
|
JoseExposito/touchegg | 1f3fda214358d071c05da4bf17c070c33d67b5eb | cmake/cpplint.py | python | CheckEmptyBlockBody | (filename, clean_lines, linenum, error) | Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found. | Look for empty loop/conditional body with only a single semicolon. | [
"Look",
"for",
"empty",
"loop",
"/",
"conditional",
"body",
"with",
"only",
"a",
"single",
"semicolon",
"."
] | def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
body = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(body)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause')) | [
"def",
"CheckEmptyBlockBody",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
":",
"# Search for loop keywords at the beginning of the line. Because only",
"# whitespaces are allowed before the keywords, this will also ignore most",
"# do-while-loops, since those lines should start with closing brace.",
"#",
"# We also check \"if\" blocks here, since an empty conditional block",
"# is likely an error.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"matched",
"=",
"Match",
"(",
"r'\\s*(for|while|if)\\s*\\('",
",",
"line",
")",
"if",
"matched",
":",
"# Find the end of the conditional expression.",
"(",
"end_line",
",",
"end_linenum",
",",
"end_pos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"line",
".",
"find",
"(",
"'('",
")",
")",
"# Output warning if what follows the condition expression is a semicolon.",
"# No warning for all other cases, including whitespace or newline, since we",
"# have a separate check for semicolons preceded by whitespace.",
"if",
"end_pos",
">=",
"0",
"and",
"Match",
"(",
"r';'",
",",
"end_line",
"[",
"end_pos",
":",
"]",
")",
":",
"if",
"matched",
".",
"group",
"(",
"1",
")",
"==",
"'if'",
":",
"error",
"(",
"filename",
",",
"end_linenum",
",",
"'whitespace/empty_conditional_body'",
",",
"5",
",",
"'Empty conditional bodies should use {}'",
")",
"else",
":",
"error",
"(",
"filename",
",",
"end_linenum",
",",
"'whitespace/empty_loop_body'",
",",
"5",
",",
"'Empty loop bodies should use {} or continue'",
")",
"# Check for if statements that have completely empty bodies (no comments)",
"# and no else clauses.",
"if",
"end_pos",
">=",
"0",
"and",
"matched",
".",
"group",
"(",
"1",
")",
"==",
"'if'",
":",
"# Find the position of the opening { for the if statement.",
"# Return without logging an error if it has no brackets.",
"opening_linenum",
"=",
"end_linenum",
"opening_line_fragment",
"=",
"end_line",
"[",
"end_pos",
":",
"]",
"# Loop until EOF or find anything that's not whitespace or opening {.",
"while",
"not",
"Search",
"(",
"r'^\\s*\\{'",
",",
"opening_line_fragment",
")",
":",
"if",
"Search",
"(",
"r'^(?!\\s*$)'",
",",
"opening_line_fragment",
")",
":",
"# Conditional has no brackets.",
"return",
"opening_linenum",
"+=",
"1",
"if",
"opening_linenum",
"==",
"len",
"(",
"clean_lines",
".",
"elided",
")",
":",
"# Couldn't find conditional's opening { or any code before EOF.",
"return",
"opening_line_fragment",
"=",
"clean_lines",
".",
"elided",
"[",
"opening_linenum",
"]",
"# Set opening_line (opening_line_fragment may not be entire opening line).",
"opening_line",
"=",
"clean_lines",
".",
"elided",
"[",
"opening_linenum",
"]",
"# Find the position of the closing }.",
"opening_pos",
"=",
"opening_line_fragment",
".",
"find",
"(",
"'{'",
")",
"if",
"opening_linenum",
"==",
"end_linenum",
":",
"# We need to make opening_pos relative to the start of the entire line.",
"opening_pos",
"+=",
"end_pos",
"(",
"closing_line",
",",
"closing_linenum",
",",
"closing_pos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"opening_linenum",
",",
"opening_pos",
")",
"if",
"closing_pos",
"<",
"0",
":",
"return",
"# Now construct the body of the conditional. This consists of the portion",
"# of the opening line after the {, all lines until the closing line,",
"# and the portion of the closing line before the }.",
"if",
"(",
"clean_lines",
".",
"raw_lines",
"[",
"opening_linenum",
"]",
"!=",
"CleanseComments",
"(",
"clean_lines",
".",
"raw_lines",
"[",
"opening_linenum",
"]",
")",
")",
":",
"# Opening line ends with a comment, so conditional isn't empty.",
"return",
"if",
"closing_linenum",
">",
"opening_linenum",
":",
"# Opening line after the {. Ignore comments here since we checked above.",
"body",
"=",
"list",
"(",
"opening_line",
"[",
"opening_pos",
"+",
"1",
":",
"]",
")",
"# All lines until closing line, excluding closing line, with comments.",
"body",
".",
"extend",
"(",
"clean_lines",
".",
"raw_lines",
"[",
"opening_linenum",
"+",
"1",
":",
"closing_linenum",
"]",
")",
"# Closing line before the }. Won't (and can't) have comments.",
"body",
".",
"append",
"(",
"clean_lines",
".",
"elided",
"[",
"closing_linenum",
"]",
"[",
":",
"closing_pos",
"-",
"1",
"]",
")",
"body",
"=",
"'\\n'",
".",
"join",
"(",
"body",
")",
"else",
":",
"# If statement has brackets and fits on a single line.",
"body",
"=",
"opening_line",
"[",
"opening_pos",
"+",
"1",
":",
"closing_pos",
"-",
"1",
"]",
"# Check if the body is empty",
"if",
"not",
"_EMPTY_CONDITIONAL_BODY_PATTERN",
".",
"search",
"(",
"body",
")",
":",
"return",
"# The body is empty. Now make sure there's not an else clause.",
"current_linenum",
"=",
"closing_linenum",
"current_line_fragment",
"=",
"closing_line",
"[",
"closing_pos",
":",
"]",
"# Loop until EOF or find anything that's not whitespace or else clause.",
"while",
"Search",
"(",
"r'^\\s*$|^(?=\\s*else)'",
",",
"current_line_fragment",
")",
":",
"if",
"Search",
"(",
"r'^(?=\\s*else)'",
",",
"current_line_fragment",
")",
":",
"# Found an else clause, so don't log an error.",
"return",
"current_linenum",
"+=",
"1",
"if",
"current_linenum",
"==",
"len",
"(",
"clean_lines",
".",
"elided",
")",
":",
"break",
"current_line_fragment",
"=",
"clean_lines",
".",
"elided",
"[",
"current_linenum",
"]",
"# The body is empty and there's no else clause until EOF or other code.",
"error",
"(",
"filename",
",",
"end_linenum",
",",
"'whitespace/empty_if_body'",
",",
"4",
",",
"(",
"'If statement had no body and no else clause'",
")",
")"
] | https://github.com/JoseExposito/touchegg/blob/1f3fda214358d071c05da4bf17c070c33d67b5eb/cmake/cpplint.py#L4002-L4103 |
||
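A minimal sketch (not part of the dataset record above) of the kind of C++ input the `CheckEmptyBlockBody` function is designed to flag; the snippets are held in Python strings since this dataset's language column is Python, and the category, severity, and message in the comments are quoted from the function body itself.

```python
# Input that the check above flags: an `if` whose braced body is completely
# empty (no comments) and that has no `else` clause.
flagged_snippet = """
if (ready) {
}
"""
# For such input the function reports 'whitespace/empty_if_body' at severity 4:
# "If statement had no body and no else clause".

not_flagged_snippet = """
if (ready) {
} else {
  retry();
}
"""
# An else clause -- or any comment inside the braces -- suppresses the warning,
# per the early `return`s in the function body above.
```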
BitMEX/api-connectors | 37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812 | auto-generated/python/swagger_client/models/user_event.py | python | UserEvent.geoip_sub_region | (self) | return self._geoip_sub_region | Gets the geoip_sub_region of this UserEvent. # noqa: E501
:return: The geoip_sub_region of this UserEvent. # noqa: E501
:rtype: str | Gets the geoip_sub_region of this UserEvent. # noqa: E501 | [
"Gets",
"the",
"geoip_sub_region",
"of",
"this",
"UserEvent",
".",
"#",
"noqa",
":",
"E501"
] | def geoip_sub_region(self):
"""Gets the geoip_sub_region of this UserEvent. # noqa: E501
:return: The geoip_sub_region of this UserEvent. # noqa: E501
:rtype: str
"""
return self._geoip_sub_region | [
"def",
"geoip_sub_region",
"(",
"self",
")",
":",
"return",
"self",
".",
"_geoip_sub_region"
] | https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/user_event.py#L288-L295 |
|
naver/sling | 5671cd445a2caae0b4dd0332299e4cfede05062c | webkit/Tools/Scripts/webkitpy/style/filereader.py | python | TextFileReader.__init__ | (self, filesystem, processor) | Create an instance.
Arguments:
processor: A ProcessorBase instance. | Create an instance. | [
"Create",
"an",
"instance",
"."
] | def __init__(self, filesystem, processor):
"""Create an instance.
Arguments:
processor: A ProcessorBase instance.
"""
self.filesystem = filesystem
self._processor = processor
self.file_count = 0
self.delete_only_file_count = 0 | [
"def",
"__init__",
"(",
"self",
",",
"filesystem",
",",
"processor",
")",
":",
"self",
".",
"filesystem",
"=",
"filesystem",
"self",
".",
"_processor",
"=",
"processor",
"self",
".",
"file_count",
"=",
"0",
"self",
".",
"delete_only_file_count",
"=",
"0"
] | https://github.com/naver/sling/blob/5671cd445a2caae0b4dd0332299e4cfede05062c/webkit/Tools/Scripts/webkitpy/style/filereader.py#L55-L66 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/pubsub/utils/topictreeprinter.py | python | TopicTreePrinter.__init__ | (self, extra=None, width=70, indentStep=4,
bulletTopic='\\--', bulletTopicItem='|==', bulletTopicArg='-', fileObj=None) | Topic tree printer will print listeners for each topic only
if printListeners is True. The width will be used to limit
the width of text output, while indentStep is the number of
spaces added each time the text is indented further. The
three bullet parameters define the strings used for each
item (topic, topic items, and kwargs). | Topic tree printer will print listeners for each topic only
if printListeners is True. The width will be used to limit
the width of text output, while indentStep is the number of
spaces added each time the text is indented further. The
three bullet parameters define the strings used for each
item (topic, topic items, and kwargs). | [
"Topic",
"tree",
"printer",
"will",
"print",
"listeners",
"for",
"each",
"topic",
"only",
"if",
"printListeners",
"is",
"True",
".",
"The",
"width",
"will",
"be",
"used",
"to",
"limit",
"the",
"width",
"of",
"text",
"output",
"while",
"indentStep",
"is",
"the",
"number",
"of",
"spaces",
"added",
"each",
"time",
"the",
"text",
"is",
"indented",
"further",
".",
"The",
"three",
"bullet",
"parameters",
"define",
"the",
"strings",
"used",
"for",
"each",
"item",
"(",
"topic",
"topic",
"items",
"and",
"kwargs",
")",
"."
] | def __init__(self, extra=None, width=70, indentStep=4,
bulletTopic='\\--', bulletTopicItem='|==', bulletTopicArg='-', fileObj=None):
"""Topic tree printer will print listeners for each topic only
if printListeners is True. The width will be used to limit
the width of text output, while indentStep is the number of
spaces added each time the text is indented further. The
three bullet parameters define the strings used for each
item (topic, topic items, and kwargs). """
self.__contentMeth = dict(
D = self.__printTopicDescription,
A = self.__printTopicArgsAll,
a = self.__printTopicArgNames,
L = self.__printTopicListeners)
assert self.allowedExtras == set(self.__contentMeth.keys())
import sys
self.__destination = fileObj or sys.stdout
self.__output = []
self.__content = extra or ''
unknownSel = set(self.__content) - self.allowedExtras
if unknownSel:
msg = 'These extra chars not known: %s' % ','.join(unknownSel)
raise ValueError(msg)
self.__width = width
self.__wrapper = TextWrapper(width)
self.__indent = 0
self.__indentStep = indentStep
self.__topicsBullet = bulletTopic
self.__topicItemsBullet = bulletTopicItem
self.__topicArgsBullet = bulletTopicArg | [
"def",
"__init__",
"(",
"self",
",",
"extra",
"=",
"None",
",",
"width",
"=",
"70",
",",
"indentStep",
"=",
"4",
",",
"bulletTopic",
"=",
"'\\\\--'",
",",
"bulletTopicItem",
"=",
"'|=='",
",",
"bulletTopicArg",
"=",
"'-'",
",",
"fileObj",
"=",
"None",
")",
":",
"self",
".",
"__contentMeth",
"=",
"dict",
"(",
"D",
"=",
"self",
".",
"__printTopicDescription",
",",
"A",
"=",
"self",
".",
"__printTopicArgsAll",
",",
"a",
"=",
"self",
".",
"__printTopicArgNames",
",",
"L",
"=",
"self",
".",
"__printTopicListeners",
")",
"assert",
"self",
".",
"allowedExtras",
"==",
"set",
"(",
"self",
".",
"__contentMeth",
".",
"keys",
"(",
")",
")",
"import",
"sys",
"self",
".",
"__destination",
"=",
"fileObj",
"or",
"sys",
".",
"stdout",
"self",
".",
"__output",
"=",
"[",
"]",
"self",
".",
"__content",
"=",
"extra",
"or",
"''",
"unknownSel",
"=",
"set",
"(",
"self",
".",
"__content",
")",
"-",
"self",
".",
"allowedExtras",
"if",
"unknownSel",
":",
"msg",
"=",
"'These extra chars not known: %s'",
"%",
"','",
".",
"join",
"(",
"unknownSel",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"self",
".",
"__width",
"=",
"width",
"self",
".",
"__wrapper",
"=",
"TextWrapper",
"(",
"width",
")",
"self",
".",
"__indent",
"=",
"0",
"self",
".",
"__indentStep",
"=",
"indentStep",
"self",
".",
"__topicsBullet",
"=",
"bulletTopic",
"self",
".",
"__topicItemsBullet",
"=",
"bulletTopicItem",
"self",
".",
"__topicArgsBullet",
"=",
"bulletTopicArg"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/pubsub/utils/topictreeprinter.py#L46-L76 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/propgrid.py | python | PyColourProperty.__init__ | (self, *args, **kwargs) | __init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
Colour value=*wxWHITE) -> PyColourProperty | __init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
Colour value=*wxWHITE) -> PyColourProperty | [
"__init__",
"(",
"self",
"String",
"label",
"=",
"(",
"*",
"wxPGProperty",
"::",
"sm_wxPG_LABEL",
")",
"String",
"name",
"=",
"(",
"*",
"wxPGProperty",
"::",
"sm_wxPG_LABEL",
")",
"Colour",
"value",
"=",
"*",
"wxWHITE",
")",
"-",
">",
"PyColourProperty"
] | def __init__(self, *args, **kwargs):
"""
__init__(self, String label=(*wxPGProperty::sm_wxPG_LABEL), String name=(*wxPGProperty::sm_wxPG_LABEL),
Colour value=*wxWHITE) -> PyColourProperty
"""
_propgrid.PyColourProperty_swiginit(self,_propgrid.new_PyColourProperty(*args, **kwargs))
self._SetSelf(self); self._RegisterMethods() | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"_propgrid",
".",
"PyColourProperty_swiginit",
"(",
"self",
",",
"_propgrid",
".",
"new_PyColourProperty",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"self",
".",
"_SetSelf",
"(",
"self",
")",
"self",
".",
"_RegisterMethods",
"(",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/propgrid.py#L4295-L4301 |
||
trailofbits/llvm-sanitizer-tutorial | d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99 | llvm/projects/compiler-rt/lib/sanitizer_common/scripts/cpplint.py | python | _FunctionState.Count | (self) | Count line in current function body. | Count line in current function body. | [
"Count",
"line",
"in",
"current",
"function",
"body",
"."
] | def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1 | [
"def",
"Count",
"(",
"self",
")",
":",
"if",
"self",
".",
"in_a_function",
":",
"self",
".",
"lines_in_function",
"+=",
"1"
] | https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/projects/compiler-rt/lib/sanitizer_common/scripts/cpplint.py#L695-L698 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/utils.py | python | format_datetime | (dt, usegmt=False) | return _format_timetuple_and_zone(now, zone) | Turn a datetime into a date string as specified in RFC 2822.
If usegmt is True, dt must be an aware datetime with an offset of zero. In
this case 'GMT' will be rendered instead of the normal +0000 required by
RFC2822. This is to support HTTP headers involving date stamps. | Turn a datetime into a date string as specified in RFC 2822. | [
"Turn",
"a",
"datetime",
"into",
"a",
"date",
"string",
"as",
"specified",
"in",
"RFC",
"2822",
"."
] | def format_datetime(dt, usegmt=False):
"""Turn a datetime into a date string as specified in RFC 2822.
If usegmt is True, dt must be an aware datetime with an offset of zero. In
this case 'GMT' will be rendered instead of the normal +0000 required by
RFC2822. This is to support HTTP headers involving date stamps.
"""
now = dt.timetuple()
if usegmt:
if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
raise ValueError("usegmt option requires a UTC datetime")
zone = 'GMT'
elif dt.tzinfo is None:
zone = '-0000'
else:
zone = dt.strftime("%z")
return _format_timetuple_and_zone(now, zone) | [
"def",
"format_datetime",
"(",
"dt",
",",
"usegmt",
"=",
"False",
")",
":",
"now",
"=",
"dt",
".",
"timetuple",
"(",
")",
"if",
"usegmt",
":",
"if",
"dt",
".",
"tzinfo",
"is",
"None",
"or",
"dt",
".",
"tzinfo",
"!=",
"datetime",
".",
"timezone",
".",
"utc",
":",
"raise",
"ValueError",
"(",
"\"usegmt option requires a UTC datetime\"",
")",
"zone",
"=",
"'GMT'",
"elif",
"dt",
".",
"tzinfo",
"is",
"None",
":",
"zone",
"=",
"'-0000'",
"else",
":",
"zone",
"=",
"dt",
".",
"strftime",
"(",
"\"%z\"",
")",
"return",
"_format_timetuple_and_zone",
"(",
"now",
",",
"zone",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/utils.py#L155-L171 |
|
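A short usage sketch of `email.utils.format_datetime` from the record above (illustrative only; the example datetimes are invented).

```python
import datetime
from email.utils import format_datetime

aware = datetime.datetime(2020, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)
print(format_datetime(aware))               # Thu, 02 Jan 2020 03:04:05 +0000
print(format_datetime(aware, usegmt=True))  # Thu, 02 Jan 2020 03:04:05 GMT

naive = datetime.datetime(2020, 1, 2, 3, 4, 5)
print(format_datetime(naive))               # naive datetimes render zone '-0000'

# usegmt=True with a naive or non-UTC datetime raises ValueError, per the
# guard at the top of the function.
```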
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/aui.py | python | AuiMDIParentFrame.ActivateNext | (*args, **kwargs) | return _aui.AuiMDIParentFrame_ActivateNext(*args, **kwargs) | ActivateNext(self) | ActivateNext(self) | [
"ActivateNext",
"(",
"self",
")"
] | def ActivateNext(*args, **kwargs):
"""ActivateNext(self)"""
return _aui.AuiMDIParentFrame_ActivateNext(*args, **kwargs) | [
"def",
"ActivateNext",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiMDIParentFrame_ActivateNext",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/aui.py#L1477-L1479 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_core.py | python | PyApp.IsDisplayAvailable | (*args, **kwargs) | return _core_.PyApp_IsDisplayAvailable(*args, **kwargs) | IsDisplayAvailable() -> bool
Tests if it is possible to create a GUI in the current environment.
This will mean different things on the different platforms.
* On X Windows systems this function will return ``False`` if it is
not able to open a connection to the X server, which can happen
if $DISPLAY is not set, or is not set correctly.
* On Mac OS X a ``False`` return value will mean that wx is not
able to access the window manager, which can happen if logged in
remotely or if running from the normal version of python instead
of the framework version, (i.e., pythonw.)
* On MS Windows... | IsDisplayAvailable() -> bool | [
"IsDisplayAvailable",
"()",
"-",
">",
"bool"
] | def IsDisplayAvailable(*args, **kwargs):
"""
IsDisplayAvailable() -> bool
Tests if it is possible to create a GUI in the current environment.
This will mean different things on the different platforms.
* On X Windows systems this function will return ``False`` if it is
not able to open a connection to the X server, which can happen
if $DISPLAY is not set, or is not set correctly.
* On Mac OS X a ``False`` return value will mean that wx is not
able to access the window manager, which can happen if logged in
remotely or if running from the normal version of python instead
of the framework version, (i.e., pythonw.)
* On MS Windows...
"""
return _core_.PyApp_IsDisplayAvailable(*args, **kwargs) | [
"def",
"IsDisplayAvailable",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"PyApp_IsDisplayAvailable",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L8218-L8237 |
|
pytorch/pytorch | 7176c92687d3cc847cc046bf002269c6949a21c2 | torch/cuda/__init__.py | python | device_count | () | r"""Returns the number of GPUs available. | r"""Returns the number of GPUs available. | [
"r",
"Returns",
"the",
"number",
"of",
"GPUs",
"available",
"."
] | def device_count() -> int:
r"""Returns the number of GPUs available."""
if is_available():
return torch._C._cuda_getDeviceCount()
else:
return 0 | [
"def",
"device_count",
"(",
")",
"->",
"int",
":",
"if",
"is_available",
"(",
")",
":",
"return",
"torch",
".",
"_C",
".",
"_cuda_getDeviceCount",
"(",
")",
"else",
":",
"return",
"0"
] | https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/cuda/__init__.py#L453-L458 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/idlelib/configHandler.py | python | IdleUserConfParser.RemoveFile | (self) | Removes the user config file from disk if it exists. | Removes the user config file from disk if it exists. | [
"Removes",
"the",
"user",
"config",
"file",
"from",
"disk",
"if",
"it",
"exists",
"."
] | def RemoveFile(self):
"""
Removes the user config file from disk if it exists.
"""
if os.path.exists(self.file):
os.remove(self.file) | [
"def",
"RemoveFile",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"file",
")",
":",
"os",
".",
"remove",
"(",
"self",
".",
"file",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/idlelib/configHandler.py#L127-L132 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/utils/_scipy_sparse_lsqr_backport.py | python | _sym_ortho | (a, b) | return c, s, r | Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf | Stable implementation of Givens rotation. | [
"Stable",
"implementation",
"of",
"Givens",
"rotation",
"."
] | def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r | [
"def",
"_sym_ortho",
"(",
"a",
",",
"b",
")",
":",
"if",
"b",
"==",
"0",
":",
"return",
"np",
".",
"sign",
"(",
"a",
")",
",",
"0",
",",
"abs",
"(",
"a",
")",
"elif",
"a",
"==",
"0",
":",
"return",
"0",
",",
"np",
".",
"sign",
"(",
"b",
")",
",",
"abs",
"(",
"b",
")",
"elif",
"abs",
"(",
"b",
")",
">",
"abs",
"(",
"a",
")",
":",
"tau",
"=",
"a",
"/",
"b",
"s",
"=",
"np",
".",
"sign",
"(",
"b",
")",
"/",
"sqrt",
"(",
"1",
"+",
"tau",
"*",
"tau",
")",
"c",
"=",
"s",
"*",
"tau",
"r",
"=",
"b",
"/",
"s",
"else",
":",
"tau",
"=",
"b",
"/",
"a",
"c",
"=",
"np",
".",
"sign",
"(",
"a",
")",
"/",
"sqrt",
"(",
"1",
"+",
"tau",
"*",
"tau",
")",
"s",
"=",
"c",
"*",
"tau",
"r",
"=",
"a",
"/",
"c",
"return",
"c",
",",
"s",
",",
"r"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/utils/_scipy_sparse_lsqr_backport.py#L63-L95 |
|
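A numerical sanity check (an editorial sketch, not dataset content) of the Givens-rotation contract implemented by `_sym_ortho` above: for `c, s, r = _sym_ortho(a, b)` one expects `c*a + s*b == r` and `-s*a + c*b == 0`. The function body is reproduced from the record (lightly reformatted) so the demo is self-contained.

```python
import numpy as np
from math import sqrt

def _sym_ortho(a, b):  # same logic as the record above
    if b == 0:
        return np.sign(a), 0, abs(a)
    elif a == 0:
        return 0, np.sign(b), abs(b)
    elif abs(b) > abs(a):
        tau = a / b
        s = np.sign(b) / sqrt(1 + tau * tau)
        c = s * tau
        r = b / s
    else:
        tau = b / a
        c = np.sign(a) / sqrt(1 + tau * tau)
        s = c * tau
        r = a / c
    return c, s, r

c, s, r = _sym_ortho(3.0, 4.0)            # c=0.6, s=0.8, r=5.0
assert np.isclose(c * 3.0 + s * 4.0, r)   # rotation maps (a, b) onto (r, 0)
assert np.isclose(-s * 3.0 + c * 4.0, 0.0)
```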
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_misc.py | python | SystemOptions.SetOptionInt | (*args, **kwargs) | return _misc_.SystemOptions_SetOptionInt(*args, **kwargs) | SetOptionInt(String name, int value) | SetOptionInt(String name, int value) | [
"SetOptionInt",
"(",
"String",
"name",
"int",
"value",
")"
] | def SetOptionInt(*args, **kwargs):
"""SetOptionInt(String name, int value)"""
return _misc_.SystemOptions_SetOptionInt(*args, **kwargs) | [
"def",
"SetOptionInt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_misc_",
".",
"SystemOptions_SetOptionInt",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L226-L228 |
|
apache/incubator-mxnet | f03fb23f1d103fec9541b5ae59ee06b1734a51d9 | python/mxnet/ndarray/numpy_extension/control_flow.py | python | _flatten | (args, inout_str) | return flat, fmts | Parse the arguments into a flattened list + an additional format array.
The format array stores the structure of the original arguments to help reconstruct the inputs.
Parameters
----------
args : NDArray, Symbol, or (nested) list of Symbol or NDArray
We allow None inside the args.
inout_str : str
The name of the HybridBlock
Returns
-------
flat : list of Symbol or NDArray
The flatten version of the input args.
fmts : (nested) list of ints
Stores the format information of the original structured args. | Parse the arguments into a flattened list + an additional format array.
The format array stores the structure of the original arguments to help reconstruct the inputs. | [
"Parse",
"the",
"arguments",
"into",
"a",
"flattened",
"list",
"+",
"an",
"additional",
"format",
"array",
".",
"The",
"format",
"array",
"stores",
"the",
"structure",
"of",
"the",
"original",
"arguments",
"to",
"help",
"reconstruct",
"the",
"inputs",
"."
] | def _flatten(args, inout_str):
"""Parse the arguments into a flattened list + an additional format array.
The format array stores the structure of the original arguments to help reconstruct the inputs.
Parameters
----------
args : NDArray, Symbol, or (nested) list of Symbol or NDArray
We allow None inside the args.
inout_str : str
The name of the HybridBlock
Returns
-------
flat : list of Symbol or NDArray
The flatten version of the input args.
fmts : (nested) list of ints
Stores the format information of the original structured args.
"""
if isinstance(args, np_ndarray):
return [args], int(0)
if isinstance(args, Symbol):
length = len(args.list_outputs())
length = length if length > 1 else 0
return [args], int(length)
if args is None:
return [None], int(-1)
if not isinstance(args, (list, tuple)):
raise ValueError("When hybridized, the input of HybridBlock {}"
" must be (nested) list of Symbol"
" or NDArray, "
"but got {} of type {}".format(inout_str, str(args), str(type(args))))
flat = []
fmts = []
for i in args:
arg, fmt = _flatten(i, inout_str)
flat.extend(arg)
fmts.append(fmt)
return flat, fmts | [
"def",
"_flatten",
"(",
"args",
",",
"inout_str",
")",
":",
"if",
"isinstance",
"(",
"args",
",",
"np_ndarray",
")",
":",
"return",
"[",
"args",
"]",
",",
"int",
"(",
"0",
")",
"if",
"isinstance",
"(",
"args",
",",
"Symbol",
")",
":",
"length",
"=",
"len",
"(",
"args",
".",
"list_outputs",
"(",
")",
")",
"length",
"=",
"length",
"if",
"length",
">",
"1",
"else",
"0",
"return",
"[",
"args",
"]",
",",
"int",
"(",
"length",
")",
"if",
"args",
"is",
"None",
":",
"return",
"[",
"None",
"]",
",",
"int",
"(",
"-",
"1",
")",
"if",
"not",
"isinstance",
"(",
"args",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"When hybridized, the input of HybridBlock {}\"",
"\" must be (nested) list of Symbol\"",
"\" or NDArray, \"",
"\"but got {} of type {}\"",
".",
"format",
"(",
"inout_str",
",",
"str",
"(",
"args",
")",
",",
"str",
"(",
"type",
"(",
"args",
")",
")",
")",
")",
"flat",
"=",
"[",
"]",
"fmts",
"=",
"[",
"]",
"for",
"i",
"in",
"args",
":",
"arg",
",",
"fmt",
"=",
"_flatten",
"(",
"i",
",",
"inout_str",
")",
"flat",
".",
"extend",
"(",
"arg",
")",
"fmts",
".",
"append",
"(",
"fmt",
")",
"return",
"flat",
",",
"fmts"
] | https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/ndarray/numpy_extension/control_flow.py#L33-L71 |
|
kristjankorjus/Replicating-DeepMind | 68539394e792b34a4d6b430a2eb73b8b8f91d8db | sandbox/example3.py | python | OutputLayer.errors | (self, y) | return np.abs(T.mean(self.output-y)) | return the error made in predicting the output value
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label | return the error made in predicting the output value
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label | [
"return",
"the",
"error",
"made",
"in",
"predicting",
"the",
"output",
"value",
":",
"type",
"y",
":",
"theano",
".",
"tensor",
".",
"TensorType",
":",
"param",
"y",
":",
"corresponds",
"to",
"a",
"vector",
"that",
"gives",
"for",
"each",
"example",
"the",
"correct",
"label"
] | def errors(self, y):
""" return the error made in predicting the output value
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of output
if y.ndim != self.output.ndim:
raise TypeError('y should have the same shape as self.output', ('y', y.type, 'output', self.output.type))
return np.abs(T.mean(self.output-y)) | [
"def",
"errors",
"(",
"self",
",",
"y",
")",
":",
"# check if y has same dimension of output",
"if",
"y",
".",
"ndim",
"!=",
"self",
".",
"output",
".",
"ndim",
":",
"raise",
"TypeError",
"(",
"'y should have the same shape as self.output'",
",",
"(",
"'y'",
",",
"y",
".",
"type",
",",
"'output'",
",",
"self",
".",
"output",
".",
"type",
")",
")",
"return",
"np",
".",
"abs",
"(",
"T",
".",
"mean",
"(",
"self",
".",
"output",
"-",
"y",
")",
")"
] | https://github.com/kristjankorjus/Replicating-DeepMind/blob/68539394e792b34a4d6b430a2eb73b8b8f91d8db/sandbox/example3.py#L75-L86 |
|
JumpingYang001/webrtc | c03d6e965e1f54aeadd670e491eabe5fdb8db968 | tools_webrtc/network_emulator/network_emulator.py | python | Cleanup | () | Stops the network emulation by flushing all Dummynet rules.
Notice that this will flush any rules that may have been created previously
before starting the emulation. | Stops the network emulation by flushing all Dummynet rules. | [
"Stops",
"the",
"network",
"emulation",
"by",
"flushing",
"all",
"Dummynet",
"rules",
"."
] | def Cleanup():
"""Stops the network emulation by flushing all Dummynet rules.
Notice that this will flush any rules that may have been created previously
before starting the emulation.
"""
_RunIpfwCommand(['-f', 'flush'], 'Failed to flush Dummynet rules!')
_RunIpfwCommand(['-f', 'pipe', 'flush'], 'Failed to flush Dummynet pipes!') | [
"def",
"Cleanup",
"(",
")",
":",
"_RunIpfwCommand",
"(",
"[",
"'-f'",
",",
"'flush'",
"]",
",",
"'Failed to flush Dummynet rules!'",
")",
"_RunIpfwCommand",
"(",
"[",
"'-f'",
",",
"'pipe'",
",",
"'flush'",
"]",
",",
"'Failed to flush Dummynet pipes!'",
")"
] | https://github.com/JumpingYang001/webrtc/blob/c03d6e965e1f54aeadd670e491eabe5fdb8db968/tools_webrtc/network_emulator/network_emulator.py#L165-L172 |
||
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/xml/sax/_exceptions.py | python | SAXException.__str__ | (self) | return self._msg | Create a string representation of the exception. | Create a string representation of the exception. | [
"Create",
"a",
"string",
"representation",
"of",
"the",
"exception",
"."
] | def __str__(self):
"Create a string representation of the exception."
return self._msg | [
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"self",
".",
"_msg"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/xml/sax/_exceptions.py#L34-L36 |
|
nvdla/sw | 79538ba1b52b040a4a4645f630e457fa01839e90 | umd/external/protobuf-2.6/python/google/protobuf/descriptor_pool.py | python | DescriptorPool._ConvertMessageDescriptor | (self, desc_proto, package=None, file_desc=None,
scope=None) | return desc | Adds the proto to the pool in the specified package.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: The package the proto should be located in.
file_desc: The file containing this message.
scope: Dict mapping short and full symbols to message and enum types.
Returns:
The added descriptor. | Adds the proto to the pool in the specified package. | [
"Adds",
"the",
"proto",
"to",
"the",
"pool",
"in",
"the",
"specified",
"package",
"."
] | def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
scope=None):
"""Adds the proto to the pool in the specified package.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: The package the proto should be located in.
file_desc: The file containing this message.
scope: Dict mapping short and full symbols to message and enum types.
Returns:
The added descriptor.
"""
if package:
desc_name = '.'.join((package, desc_proto.name))
else:
desc_name = desc_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
if scope is None:
scope = {}
nested = [
self._ConvertMessageDescriptor(nested, desc_name, file_desc, scope)
for nested in desc_proto.nested_type]
enums = [
self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope)
for enum in desc_proto.enum_type]
fields = [self.MakeFieldDescriptor(field, desc_name, index)
for index, field in enumerate(desc_proto.field)]
extensions = [
self.MakeFieldDescriptor(extension, desc_name, index, is_extension=True)
for index, extension in enumerate(desc_proto.extension)]
oneofs = [
descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)),
index, None, [])
for index, desc in enumerate(desc_proto.oneof_decl)]
extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
if extension_ranges:
is_extendable = True
else:
is_extendable = False
desc = descriptor.Descriptor(
name=desc_proto.name,
full_name=desc_name,
filename=file_name,
containing_type=None,
fields=fields,
oneofs=oneofs,
nested_types=nested,
enum_types=enums,
extensions=extensions,
options=desc_proto.options,
is_extendable=is_extendable,
extension_ranges=extension_ranges,
file=file_desc,
serialized_start=None,
serialized_end=None)
for nested in desc.nested_types:
nested.containing_type = desc
for enum in desc.enum_types:
enum.containing_type = desc
for field_index, field_desc in enumerate(desc_proto.field):
if field_desc.HasField('oneof_index'):
oneof_index = field_desc.oneof_index
oneofs[oneof_index].fields.append(fields[field_index])
fields[field_index].containing_oneof = oneofs[oneof_index]
scope[_PrefixWithDot(desc_name)] = desc
self._descriptors[desc_name] = desc
return desc | [
"def",
"_ConvertMessageDescriptor",
"(",
"self",
",",
"desc_proto",
",",
"package",
"=",
"None",
",",
"file_desc",
"=",
"None",
",",
"scope",
"=",
"None",
")",
":",
"if",
"package",
":",
"desc_name",
"=",
"'.'",
".",
"join",
"(",
"(",
"package",
",",
"desc_proto",
".",
"name",
")",
")",
"else",
":",
"desc_name",
"=",
"desc_proto",
".",
"name",
"if",
"file_desc",
"is",
"None",
":",
"file_name",
"=",
"None",
"else",
":",
"file_name",
"=",
"file_desc",
".",
"name",
"if",
"scope",
"is",
"None",
":",
"scope",
"=",
"{",
"}",
"nested",
"=",
"[",
"self",
".",
"_ConvertMessageDescriptor",
"(",
"nested",
",",
"desc_name",
",",
"file_desc",
",",
"scope",
")",
"for",
"nested",
"in",
"desc_proto",
".",
"nested_type",
"]",
"enums",
"=",
"[",
"self",
".",
"_ConvertEnumDescriptor",
"(",
"enum",
",",
"desc_name",
",",
"file_desc",
",",
"None",
",",
"scope",
")",
"for",
"enum",
"in",
"desc_proto",
".",
"enum_type",
"]",
"fields",
"=",
"[",
"self",
".",
"MakeFieldDescriptor",
"(",
"field",
",",
"desc_name",
",",
"index",
")",
"for",
"index",
",",
"field",
"in",
"enumerate",
"(",
"desc_proto",
".",
"field",
")",
"]",
"extensions",
"=",
"[",
"self",
".",
"MakeFieldDescriptor",
"(",
"extension",
",",
"desc_name",
",",
"index",
",",
"is_extension",
"=",
"True",
")",
"for",
"index",
",",
"extension",
"in",
"enumerate",
"(",
"desc_proto",
".",
"extension",
")",
"]",
"oneofs",
"=",
"[",
"descriptor",
".",
"OneofDescriptor",
"(",
"desc",
".",
"name",
",",
"'.'",
".",
"join",
"(",
"(",
"desc_name",
",",
"desc",
".",
"name",
")",
")",
",",
"index",
",",
"None",
",",
"[",
"]",
")",
"for",
"index",
",",
"desc",
"in",
"enumerate",
"(",
"desc_proto",
".",
"oneof_decl",
")",
"]",
"extension_ranges",
"=",
"[",
"(",
"r",
".",
"start",
",",
"r",
".",
"end",
")",
"for",
"r",
"in",
"desc_proto",
".",
"extension_range",
"]",
"if",
"extension_ranges",
":",
"is_extendable",
"=",
"True",
"else",
":",
"is_extendable",
"=",
"False",
"desc",
"=",
"descriptor",
".",
"Descriptor",
"(",
"name",
"=",
"desc_proto",
".",
"name",
",",
"full_name",
"=",
"desc_name",
",",
"filename",
"=",
"file_name",
",",
"containing_type",
"=",
"None",
",",
"fields",
"=",
"fields",
",",
"oneofs",
"=",
"oneofs",
",",
"nested_types",
"=",
"nested",
",",
"enum_types",
"=",
"enums",
",",
"extensions",
"=",
"extensions",
",",
"options",
"=",
"desc_proto",
".",
"options",
",",
"is_extendable",
"=",
"is_extendable",
",",
"extension_ranges",
"=",
"extension_ranges",
",",
"file",
"=",
"file_desc",
",",
"serialized_start",
"=",
"None",
",",
"serialized_end",
"=",
"None",
")",
"for",
"nested",
"in",
"desc",
".",
"nested_types",
":",
"nested",
".",
"containing_type",
"=",
"desc",
"for",
"enum",
"in",
"desc",
".",
"enum_types",
":",
"enum",
".",
"containing_type",
"=",
"desc",
"for",
"field_index",
",",
"field_desc",
"in",
"enumerate",
"(",
"desc_proto",
".",
"field",
")",
":",
"if",
"field_desc",
".",
"HasField",
"(",
"'oneof_index'",
")",
":",
"oneof_index",
"=",
"field_desc",
".",
"oneof_index",
"oneofs",
"[",
"oneof_index",
"]",
".",
"fields",
".",
"append",
"(",
"fields",
"[",
"field_index",
"]",
")",
"fields",
"[",
"field_index",
"]",
".",
"containing_oneof",
"=",
"oneofs",
"[",
"oneof_index",
"]",
"scope",
"[",
"_PrefixWithDot",
"(",
"desc_name",
")",
"]",
"=",
"desc",
"self",
".",
"_descriptors",
"[",
"desc_name",
"]",
"=",
"desc",
"return",
"desc"
] | https://github.com/nvdla/sw/blob/79538ba1b52b040a4a4645f630e457fa01839e90/umd/external/protobuf-2.6/python/google/protobuf/descriptor_pool.py#L324-L399 |
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/uuid.py | python | _unixdll_getnode | () | return UUID(bytes=_buffer.raw).node | Get the hardware address on Unix using ctypes. | Get the hardware address on Unix using ctypes. | [
"Get",
"the",
"hardware",
"address",
"on",
"Unix",
"using",
"ctypes",
"."
] | def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_buffer = ctypes.create_string_buffer(16)
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node | [
"def",
"_unixdll_getnode",
"(",
")",
":",
"_buffer",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"16",
")",
"_uuid_generate_time",
"(",
"_buffer",
")",
"return",
"UUID",
"(",
"bytes",
"=",
"_buffer",
".",
"raw",
")",
".",
"node"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/uuid.py#L442-L446 |
|
oracle/graaljs | 36a56e8e993d45fc40939a3a4d9c0c24990720f1 | graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | python | XCConfigurationList.HasBuildSetting | (self, key) | return 1 | Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1. | Determines the state of a build setting in all XCBuildConfiguration
child objects. | [
"Determines",
"the",
"state",
"of",
"a",
"build",
"setting",
"in",
"all",
"XCBuildConfiguration",
"child",
"objects",
"."
] | def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties["buildConfigurations"]:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1 | [
"def",
"HasBuildSetting",
"(",
"self",
",",
"key",
")",
":",
"has",
"=",
"None",
"value",
"=",
"None",
"for",
"configuration",
"in",
"self",
".",
"_properties",
"[",
"\"buildConfigurations\"",
"]",
":",
"configuration_has",
"=",
"configuration",
".",
"HasBuildSetting",
"(",
"key",
")",
"if",
"has",
"is",
"None",
":",
"has",
"=",
"configuration_has",
"elif",
"has",
"!=",
"configuration_has",
":",
"return",
"-",
"1",
"if",
"configuration_has",
":",
"configuration_value",
"=",
"configuration",
".",
"GetBuildSetting",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"configuration_value",
"elif",
"value",
"!=",
"configuration_value",
":",
"return",
"-",
"1",
"if",
"not",
"has",
":",
"return",
"0",
"return",
"1"
] | https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py#L1720-L1752 |
|
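The tri-state return value above (1 / 0 / -1) is easiest to see with stand-ins. The sketch below mirrors the method's logic as a free function over plain dicts; `has_build_setting` and the dict stand-ins for `XCBuildConfiguration` build settings are inventions of this illustration, not gyp API.

```python
def has_build_setting(configurations, key):
    # Same control flow as XCConfigurationList.HasBuildSetting above,
    # with each configuration's build settings modeled as a dict.
    has = None
    value = None
    for settings in configurations:
        configuration_has = key in settings
        if has is None:
            has = configuration_has
        elif has != configuration_has:
            return -1  # key present in some children, absent in others
        if configuration_has:
            configuration_value = settings[key]
            if value is None:
                value = configuration_value
            elif value != configuration_value:
                return -1  # children disagree on the value
    if not has:
        return 0  # no child has the key
    return 1      # all children agree

assert has_build_setting([{"ARCHS": "x86_64"}, {"ARCHS": "x86_64"}], "ARCHS") == 1
assert has_build_setting([{}, {}], "ARCHS") == 0
assert has_build_setting([{"ARCHS": "x86_64"}, {}], "ARCHS") == -1
assert has_build_setting([{"ARCHS": "x86_64"}, {"ARCHS": "arm64"}], "ARCHS") == -1
```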
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/contrib/distributions/python/ops/operator_pd.py | python | _extract_batch_shape | (x, num_event_dims, name='extract_batch_shape') | Extract the batch shape from `x`.
Assuming `x.shape = batch_shape + event_shape`, when `event_shape` has
`num_event_dims` dimensions. This `Op` returns the batch shape `Tensor`.
Args:
x: `Tensor` with rank at least `num_event_dims`. If rank is not high enough
this `Op` will fail.
num_event_dims: `int32` scalar `Tensor`. The number of trailing dimensions
in `x` to be considered as part of `event_shape`.
name: A name to prepend to created `Ops`.
Returns:
batch_shape: `1-D` `int32` `Tensor` | Extract the batch shape from `x`. | [
"Extract",
"the",
"batch",
"shape",
"from",
"x",
"."
] | def _extract_batch_shape(x, num_event_dims, name='extract_batch_shape'):
"""Extract the batch shape from `x`.
Assuming `x.shape = batch_shape + event_shape`, when `event_shape` has
`num_event_dims` dimensions. This `Op` returns the batch shape `Tensor`.
Args:
x: `Tensor` with rank at least `num_event_dims`. If rank is not high enough
this `Op` will fail.
num_event_dims: `int32` scalar `Tensor`. The number of trailing dimensions
in `x` to be considered as part of `event_shape`.
name: A name to prepend to created `Ops`.
Returns:
batch_shape: `1-D` `int32` `Tensor`
"""
with ops.op_scope([x], name):
x = ops.convert_to_tensor(x, name='x')
return array_ops.slice(
array_ops.shape(x), [0], [array_ops.rank(x) - num_event_dims]) | [
"def",
"_extract_batch_shape",
"(",
"x",
",",
"num_event_dims",
",",
"name",
"=",
"'extract_batch_shape'",
")",
":",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"x",
"]",
",",
"name",
")",
":",
"x",
"=",
"ops",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"'x'",
")",
"return",
"array_ops",
".",
"slice",
"(",
"array_ops",
".",
"shape",
"(",
"x",
")",
",",
"[",
"0",
"]",
",",
"[",
"array_ops",
".",
"rank",
"(",
"x",
")",
"-",
"num_event_dims",
"]",
")"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/distributions/python/ops/operator_pd.py#L804-L823 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/asyncio/selector_events.py | python | BaseSelectorEventLoop.remove_writer | (self, fd) | return self._remove_writer(fd) | Remove a writer callback. | Remove a writer callback. | [
"Remove",
"a",
"writer",
"callback",
"."
] | def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd) | [
"def",
"remove_writer",
"(",
"self",
",",
"fd",
")",
":",
"self",
".",
"_ensure_fd_no_transport",
"(",
"fd",
")",
"return",
"self",
".",
"_remove_writer",
"(",
"fd",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/asyncio/selector_events.py#L348-L351 |
|
argman/EAST | dca414de39a3a4915a019c9a02c1832a31cdd0ca | eval.py | python | get_images | () | return files | find image files in test data path
:return: list of files found | find image files in test data path
:return: list of files found | [
"find",
"image",
"files",
"in",
"test",
"data",
"path",
":",
"return",
":",
"list",
"of",
"files",
"found"
] | def get_images():
'''
find image files in test data path
:return: list of files found
'''
files = []
exts = ['jpg', 'png', 'jpeg', 'JPG']
for parent, dirnames, filenames in os.walk(FLAGS.test_data_path):
for filename in filenames:
for ext in exts:
if filename.endswith(ext):
files.append(os.path.join(parent, filename))
break
print('Find {} images'.format(len(files)))
return files | [
"def",
"get_images",
"(",
")",
":",
"files",
"=",
"[",
"]",
"exts",
"=",
"[",
"'jpg'",
",",
"'png'",
",",
"'jpeg'",
",",
"'JPG'",
"]",
"for",
"parent",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"FLAGS",
".",
"test_data_path",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"for",
"ext",
"in",
"exts",
":",
"if",
"filename",
".",
"endswith",
"(",
"ext",
")",
":",
"files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"parent",
",",
"filename",
")",
")",
"break",
"print",
"(",
"'Find {} images'",
".",
"format",
"(",
"len",
"(",
"files",
")",
")",
")",
"return",
"files"
] | https://github.com/argman/EAST/blob/dca414de39a3a4915a019c9a02c1832a31cdd0ca/eval.py#L22-L36 |
|
etternagame/etterna | 8775f74ac9c353320128609d4b4150672e9a6d04 | extern/SQLiteCpp/cpplint.py | python | CheckStyle | (filename, clean_lines, linenum, file_extension, nesting_state,
error) | Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Checks rules from the 'C++ style rules' section of cppguide.html. | [
"Checks",
"rules",
"from",
"the",
"C",
"++",
"style",
"rules",
"section",
"of",
"cppguide",
".",
"html",
"."
] | def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings,
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
# SRombauts:
error(filename, linenum, 'whitespace/tab', 5,
'Tab found; use spaces indents only')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for labels
# SRombauts:
elif ((initial_spaces in (1,2,3,5,6,7)) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 4-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developers fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 3,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) | [
"def",
"CheckStyle",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"file_extension",
",",
"nesting_state",
",",
"error",
")",
":",
"# Don't use \"elided\" lines here, otherwise we can't check commented lines.",
"# Don't want to use \"raw\" either, because we don't want to check inside C++11",
"# raw strings,",
"raw_lines",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"raw_lines",
"[",
"linenum",
"]",
"if",
"line",
".",
"find",
"(",
"'\\t'",
")",
"!=",
"-",
"1",
":",
"# SRombauts:",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/tab'",
",",
"5",
",",
"'Tab found; use spaces indents only'",
")",
"# One or three blank spaces at the beginning of the line is weird; it's",
"# hard to reconcile that with 2-space indents.",
"# NOTE: here are the conditions rob pike used for his tests. Mine aren't",
"# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces",
"# if(RLENGTH > 20) complain = 0;",
"# if(match($0, \" +(error|private|public|protected):\")) complain = 0;",
"# if(match(prev, \"&& *$\")) complain = 0;",
"# if(match(prev, \"\\\\|\\\\| *$\")) complain = 0;",
"# if(match(prev, \"[\\\",=><] *$\")) complain = 0;",
"# if(match($0, \" <<\")) complain = 0;",
"# if(match(prev, \" +for \\\\(\")) complain = 0;",
"# if(prevodd && match(prevprev, \" +for \\\\(\")) complain = 0;",
"initial_spaces",
"=",
"0",
"cleansed_line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"while",
"initial_spaces",
"<",
"len",
"(",
"line",
")",
"and",
"line",
"[",
"initial_spaces",
"]",
"==",
"' '",
":",
"initial_spaces",
"+=",
"1",
"if",
"line",
"and",
"line",
"[",
"-",
"1",
"]",
".",
"isspace",
"(",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/end_of_line'",
",",
"4",
",",
"'Line ends in whitespace. Consider deleting these extra spaces.'",
")",
"# There are certain situations we allow one space, notably for labels",
"# SRombauts:",
"elif",
"(",
"(",
"initial_spaces",
"in",
"(",
"1",
",",
"2",
",",
"3",
",",
"5",
",",
"6",
",",
"7",
")",
")",
"and",
"not",
"Match",
"(",
"r'\\s*\\w+\\s*:\\s*$'",
",",
"cleansed_line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/indent'",
",",
"3",
",",
"'Weird number of spaces at line-start. '",
"'Are you using a 4-space indent?'",
")",
"# Check if the line is a header guard.",
"is_header_guard",
"=",
"False",
"if",
"file_extension",
"==",
"'h'",
":",
"cppvar",
"=",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
"if",
"(",
"line",
".",
"startswith",
"(",
"'#ifndef %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#define %s'",
"%",
"cppvar",
")",
"or",
"line",
".",
"startswith",
"(",
"'#endif // %s'",
"%",
"cppvar",
")",
")",
":",
"is_header_guard",
"=",
"True",
"# #include lines and header guards can be long, since there's no clean way to",
"# split them.",
"#",
"# URLs can be long too. It's possible to split these, but it makes them",
"# harder to cut&paste.",
"#",
"# The \"$Id:...$\" comment may also get very long without it being the",
"# developers fault.",
"if",
"(",
"not",
"line",
".",
"startswith",
"(",
"'#include'",
")",
"and",
"not",
"is_header_guard",
"and",
"not",
"Match",
"(",
"r'^\\s*//.*http(s?)://\\S*$'",
",",
"line",
")",
"and",
"not",
"Match",
"(",
"r'^// \\$Id:.*#[0-9]+ \\$$'",
",",
"line",
")",
")",
":",
"line_width",
"=",
"GetLineWidth",
"(",
"line",
")",
"extended_length",
"=",
"int",
"(",
"(",
"_line_length",
"*",
"1.25",
")",
")",
"if",
"line_width",
">",
"extended_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"4",
",",
"'Lines should very rarely be longer than %i characters'",
"%",
"extended_length",
")",
"elif",
"line_width",
">",
"_line_length",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/line_length'",
",",
"3",
",",
"'Lines should be <= %i characters long'",
"%",
"_line_length",
")",
"if",
"(",
"cleansed_line",
".",
"count",
"(",
"';'",
")",
">",
"1",
"and",
"# for loops are allowed two ;'s (and may run over two lines).",
"cleansed_line",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"and",
"(",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"'for'",
")",
"==",
"-",
"1",
"or",
"GetPreviousNonBlankLine",
"(",
"clean_lines",
",",
"linenum",
")",
"[",
"0",
"]",
".",
"find",
"(",
"';'",
")",
"!=",
"-",
"1",
")",
"and",
"# It's ok to have many commands in a switch case that fits in 1 line",
"not",
"(",
"(",
"cleansed_line",
".",
"find",
"(",
"'case '",
")",
"!=",
"-",
"1",
"or",
"cleansed_line",
".",
"find",
"(",
"'default:'",
")",
"!=",
"-",
"1",
")",
"and",
"cleansed_line",
".",
"find",
"(",
"'break;'",
")",
"!=",
"-",
"1",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/newline'",
",",
"0",
",",
"'More than one command on the same line'",
")",
"# Some more style checks",
"CheckBraces",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckEmptyBlockBody",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAccess",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
"CheckCheck",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"CheckAltTokens",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"error",
")",
"classinfo",
"=",
"nesting_state",
".",
"InnermostClass",
"(",
")",
"if",
"classinfo",
":",
"CheckSectionSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"classinfo",
",",
"linenum",
",",
"error",
")"
] | https://github.com/etternagame/etterna/blob/8775f74ac9c353320128609d4b4150672e9a6d04/extern/SQLiteCpp/cpplint.py#L3386-L3492 |
||
SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py | python | xmlNode.xpointerNewContext | (self, doc, origin) | return __tmp | Create a new XPointer context | Create a new XPointer context | [
"Create",
"a",
"new",
"XPointer",
"context"
] | def xpointerNewContext(self, doc, origin):
"""Create a new XPointer context """
if doc is None: doc__o = None
else: doc__o = doc._o
if origin is None: origin__o = None
else: origin__o = origin._o
ret = libxml2mod.xmlXPtrNewContext(doc__o, self._o, origin__o)
if ret is None:raise treeError('xmlXPtrNewContext() failed')
__tmp = xpathContext(_obj=ret)
return __tmp | [
"def",
"xpointerNewContext",
"(",
"self",
",",
"doc",
",",
"origin",
")",
":",
"if",
"doc",
"is",
"None",
":",
"doc__o",
"=",
"None",
"else",
":",
"doc__o",
"=",
"doc",
".",
"_o",
"if",
"origin",
"is",
"None",
":",
"origin__o",
"=",
"None",
"else",
":",
"origin__o",
"=",
"origin",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlXPtrNewContext",
"(",
"doc__o",
",",
"self",
".",
"_o",
",",
"origin__o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlXPtrNewContext() failed'",
")",
"__tmp",
"=",
"xpathContext",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L3865-L3874 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/grid.py | python | GridSizeEvent.AltDown | (*args, **kwargs) | return _grid.GridSizeEvent_AltDown(*args, **kwargs) | AltDown(self) -> bool | AltDown(self) -> bool | [
"AltDown",
"(",
"self",
")",
"-",
">",
"bool"
] | def AltDown(*args, **kwargs):
"""AltDown(self) -> bool"""
return _grid.GridSizeEvent_AltDown(*args, **kwargs) | [
"def",
"AltDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_grid",
".",
"GridSizeEvent_AltDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/grid.py#L2377-L2379 |
|
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/internal/python_message.py | python | _GetFieldByName | (message_descriptor, field_name) | Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name. | Returns a field descriptor by field name. | [
"Returns",
"a",
"field",
"descriptor",
"by",
"field",
"name",
"."
] | def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name)) | [
"def",
"_GetFieldByName",
"(",
"message_descriptor",
",",
"field_name",
")",
":",
"try",
":",
"return",
"message_descriptor",
".",
"fields_by_name",
"[",
"field_name",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"'Protocol message %s has no \"%s\" field.'",
"%",
"(",
"message_descriptor",
".",
"name",
",",
"field_name",
")",
")"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/internal/python_message.py#L542-L555 |
||
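A hedged sketch of how this module-private helper resolves a field descriptor; `my_pb2.Person` is a hypothetical protoc-generated message type, not part of the record:

```python
from google.protobuf.internal.python_message import _GetFieldByName
from my_pb2 import Person  # assumption: any protoc-generated message class

field = _GetFieldByName(Person.DESCRIPTOR, "name")
print(field.full_name, field.number)        # descriptor for the 'name' field

_GetFieldByName(Person.DESCRIPTOR, "nope")  # raises ValueError as documented
```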
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_core.py | python | MouseState.SetY | (*args, **kwargs) | return _core_.MouseState_SetY(*args, **kwargs) | SetY(self, int y) | SetY(self, int y) | [
"SetY",
"(",
"self",
"int",
"y",
")"
] | def SetY(*args, **kwargs):
"""SetY(self, int y)"""
return _core_.MouseState_SetY(*args, **kwargs) | [
"def",
"SetY",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"MouseState_SetY",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L4486-L4488 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py | python | Decimal.__rdivmod__ | (self, other, context=None) | return other.__divmod__(self, context=context) | Swaps self/other and returns __divmod__. | Swaps self/other and returns __divmod__. | [
"Swaps",
"self",
"/",
"other",
"and",
"returns",
"__divmod__",
"."
] | def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context) | [
"def",
"__rdivmod__",
"(",
"self",
",",
"other",
",",
"context",
"=",
"None",
")",
":",
"other",
"=",
"_convert_other",
"(",
"other",
")",
"if",
"other",
"is",
"NotImplemented",
":",
"return",
"other",
"return",
"other",
".",
"__divmod__",
"(",
"self",
",",
"context",
"=",
"context",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py#L1459-L1464 |
|
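A small demonstration of the reflected path: `int.__divmod__` returns NotImplemented for a `Decimal` right operand, so Python dispatches to `Decimal.__rdivmod__`, which swaps the operands as shown above.

```python
from decimal import Decimal

# divmod(int, Decimal) falls back to Decimal.__rdivmod__(Decimal('2.5'), 7),
# which converts 7 and evaluates Decimal(7).__divmod__(Decimal('2.5')).
q, r = divmod(7, Decimal("2.5"))
print(q, r)  # Decimal('2') Decimal('2.0')
```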
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py3/pandas/core/dtypes/base.py | python | ExtensionDtype.na_value | (self) | return np.nan | Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary. | Default NA value to use for this type. | [
"Default",
"NA",
"value",
"to",
"use",
"for",
"this",
"type",
"."
] | def na_value(self) -> object:
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan | [
"def",
"na_value",
"(",
"self",
")",
"->",
"object",
":",
"return",
"np",
".",
"nan"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/dtypes/base.py#L140-L148 |
|
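An illustrative subclass overriding the `np.nan` default shown above; `JSONDtype` is a made-up name and the sketch omits the rest of the extension-type protocol:

```python
from pandas.api.extensions import ExtensionDtype

class JSONDtype(ExtensionDtype):
    """Toy dtype whose user-facing missing value is an empty dict."""
    name = "json"
    type = dict

    @property
    def na_value(self):
        # The "boxed" NA for this dtype, replacing the np.nan default.
        return {}

print(JSONDtype().na_value)  # {}
```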
panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/interval/FunctionInterval.py | python | PosHprInterval.__init__ | (self, nodePath, pos, hpr, duration = 0.0,
name = None, other = None) | __init__(nodePath, pos, hpr, duration, name) | __init__(nodePath, pos, hpr, duration, name) | [
"__init__",
"(",
"nodePath",
"pos",
"hpr",
"duration",
"name",
")"
] | def __init__(self, nodePath, pos, hpr, duration = 0.0,
name = None, other = None):
"""__init__(nodePath, pos, hpr, duration, name)
"""
# Create function
def posHprFunc(np = nodePath, pos = pos, hpr = hpr, other = other):
if other:
np.setPosHpr(other, pos, hpr)
else:
np.setPosHpr(pos, hpr)
# Determine name
if name is None:
name = 'PosHprInterval-%d' % PosHprInterval.posHprIntervalNum
PosHprInterval.posHprIntervalNum += 1
# Create function interval
FunctionInterval.__init__(self, posHprFunc, name = name) | [
"def",
"__init__",
"(",
"self",
",",
"nodePath",
",",
"pos",
",",
"hpr",
",",
"duration",
"=",
"0.0",
",",
"name",
"=",
"None",
",",
"other",
"=",
"None",
")",
":",
"# Create function",
"def",
"posHprFunc",
"(",
"np",
"=",
"nodePath",
",",
"pos",
"=",
"pos",
",",
"hpr",
"=",
"hpr",
",",
"other",
"=",
"other",
")",
":",
"if",
"other",
":",
"np",
".",
"setPosHpr",
"(",
"other",
",",
"pos",
",",
"hpr",
")",
"else",
":",
"np",
".",
"setPosHpr",
"(",
"pos",
",",
"hpr",
")",
"# Determine name",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"'PosHprInterval-%d'",
"%",
"PosHprInterval",
".",
"posHprIntervalNum",
"PosHprInterval",
".",
"posHprIntervalNum",
"+=",
"1",
"# Create function interval",
"FunctionInterval",
".",
"__init__",
"(",
"self",
",",
"posHprFunc",
",",
"name",
"=",
"name",
")"
] | https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/interval/FunctionInterval.py#L238-L253 |
||
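A short usage sketch, assuming a Panda3D scene with `model` as an existing NodePath; when played, the interval applies the position and orientation in a single step:

```python
from direct.interval.FunctionInterval import PosHprInterval
from panda3d.core import Point3, VBase3

# assumption: `model` is a NodePath already attached to the scene graph
ival = PosHprInterval(model, Point3(0, 10, 0), VBase3(90, 0, 0))
ival.start()  # snaps model to pos (0, 10, 0) and hpr (90, 0, 0)
```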
gklz1982/caffe-yolov2 | ebb27029db4ddc0d40e520634633b0fa9cdcc10d | scripts/cpp_lint.py | python | _Filters | () | return _cpplint_state.filters | Returns the module's list of output filters, as a list. | Returns the module's list of output filters, as a list. | [
"Returns",
"the",
"module",
"s",
"list",
"of",
"output",
"filters",
"as",
"a",
"list",
"."
] | def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters | [
"def",
"_Filters",
"(",
")",
":",
"return",
"_cpplint_state",
".",
"filters"
] | https://github.com/gklz1982/caffe-yolov2/blob/ebb27029db4ddc0d40e520634633b0fa9cdcc10d/scripts/cpp_lint.py#L792-L794 |
|
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/processor/conversion/aoc/upgrade_attribute_subprocessor.py | python | AoCUpgradeAttributeSubprocessor.reload_time_upgrade | (converter_group, line, value, operator, team=False) | return patches | Creates a patch for the reload time modify effect (ID: 10).
:param converter_group: Tech/Civ that gets the patch.
:type converter_group: ...dataformat.converter_object.ConverterObjectGroup
:param line: Unit/Building line that has the ability.
:type line: ...dataformat.converter_object.ConverterObjectGroup
:param value: Value used for patching the member.
:type value: MemberOperator
:param operator: Operator used for patching the member.
:type operator: MemberOperator
:returns: The forward references for the generated patches.
:rtype: list | Creates a patch for the reload time modify effect (ID: 10). | [
"Creates",
"a",
"patch",
"for",
"the",
"reload",
"time",
"modify",
"effect",
"(",
"ID",
":",
"10",
")",
"."
] | def reload_time_upgrade(converter_group, line, value, operator, team=False):
"""
Creates a patch for the reload time modify effect (ID: 10).
:param converter_group: Tech/Civ that gets the patch.
:type converter_group: ...dataformat.converter_object.ConverterObjectGroup
:param line: Unit/Building line that has the ability.
:type line: ...dataformat.converter_object.ConverterObjectGroup
:param value: Value used for patching the member.
:type value: MemberOperator
:param operator: Operator used for patching the member.
:type operator: MemberOperator
:returns: The forward references for the generated patches.
:rtype: list
"""
head_unit_id = line.get_head_unit_id()
dataset = line.data
patches = []
obj_id = converter_group.get_id()
if isinstance(converter_group, GenieTechEffectBundleGroup):
tech_lookup_dict = internal_name_lookups.get_tech_lookups(dataset.game_version)
obj_name = tech_lookup_dict[obj_id][0]
else:
civ_lookup_dict = internal_name_lookups.get_civ_lookups(dataset.game_version)
obj_name = civ_lookup_dict[obj_id][0]
name_lookup_dict = internal_name_lookups.get_entity_lookups(dataset.game_version)
game_entity_name = name_lookup_dict[head_unit_id][0]
if line.is_projectile_shooter():
patch_target_ref = f"{game_entity_name}.Attack"
patch_target_forward_ref = ForwardRef(line, patch_target_ref)
patch_target_parent = "engine.ability.type.ShootProjectile"
elif line.is_melee():
patch_target_ref = f"{game_entity_name}.Attack"
patch_target_forward_ref = ForwardRef(line, patch_target_ref)
patch_target_parent = "engine.ability.type.ApplyDiscreteEffect"
elif line.has_command(104):
patch_target_ref = f"{game_entity_name}.Convert"
patch_target_forward_ref = ForwardRef(line, patch_target_ref)
patch_target_parent = "engine.ability.type.ApplyDiscreteEffect"
else:
# No matching ability
return patches
# Wrapper
wrapper_name = f"Change{game_entity_name}ReloadTimeWrapper"
wrapper_ref = f"{obj_name}.{wrapper_name}"
wrapper_location = ForwardRef(converter_group, obj_name)
wrapper_raw_api_object = RawAPIObject(wrapper_ref,
wrapper_name,
dataset.nyan_api_objects,
wrapper_location)
wrapper_raw_api_object.add_raw_parent("engine.util.patch.Patch")
# Nyan patch
nyan_patch_name = f"Change{game_entity_name}ReloadTime"
nyan_patch_ref = f"{obj_name}.{wrapper_name}.{nyan_patch_name}"
nyan_patch_location = ForwardRef(converter_group, wrapper_ref)
nyan_patch_raw_api_object = RawAPIObject(nyan_patch_ref,
nyan_patch_name,
dataset.nyan_api_objects,
nyan_patch_location)
nyan_patch_raw_api_object.add_raw_parent("engine.util.patch.NyanPatch")
nyan_patch_raw_api_object.set_patch_target(patch_target_forward_ref)
nyan_patch_raw_api_object.add_raw_patch_member("reload_time",
value,
patch_target_parent,
operator)
patch_forward_ref = ForwardRef(converter_group, nyan_patch_ref)
wrapper_raw_api_object.add_raw_member("patch",
patch_forward_ref,
"engine.util.patch.Patch")
if team:
team_property = dataset.pregen_nyan_objects["util.patch.property.types.Team"].get_nyan_object()
properties = {
dataset.nyan_api_objects["engine.util.patch.property.type.Diplomatic"]: team_property
}
wrapper_raw_api_object.add_raw_member("properties",
properties,
"engine.util.patch.Patch")
converter_group.add_raw_api_object(wrapper_raw_api_object)
converter_group.add_raw_api_object(nyan_patch_raw_api_object)
wrapper_forward_ref = ForwardRef(converter_group, wrapper_ref)
patches.append(wrapper_forward_ref)
return patches | [
"def",
"reload_time_upgrade",
"(",
"converter_group",
",",
"line",
",",
"value",
",",
"operator",
",",
"team",
"=",
"False",
")",
":",
"head_unit_id",
"=",
"line",
".",
"get_head_unit_id",
"(",
")",
"dataset",
"=",
"line",
".",
"data",
"patches",
"=",
"[",
"]",
"obj_id",
"=",
"converter_group",
".",
"get_id",
"(",
")",
"if",
"isinstance",
"(",
"converter_group",
",",
"GenieTechEffectBundleGroup",
")",
":",
"tech_lookup_dict",
"=",
"internal_name_lookups",
".",
"get_tech_lookups",
"(",
"dataset",
".",
"game_version",
")",
"obj_name",
"=",
"tech_lookup_dict",
"[",
"obj_id",
"]",
"[",
"0",
"]",
"else",
":",
"civ_lookup_dict",
"=",
"internal_name_lookups",
".",
"get_civ_lookups",
"(",
"dataset",
".",
"game_version",
")",
"obj_name",
"=",
"civ_lookup_dict",
"[",
"obj_id",
"]",
"[",
"0",
"]",
"name_lookup_dict",
"=",
"internal_name_lookups",
".",
"get_entity_lookups",
"(",
"dataset",
".",
"game_version",
")",
"game_entity_name",
"=",
"name_lookup_dict",
"[",
"head_unit_id",
"]",
"[",
"0",
"]",
"if",
"line",
".",
"is_projectile_shooter",
"(",
")",
":",
"patch_target_ref",
"=",
"f\"{game_entity_name}.Attack\"",
"patch_target_forward_ref",
"=",
"ForwardRef",
"(",
"line",
",",
"patch_target_ref",
")",
"patch_target_parent",
"=",
"\"engine.ability.type.ShootProjectile\"",
"elif",
"line",
".",
"is_melee",
"(",
")",
":",
"patch_target_ref",
"=",
"f\"{game_entity_name}.Attack\"",
"patch_target_forward_ref",
"=",
"ForwardRef",
"(",
"line",
",",
"patch_target_ref",
")",
"patch_target_parent",
"=",
"\"engine.ability.type.ApplyDiscreteEffect\"",
"elif",
"line",
".",
"has_command",
"(",
"104",
")",
":",
"patch_target_ref",
"=",
"f\"{game_entity_name}.Convert\"",
"patch_target_forward_ref",
"=",
"ForwardRef",
"(",
"line",
",",
"patch_target_ref",
")",
"patch_target_parent",
"=",
"\"engine.ability.type.ApplyDiscreteEffect\"",
"else",
":",
"# No matching ability",
"return",
"patches",
"# Wrapper",
"wrapper_name",
"=",
"f\"Change{game_entity_name}ReloadTimeWrapper\"",
"wrapper_ref",
"=",
"f\"{obj_name}.{wrapper_name}\"",
"wrapper_location",
"=",
"ForwardRef",
"(",
"converter_group",
",",
"obj_name",
")",
"wrapper_raw_api_object",
"=",
"RawAPIObject",
"(",
"wrapper_ref",
",",
"wrapper_name",
",",
"dataset",
".",
"nyan_api_objects",
",",
"wrapper_location",
")",
"wrapper_raw_api_object",
".",
"add_raw_parent",
"(",
"\"engine.util.patch.Patch\"",
")",
"# Nyan patch",
"nyan_patch_name",
"=",
"f\"Change{game_entity_name}ReloadTime\"",
"nyan_patch_ref",
"=",
"f\"{obj_name}.{wrapper_name}.{nyan_patch_name}\"",
"nyan_patch_location",
"=",
"ForwardRef",
"(",
"converter_group",
",",
"wrapper_ref",
")",
"nyan_patch_raw_api_object",
"=",
"RawAPIObject",
"(",
"nyan_patch_ref",
",",
"nyan_patch_name",
",",
"dataset",
".",
"nyan_api_objects",
",",
"nyan_patch_location",
")",
"nyan_patch_raw_api_object",
".",
"add_raw_parent",
"(",
"\"engine.util.patch.NyanPatch\"",
")",
"nyan_patch_raw_api_object",
".",
"set_patch_target",
"(",
"patch_target_forward_ref",
")",
"nyan_patch_raw_api_object",
".",
"add_raw_patch_member",
"(",
"\"reload_time\"",
",",
"value",
",",
"patch_target_parent",
",",
"operator",
")",
"patch_forward_ref",
"=",
"ForwardRef",
"(",
"converter_group",
",",
"nyan_patch_ref",
")",
"wrapper_raw_api_object",
".",
"add_raw_member",
"(",
"\"patch\"",
",",
"patch_forward_ref",
",",
"\"engine.util.patch.Patch\"",
")",
"if",
"team",
":",
"team_property",
"=",
"dataset",
".",
"pregen_nyan_objects",
"[",
"\"util.patch.property.types.Team\"",
"]",
".",
"get_nyan_object",
"(",
")",
"properties",
"=",
"{",
"dataset",
".",
"nyan_api_objects",
"[",
"\"engine.util.patch.property.type.Diplomatic\"",
"]",
":",
"team_property",
"}",
"wrapper_raw_api_object",
".",
"add_raw_member",
"(",
"\"properties\"",
",",
"properties",
",",
"\"engine.util.patch.Patch\"",
")",
"converter_group",
".",
"add_raw_api_object",
"(",
"wrapper_raw_api_object",
")",
"converter_group",
".",
"add_raw_api_object",
"(",
"nyan_patch_raw_api_object",
")",
"wrapper_forward_ref",
"=",
"ForwardRef",
"(",
"converter_group",
",",
"wrapper_ref",
")",
"patches",
".",
"append",
"(",
"wrapper_forward_ref",
")",
"return",
"patches"
] | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/processor/conversion/aoc/upgrade_attribute_subprocessor.py#L1853-L1951 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/internals/blocks.py | python | Block._slice | (self, slicer) | return self.values[slicer] | return a slice of my values | return a slice of my values | [
"return",
"a",
"slice",
"of",
"my",
"values"
] | def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer] | [
"def",
"_slice",
"(",
"self",
",",
"slicer",
")",
":",
"return",
"self",
".",
"values",
"[",
"slicer",
"]"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/internals/blocks.py#L266-L268 |
|
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | third_party/protobuf/python/google/protobuf/message.py | python | Message.__setstate__ | (self, state) | Support the pickle protocol. | Support the pickle protocol. | [
"Support",
"the",
"pickle",
"protocol",
"."
] | def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized']) | [
"def",
"__setstate__",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"__init__",
"(",
")",
"self",
".",
"ParseFromString",
"(",
"state",
"[",
"'serialized'",
"]",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/protobuf/python/google/protobuf/message.py#L277-L280 |
||
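A pickle round trip exercising this hook; `addressbook_pb2.Person` stands in for any protoc-generated message type. The matching `__getstate__` stores `SerializeToString()` under the `'serialized'` key that `__setstate__` reads back:

```python
import pickle
from addressbook_pb2 import Person  # assumption: a generated module

p = Person(name="Ada")
blob = pickle.dumps(p)        # __getstate__ captures the wire bytes
clone = pickle.loads(blob)    # __setstate__ re-inits and re-parses them
assert clone == p
```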
intel/caffe | 3f494b442ee3f9d17a07b09ecbd5fa2bbda00836 | scripts/cpp_lint.py | python | GetHeaderGuardCPPVariable | (filename) | return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' | Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file. | Returns the CPP variable that should be used as a header guard. | [
"Returns",
"the",
"CPP",
"variable",
"that",
"should",
"be",
"used",
"as",
"a",
"header",
"guard",
"."
] | def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' | [
"def",
"GetHeaderGuardCPPVariable",
"(",
"filename",
")",
":",
"# Restores original filename in case that cpplint is invoked from Emacs's",
"# flymake.",
"filename",
"=",
"re",
".",
"sub",
"(",
"r'_flymake\\.h$'",
",",
"'.h'",
",",
"filename",
")",
"filename",
"=",
"re",
".",
"sub",
"(",
"r'/\\.flymake/([^/]*)$'",
",",
"r'/\\1'",
",",
"filename",
")",
"fileinfo",
"=",
"FileInfo",
"(",
"filename",
")",
"file_path_from_root",
"=",
"fileinfo",
".",
"RepositoryName",
"(",
")",
"if",
"_root",
":",
"file_path_from_root",
"=",
"re",
".",
"sub",
"(",
"'^'",
"+",
"_root",
"+",
"os",
".",
"sep",
",",
"''",
",",
"file_path_from_root",
")",
"return",
"re",
".",
"sub",
"(",
"r'[-./\\s]'",
",",
"'_'",
",",
"file_path_from_root",
")",
".",
"upper",
"(",
")",
"+",
"'_'"
] | https://github.com/intel/caffe/blob/3f494b442ee3f9d17a07b09ecbd5fa2bbda00836/scripts/cpp_lint.py#L1384-L1405 |
|
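The final substitution step can be checked in isolation; this standalone sketch mirrors the return statement above (path separators, dots, dashes and whitespace become underscores, upper-cased, with a trailing underscore):

```python
import re

def guard_from_path(file_path_from_root):
    # Mirrors the last line of GetHeaderGuardCPPVariable.
    return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'

print(guard_from_path('src/foo/bar-baz.h'))  # SRC_FOO_BAR_BAZ_H_
```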
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/src/extern/flatnotebook.py | python | PageInfo.SetPosition | (self, value) | Sets the tab position. | Sets the tab position. | [
"Sets",
"the",
"tab",
"position",
"."
] | def SetPosition(self, value):
""" Sets the tab position. """
self._pos = value | [
"def",
"SetPosition",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_pos",
"=",
"value"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/extern/flatnotebook.py#L924-L927 |
||
zeroc-ice/ice | 6df7df6039674d58fb5ab9a08e46f28591a210f7 | python/python/Ice/__init__.py | python | BatchRequestInterceptor.enqueue | (self, request, queueCount, queueSize) | Invoked when a request is batched. | Invoked when a request is batched. | [
"Invoked",
"when",
"a",
"request",
"is",
"batched",
"."
] | def enqueue(self, request, queueCount, queueSize):
'''Invoked when a request is batched.'''
pass | [
"def",
"enqueue",
"(",
"self",
",",
"request",
",",
"queueCount",
",",
"queueSize",
")",
":",
"pass"
] | https://github.com/zeroc-ice/ice/blob/6df7df6039674d58fb5ab9a08e46f28591a210f7/python/python/Ice/__init__.py#L834-L836 |
||
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/internal/python_message.py | python | _ExtensionDict.__getitem__ | (self, extension_handle) | return result | Returns the current value of the given extension handle. | Returns the current value of the given extension handle. | [
"Returns",
"the",
"current",
"value",
"of",
"the",
"given",
"extension",
"handle",
"."
] | def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
# Singular scalar -- just return the default without inserting into the
# dict.
return extension_handle.default_value
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result | [
"def",
"__getitem__",
"(",
"self",
",",
"extension_handle",
")",
":",
"_VerifyExtensionHandle",
"(",
"self",
".",
"_extended_message",
",",
"extension_handle",
")",
"result",
"=",
"self",
".",
"_extended_message",
".",
"_fields",
".",
"get",
"(",
"extension_handle",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"if",
"extension_handle",
".",
"label",
"==",
"_FieldDescriptor",
".",
"LABEL_REPEATED",
":",
"result",
"=",
"extension_handle",
".",
"_default_constructor",
"(",
"self",
".",
"_extended_message",
")",
"elif",
"extension_handle",
".",
"cpp_type",
"==",
"_FieldDescriptor",
".",
"CPPTYPE_MESSAGE",
":",
"result",
"=",
"extension_handle",
".",
"message_type",
".",
"_concrete_class",
"(",
")",
"try",
":",
"result",
".",
"_SetListener",
"(",
"self",
".",
"_extended_message",
".",
"_listener_for_children",
")",
"except",
"ReferenceError",
":",
"pass",
"else",
":",
"# Singular scalar -- just return the default without inserting into the",
"# dict.",
"return",
"extension_handle",
".",
"default_value",
"# Atomically check if another thread has preempted us and, if not, swap",
"# in the new object we just created. If someone has preempted us, we",
"# take that object and discard ours.",
"# WARNING: We are relying on setdefault() being atomic. This is true",
"# in CPython but we haven't investigated others. This warning appears",
"# in several other locations in this file.",
"result",
"=",
"self",
".",
"_extended_message",
".",
"_fields",
".",
"setdefault",
"(",
"extension_handle",
",",
"result",
")",
"return",
"result"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/internal/python_message.py#L1453-L1484 |
|
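A hedged sketch of the lookup behaviour described above; `ext_pb2` is a hypothetical module compiled from a .proto that declares `extend Base { optional int32 level = 100; }`:

```python
import ext_pb2  # assumption: generated from a .proto with an extension

msg = ext_pb2.Base()
# An unset singular scalar extension returns its default value without
# inserting anything into the message's _fields dict.
print(msg.Extensions[ext_pb2.level])  # 0
msg.Extensions[ext_pb2.level] = 7
print(msg.Extensions[ext_pb2.level])  # 7
```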
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/requests/cookies.py | python | RequestsCookieJar.update | (self, other) | Updates this jar with cookies from another CookieJar or dict-like | Updates this jar with cookies from another CookieJar or dict-like | [
"Updates",
"this",
"jar",
"with",
"cookies",
"from",
"another",
"CookieJar",
"or",
"dict",
"-",
"like"
] | def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other) | [
"def",
"update",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"cookielib",
".",
"CookieJar",
")",
":",
"for",
"cookie",
"in",
"other",
":",
"self",
".",
"set_cookie",
"(",
"copy",
".",
"copy",
"(",
"cookie",
")",
")",
"else",
":",
"super",
"(",
"RequestsCookieJar",
",",
"self",
")",
".",
"update",
"(",
"other",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/requests/cookies.py#L348-L354 |
||
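A quick demonstration of the CookieJar branch, using only public `requests.cookies` helpers:

```python
from requests.cookies import RequestsCookieJar, cookiejar_from_dict

jar = RequestsCookieJar()
jar.set("session", "abc", domain="example.com", path="/")

other = cookiejar_from_dict({"theme": "dark"})
jar.update(other)          # each cookie is copied via set_cookie(copy(...))

print(sorted(jar.keys()))  # ['session', 'theme']
```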
google/earthenterprise | 0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9 | earth_enterprise/src/google/protobuf-py/ez_setup.py | python | main | (argv, version=DEFAULT_VERSION) | Install or upgrade setuptools and EasyInstall | Install or upgrade setuptools and EasyInstall | [
"Install",
"or",
"upgrade",
"setuptools",
"and",
"EasyInstall"
] | def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' | [
"def",
"main",
"(",
"argv",
",",
"version",
"=",
"DEFAULT_VERSION",
")",
":",
"try",
":",
"import",
"setuptools",
"except",
"ImportError",
":",
"egg",
"=",
"None",
"try",
":",
"egg",
"=",
"download_setuptools",
"(",
"version",
",",
"delay",
"=",
"0",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"egg",
")",
"from",
"setuptools",
".",
"command",
".",
"easy_install",
"import",
"main",
"return",
"main",
"(",
"list",
"(",
"argv",
")",
"+",
"[",
"egg",
"]",
")",
"# we're done here",
"finally",
":",
"if",
"egg",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"egg",
")",
":",
"os",
".",
"unlink",
"(",
"egg",
")",
"else",
":",
"if",
"setuptools",
".",
"__version__",
"==",
"'0.0.1'",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"You have an obsolete version of setuptools installed. Please\\n\"",
"\"remove it from your system entirely before rerunning this script.\"",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"req",
"=",
"\"setuptools>=\"",
"+",
"version",
"import",
"pkg_resources",
"try",
":",
"pkg_resources",
".",
"require",
"(",
"req",
")",
"except",
"pkg_resources",
".",
"VersionConflict",
":",
"try",
":",
"from",
"setuptools",
".",
"command",
".",
"easy_install",
"import",
"main",
"except",
"ImportError",
":",
"from",
"easy_install",
"import",
"main",
"main",
"(",
"list",
"(",
"argv",
")",
"+",
"[",
"download_setuptools",
"(",
"delay",
"=",
"0",
")",
"]",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"# try to force an exit",
"else",
":",
"if",
"argv",
":",
"from",
"setuptools",
".",
"command",
".",
"easy_install",
"import",
"main",
"main",
"(",
"argv",
")",
"else",
":",
"print",
"\"Setuptools version\"",
",",
"version",
",",
"\"or greater has been installed.\"",
"print",
"'(Run \"ez_setup.py -U setuptools\" to reinstall or upgrade.)'"
] | https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/google/protobuf-py/ez_setup.py#L223-L262 |
||
macchina-io/macchina.io | ef24ba0e18379c3dd48fb84e6dbf991101cb8db0 | platform/JS/V8/v8/gypfiles/vs_toolchain.py | python | GetVisualStudioVersion | () | return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION) | Return GYP_MSVS_VERSION of Visual Studio. | Return GYP_MSVS_VERSION of Visual Studio. | [
"Return",
"GYP_MSVS_VERSION",
"of",
"Visual",
"Studio",
"."
] | def GetVisualStudioVersion():
"""Return GYP_MSVS_VERSION of Visual Studio.
"""
return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION) | [
"def",
"GetVisualStudioVersion",
"(",
")",
":",
"return",
"os",
".",
"environ",
".",
"get",
"(",
"'GYP_MSVS_VERSION'",
",",
"CURRENT_DEFAULT_TOOLCHAIN_VERSION",
")"
] | https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/gypfiles/vs_toolchain.py#L110-L113 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/decomposition/dict_learning.py | python | dict_learning | (X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False) | Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init : array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
verbose :
Degree of output the procedure will print.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA | Solves a dictionary learning matrix factorization problem. | [
"Solves",
"a",
"dictionary",
"learning",
"matrix",
"factorization",
"problem",
"."
] | def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
max_iter : int,
Maximum number of iterations to perform.
tol : float,
Tolerance for the stopping condition.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init : array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
verbose :
Degree of output the procedure will print.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors | [
"def",
"dict_learning",
"(",
"X",
",",
"n_components",
",",
"alpha",
",",
"max_iter",
"=",
"100",
",",
"tol",
"=",
"1e-8",
",",
"method",
"=",
"'lars'",
",",
"n_jobs",
"=",
"1",
",",
"dict_init",
"=",
"None",
",",
"code_init",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"verbose",
"=",
"False",
",",
"random_state",
"=",
"None",
",",
"return_n_iter",
"=",
"False",
")",
":",
"if",
"method",
"not",
"in",
"(",
"'lars'",
",",
"'cd'",
")",
":",
"raise",
"ValueError",
"(",
"'Coding method %r not supported as a fit algorithm.'",
"%",
"method",
")",
"method",
"=",
"'lasso_'",
"+",
"method",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"# Avoid integer division problems",
"alpha",
"=",
"float",
"(",
"alpha",
")",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"if",
"n_jobs",
"==",
"-",
"1",
":",
"n_jobs",
"=",
"cpu_count",
"(",
")",
"# Init the code and the dictionary with SVD of Y",
"if",
"code_init",
"is",
"not",
"None",
"and",
"dict_init",
"is",
"not",
"None",
":",
"code",
"=",
"np",
".",
"array",
"(",
"code_init",
",",
"order",
"=",
"'F'",
")",
"# Don't copy V, it will happen below",
"dictionary",
"=",
"dict_init",
"else",
":",
"code",
",",
"S",
",",
"dictionary",
"=",
"linalg",
".",
"svd",
"(",
"X",
",",
"full_matrices",
"=",
"False",
")",
"dictionary",
"=",
"S",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"*",
"dictionary",
"r",
"=",
"len",
"(",
"dictionary",
")",
"if",
"n_components",
"<=",
"r",
":",
"# True even if n_components=None",
"code",
"=",
"code",
"[",
":",
",",
":",
"n_components",
"]",
"dictionary",
"=",
"dictionary",
"[",
":",
"n_components",
",",
":",
"]",
"else",
":",
"code",
"=",
"np",
".",
"c_",
"[",
"code",
",",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"code",
")",
",",
"n_components",
"-",
"r",
")",
")",
"]",
"dictionary",
"=",
"np",
".",
"r_",
"[",
"dictionary",
",",
"np",
".",
"zeros",
"(",
"(",
"n_components",
"-",
"r",
",",
"dictionary",
".",
"shape",
"[",
"1",
"]",
")",
")",
"]",
"# Fortran-order dict, as we are going to access its row vectors",
"dictionary",
"=",
"np",
".",
"array",
"(",
"dictionary",
",",
"order",
"=",
"'F'",
")",
"residuals",
"=",
"0",
"errors",
"=",
"[",
"]",
"current_cost",
"=",
"np",
".",
"nan",
"if",
"verbose",
"==",
"1",
":",
"print",
"(",
"'[dict_learning]'",
",",
"end",
"=",
"' '",
")",
"# If max_iter is 0, number of iterations returned should be zero",
"ii",
"=",
"-",
"1",
"for",
"ii",
"in",
"range",
"(",
"max_iter",
")",
":",
"dt",
"=",
"(",
"time",
".",
"time",
"(",
")",
"-",
"t0",
")",
"if",
"verbose",
"==",
"1",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\".\"",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"elif",
"verbose",
":",
"print",
"(",
"\"Iteration % 3i \"",
"\"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)\"",
"%",
"(",
"ii",
",",
"dt",
",",
"dt",
"/",
"60",
",",
"current_cost",
")",
")",
"# Update code",
"code",
"=",
"sparse_encode",
"(",
"X",
",",
"dictionary",
",",
"algorithm",
"=",
"method",
",",
"alpha",
"=",
"alpha",
",",
"init",
"=",
"code",
",",
"n_jobs",
"=",
"n_jobs",
")",
"# Update dictionary",
"dictionary",
",",
"residuals",
"=",
"_update_dict",
"(",
"dictionary",
".",
"T",
",",
"X",
".",
"T",
",",
"code",
".",
"T",
",",
"verbose",
"=",
"verbose",
",",
"return_r2",
"=",
"True",
",",
"random_state",
"=",
"random_state",
")",
"dictionary",
"=",
"dictionary",
".",
"T",
"# Cost function",
"current_cost",
"=",
"0.5",
"*",
"residuals",
"+",
"alpha",
"*",
"np",
".",
"sum",
"(",
"np",
".",
"abs",
"(",
"code",
")",
")",
"errors",
".",
"append",
"(",
"current_cost",
")",
"if",
"ii",
">",
"0",
":",
"dE",
"=",
"errors",
"[",
"-",
"2",
"]",
"-",
"errors",
"[",
"-",
"1",
"]",
"# assert(dE >= -tol * errors[-1])",
"if",
"dE",
"<",
"tol",
"*",
"errors",
"[",
"-",
"1",
"]",
":",
"if",
"verbose",
"==",
"1",
":",
"# A line return",
"print",
"(",
"\"\"",
")",
"elif",
"verbose",
":",
"print",
"(",
"\"--- Convergence reached after %d iterations\"",
"%",
"ii",
")",
"break",
"if",
"ii",
"%",
"5",
"==",
"0",
"and",
"callback",
"is",
"not",
"None",
":",
"callback",
"(",
"locals",
"(",
")",
")",
"if",
"return_n_iter",
":",
"return",
"code",
",",
"dictionary",
",",
"errors",
",",
"ii",
"+",
"1",
"else",
":",
"return",
"code",
",",
"dictionary",
",",
"errors"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/decomposition/dict_learning.py#L378-L546 |
||
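A runnable sketch against random data; the shapes follow the docstring above (code is n_samples x n_components, dictionary is n_components x n_features):

```python
import numpy as np
from sklearn.decomposition import dict_learning

rng = np.random.RandomState(0)
X = rng.randn(50, 8)

# Factor X ~ code @ dictionary with an L1 penalty of alpha on the code.
code, dictionary, errors = dict_learning(
    X, n_components=5, alpha=1.0, max_iter=20, method="lars",
    random_state=rng)

print(code.shape, dictionary.shape)  # (50, 5) (5, 8)
print(len(errors))                   # one cost value per iteration run
```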
andre-martins/TurboParser | a87b8e45694c18b826bb3c42e8344bd32928007d | python/tokenizer/universal_word_tokenizer.py | python | UniversalWordTokenizer.tokenize | (self, text, track_positions=False) | Return a tokenized copy of *s*.
:rtype: list of str | Return a tokenized copy of *s*. | [
"Return",
"a",
"tokenized",
"copy",
"of",
"*",
"s",
"*",
"."
] | def tokenize(self, text, track_positions=False):
"""
Return a tokenized copy of *s*.
:rtype: list of str
"""
if track_positions:
from aligned_string import AlignedString
text = AlignedString(text)
sub = substitute_with_positions
else:
sub = substitute_without_positions
# Replace non-breaking spaces by spaces.
# Note: the Portuguese sentence tokenizer should also do this!!
text = sub(ur'\u00A0', ' ', text)
# Replace tabs by spaces [ATM 3/12/2014].
text = sub(ur'\t', ' ', text)
# Replace U+0096 by dashes.
text = sub(ur'\u0096', ' -- ', text)
if self.language == u'pt-cintil':
# Replace all parenthesis by single quotes.
# This looks a really terrible idea. However, since there are
# no sentences with parenthesis in the CINTIL corpus (I don't
# know why), further processing units in the pipeline (such as
# a POS tagger or a parser) trained on that corpora would get
# confused and so stupid things. Pretending everything is a
# single quote seems to be the least of all evils.
text = sub(r'\(|\)', '\'', text)
if self.language == u'pt-cintil':
# Replace all quotes by single quotes.
# This looks a terrible idea, but necessary for consistency with
# the CINTIL corpus.
text = sub(ur'"|«|»|``|“|”|\'|`', '\' ', text)
else:
# starting quotes.
text = sub(ur'„', r'``', text) # Non-ASCII starting quotes.
text = sub(ur'«', r'``', text) # Non-ASCII starting quotes.
text = sub(ur'»', r'"', text) # Non-ASCII ending quotes.
# In German this is actually the ending quote.
text = sub(ur'“', r'``', text) # Non-ASCII starting quotes.
text = sub(ur'”', r'"', text) # Non-ASCII ending quotes.
text = sub(r'^\"', r'``', text)
text = sub(r'(``)', r' \1 ', text)
text = sub(r'([ (\[{<])"', r'\1 `` ', text)
# Special apostrophe or single quote.
text = sub(ur'’', '\'', text)
# I added these for single quotes -- to avoid things like
# "o 'apartheid social ' ".
# However, we excluded this for English for now to handle well
# contractions such as "can't -> ca + n't".
if self.language not in ['en', 'en-ptb', 'fr']:
text = sub(ur'\'', '\' ', text)
else:
text = sub(ur'([^\p{IsAlpha}])\'', ur"\1' ", text)
text = sub(ur'^\'', ur"' ", text)
if self.language != 'en-ptb':
# No special coding of starting quotes.
text = sub(ur' `` ', r' " ', text)
text = sub(ur" '' ", r' " ', text)
# Punctuation.
text = sub(ur'([:,])([^\d])', ur' \1 \2', text)
text = sub(ur'\.\.\.', ur' ... ', text)
# text = sub(ur'[;@#$%&]', ur' \g<0> ', text)
text = sub(ur'([;@#$%&])', ur' \1 ', text)
text = sub(ur'([^\.])(\.)([\]\)}>"\']*)\s*$', ur'\1 \2\3 ', text)
# text = sub(ur'[?!]', ur' \g<0> ', text)
text = sub(ur'([?!])', ur' \1 ', text)
if self.language in ['en', 'en-ptb']:
text = sub(ur"([^'])' ", ur"\1 ' ", text)
else:
text = sub(ur"([^'])'", ur"\1' ", text)
text = sub(ur"([^\p{IsAlpha}])' ", ur"\1 ' ", text)
# Parens, brackets, etc.
# text = sub(ur'[\]\[\(\)\{\}\<\>]', ur' \g<0> ', text)
text = sub(ur'([\]\[\(\)\{\}\<\>])', ur' \1 ', text)
text = sub(ur'([^-])---([^-])', ur'\1 -- \2', text)
text = sub(ur'([^-])--([^-])', ur'\1 -- \2', text)
# Add extra space to make things easier.
# text = " " + text + " "
# Ending quotes.
if self.language == 'en-ptb':
text = sub(r'"', " '' ", text)
else:
text = sub(r'"', ' " ', text)
text = sub(r'(\S)(\'\')', r'\1 \2 ', text)
# Clean up extraneous spaces.
# text = sub(" +", " ", text)
# text = text.strip()
text = sub(r' +', r' ', text)
text = sub(r'^ ', r'', text)
text = sub(r' $', r'', text)
# Add space at end to match up with MacIntyre's output (for debugging).
# if text != "":
# text += " "
if track_positions:
initial_tokens = text.string.split(u' ')
offset = 0
positions = []
tokens = []
for token in initial_tokens:
length = len(token)
if length > 0:
start = text.start_positions[offset]
end = text.end_positions[offset + length - 1]
subtokens = self.contraction_splitter.split_if_contraction(
token).split()
for subtoken in subtokens:
positions.append((start, end))
tokens.append(subtoken)
offset += length + 1
return tokens, positions
else:
# Split on contractions and clitics.
words = text.split(' ')
words = map(self.contraction_splitter.split_if_contraction, words)
text = ' '.join(words)
return text.split() | [
"def",
"tokenize",
"(",
"self",
",",
"text",
",",
"track_positions",
"=",
"False",
")",
":",
"if",
"track_positions",
":",
"from",
"aligned_string",
"import",
"AlignedString",
"text",
"=",
"AlignedString",
"(",
"text",
")",
"sub",
"=",
"substitute_with_positions",
"else",
":",
"sub",
"=",
"substitute_without_positions",
"# Replace non-breaking spaces by spaces.",
"# Note: the Portuguese sentence tokenizer should also do this!!",
"text",
"=",
"sub",
"(",
"ur'\\u00A0'",
",",
"' '",
",",
"text",
")",
"# Replace tabs by spaces [ATM 3/12/2014].",
"text",
"=",
"sub",
"(",
"ur'\\t'",
",",
"' '",
",",
"text",
")",
"# Replace U+0096 by dashes.",
"text",
"=",
"sub",
"(",
"ur'\\u0096'",
",",
"' -- '",
",",
"text",
")",
"if",
"self",
".",
"language",
"==",
"u'pt-cintil'",
":",
"# Replace all parenthesis by single quotes.",
"# This looks a really terrible idea. However, since there are",
"# no sentences with parenthesis in the CINTIL corpus (I don't",
"# know why), further processing units in the pipeline (such as",
"# a POS tagger or a parser) trained on that corpora would get",
"# confused and so stupid things. Pretending everything is a",
"# single quote seems to be the least of all evils.",
"text",
"=",
"sub",
"(",
"r'\\(|\\)'",
",",
"'\\''",
",",
"text",
")",
"if",
"self",
".",
"language",
"==",
"u'pt-cintil'",
":",
"# Replace all quotes by single quotes.",
"# This looks a terrible idea, but necessary for consistency with",
"# the CINTIL corpus.",
"text",
"=",
"sub",
"(",
"ur'\"|«|»|``|“|”|\\'|`', '\\' ",
"'",
" text",
")",
"",
"",
"else",
":",
"# starting quotes.",
"text",
"=",
"sub",
"(",
"ur'„', ",
"r",
"``', ",
"t",
"xt) ",
" ",
"Non-ASCII starting quotes.",
"text",
"=",
"sub",
"(",
"ur'«',",
" ",
"'``',",
" ",
"ext)",
" ",
" Non-ASCII starting quotes.",
"text",
"=",
"sub",
"(",
"ur'»',",
" ",
"'\"',",
" ",
"ext)",
" ",
" Non-ASCII ending quotes.",
"# In German this is actually the ending quote.",
"text",
"=",
"sub",
"(",
"ur'“', ",
"r",
"``', ",
"t",
"xt) ",
" ",
"Non-ASCII starting quotes.",
"text",
"=",
"sub",
"(",
"ur'”', ",
"r",
"\"', ",
"t",
"xt) ",
" ",
"Non-ASCII ending quotes.",
"text",
"=",
"sub",
"(",
"r'^\\\"'",
",",
"r'``'",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"r'(``)'",
",",
"r' \\1 '",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"r'([ (\\[{<])\"'",
",",
"r'\\1 `` '",
",",
"text",
")",
"# Special apostrophe or single quote.",
"text",
"=",
"sub",
"(",
"ur'’', ",
"'",
"'', ",
"t",
"xt)",
"",
"# I added these for single quotes -- to avoid things like",
"# \"o 'apartheid social ' \".",
"# However, we excluded this for English for now to handle well",
"# contractions such as \"can't -> ca + n't\".",
"if",
"self",
".",
"language",
"not",
"in",
"[",
"'en'",
",",
"'en-ptb'",
",",
"'fr'",
"]",
":",
"text",
"=",
"sub",
"(",
"ur'\\''",
",",
"'\\' '",
",",
"text",
")",
"else",
":",
"text",
"=",
"sub",
"(",
"ur'([^\\p{IsAlpha}])\\''",
",",
"ur\"\\1' \"",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur'^\\''",
",",
"ur\"' \"",
",",
"text",
")",
"if",
"self",
".",
"language",
"!=",
"'en-ptb'",
":",
"# No special coding of starting quotes.",
"text",
"=",
"sub",
"(",
"ur' `` '",
",",
"r' \" '",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur\" '' \"",
",",
"r' \" '",
",",
"text",
")",
"# Punctuation.",
"text",
"=",
"sub",
"(",
"ur'([:,])([^\\d])'",
",",
"ur' \\1 \\2'",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur'\\.\\.\\.'",
",",
"ur' ... '",
",",
"text",
")",
"# text = sub(ur'[;@#$%&]', ur' \\g<0> ', text)",
"text",
"=",
"sub",
"(",
"ur'([;@#$%&])'",
",",
"ur' \\1 '",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur'([^\\.])(\\.)([\\]\\)}>\"\\']*)\\s*$'",
",",
"ur'\\1 \\2\\3 '",
",",
"text",
")",
"# text = sub(ur'[?!]', ur' \\g<0> ', text)",
"text",
"=",
"sub",
"(",
"ur'([?!])'",
",",
"ur' \\1 '",
",",
"text",
")",
"if",
"self",
".",
"language",
"in",
"[",
"'en'",
",",
"'en-ptb'",
"]",
":",
"text",
"=",
"sub",
"(",
"ur\"([^'])' \"",
",",
"ur\"\\1 ' \"",
",",
"text",
")",
"else",
":",
"text",
"=",
"sub",
"(",
"ur\"([^'])'\"",
",",
"ur\"\\1' \"",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur\"([^\\p{IsAlpha}])' \"",
",",
"ur\"\\1 ' \"",
",",
"text",
")",
"# Parens, brackets, etc.",
"# text = sub(ur'[\\]\\[\\(\\)\\{\\}\\<\\>]', ur' \\g<0> ', text)",
"text",
"=",
"sub",
"(",
"ur'([\\]\\[\\(\\)\\{\\}\\<\\>])'",
",",
"ur' \\1 '",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur'([^-])---([^-])'",
",",
"ur'\\1 -- \\2'",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"ur'([^-])--([^-])'",
",",
"ur' -- '",
",",
"text",
")",
"# Add extra space to make things easier.",
"# text = \" \" + text + \" \"",
"# Ending quotes.",
"if",
"self",
".",
"language",
"==",
"'en-ptb'",
":",
"text",
"=",
"sub",
"(",
"r'\"'",
",",
"\" '' \"",
",",
"text",
")",
"else",
":",
"text",
"=",
"sub",
"(",
"r'\"'",
",",
"' \" '",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"r'(\\S)(\\'\\')'",
",",
"r'\\1 \\2 '",
",",
"text",
")",
"# Clean up extraneous spaces.",
"# text = sub(\" +\", \" \", text)",
"# text = text.strip()",
"text",
"=",
"sub",
"(",
"r' +'",
",",
"r' '",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"r'^ '",
",",
"r''",
",",
"text",
")",
"text",
"=",
"sub",
"(",
"r' $'",
",",
"r''",
",",
"text",
")",
"# Add space at end to match up with MacIntyre's output (for debugging).",
"# if text != \"\":",
"# text += \" \"",
"if",
"track_positions",
":",
"initial_tokens",
"=",
"text",
".",
"string",
".",
"split",
"(",
"u' '",
")",
"offset",
"=",
"0",
"positions",
"=",
"[",
"]",
"tokens",
"=",
"[",
"]",
"for",
"token",
"in",
"initial_tokens",
":",
"length",
"=",
"len",
"(",
"token",
")",
"if",
"length",
">",
"0",
":",
"start",
"=",
"text",
".",
"start_positions",
"[",
"offset",
"]",
"end",
"=",
"text",
".",
"end_positions",
"[",
"offset",
"+",
"length",
"-",
"1",
"]",
"subtokens",
"=",
"self",
".",
"contraction_splitter",
".",
"split_if_contraction",
"(",
"token",
")",
".",
"split",
"(",
")",
"for",
"subtoken",
"in",
"subtokens",
":",
"positions",
".",
"append",
"(",
"(",
"start",
",",
"end",
")",
")",
"tokens",
".",
"append",
"(",
"subtoken",
")",
"offset",
"+=",
"length",
"+",
"1",
"return",
"tokens",
",",
"positions",
"else",
":",
"# Split on contractions and clitics.",
"words",
"=",
"text",
".",
"split",
"(",
"' '",
")",
"words",
"=",
"map",
"(",
"self",
".",
"contraction_splitter",
".",
"split_if_contraction",
",",
"words",
")",
"text",
"=",
"' '",
".",
"join",
"(",
"words",
")",
"return",
"text",
".",
"split",
"(",
")"
] | https://github.com/andre-martins/TurboParser/blob/a87b8e45694c18b826bb3c42e8344bd32928007d/python/tokenizer/universal_word_tokenizer.py#L75-L207 |
||
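A hedged usage sketch; the constructor signature is not shown in this record, so the `language=` keyword is an assumption, and the token output is indicative only:

```python
# assumption: the class takes a language code at construction time
tokenizer = UniversalWordTokenizer(language='en-ptb')

print(tokenizer.tokenize(u'I can\'t say "no".'))
# indicative output: ['I', 'ca', "n't", 'say', '``', 'no', "''", '.']

tokens, spans = tokenizer.tokenize(u"John's book", track_positions=True)
# spans[i] holds the (start, end) character offsets of tokens[i].
```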
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/jira/client.py | python | JIRA.priorities | (self) | return priorities | Get a list of priority Resources from the server. | Get a list of priority Resources from the server. | [
"Get",
"a",
"list",
"of",
"priority",
"Resources",
"from",
"the",
"server",
"."
] | def priorities(self):
"""Get a list of priority Resources from the server."""
r_json = self._get_json('priority')
priorities = [Priority(
self._options, self._session, raw_priority_json) for raw_priority_json in r_json]
return priorities | [
"def",
"priorities",
"(",
"self",
")",
":",
"r_json",
"=",
"self",
".",
"_get_json",
"(",
"'priority'",
")",
"priorities",
"=",
"[",
"Priority",
"(",
"self",
".",
"_options",
",",
"self",
".",
"_session",
",",
"raw_priority_json",
")",
"for",
"raw_priority_json",
"in",
"r_json",
"]",
"return",
"priorities"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/jira/client.py#L1835-L1840 |
|
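Typical client-side use, assuming a reachable JIRA server; the URL and credentials are placeholders:

```python
from jira import JIRA

client = JIRA(server="https://jira.example.com",
              basic_auth=("user", "secret"))  # placeholder credentials
for priority in client.priorities():
    print(priority.id, priority.name)  # e.g. 1 Blocker, 2 Critical, ...
```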
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TFlt.IsNum | (self, *args) | return _snap.TFlt_IsNum(self, *args) | IsNum(TFlt self, double const & Val) -> bool
Parameters:
Val: double const &
IsNum(TFlt self) -> bool
Parameters:
self: TFlt const * | IsNum(TFlt self, double const & Val) -> bool | [
"IsNum",
"(",
"TFlt",
"self",
"double",
"const",
"&",
"Val",
")",
"-",
">",
"bool"
] | def IsNum(self, *args):
"""
IsNum(TFlt self, double const & Val) -> bool
Parameters:
Val: double const &
IsNum(TFlt self) -> bool
Parameters:
self: TFlt const *
"""
return _snap.TFlt_IsNum(self, *args) | [
"def",
"IsNum",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_snap",
".",
"TFlt_IsNum",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L14498-L14511 |
|
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/FS.py | python | Dir.walk | (self, func, arg) | Walk this directory tree by calling the specified function
for each directory in the tree.
This behaves like the os.path.walk() function, but for in-memory
Node.FS.Dir objects. The function takes the same arguments as
the functions passed to os.path.walk():
func(arg, dirname, fnames)
Except that "dirname" will actually be the directory *Node*,
not the string. The '.' and '..' entries are excluded from
fnames. The fnames list may be modified in-place to filter the
subdirectories visited or otherwise impose a specific order.
The "arg" argument is always passed to func() and may be used
in any way (or ignored, passing None is common). | Walk this directory tree by calling the specified function
for each directory in the tree. | [
"Walk",
"this",
"directory",
"tree",
"by",
"calling",
"the",
"specified",
"function",
"for",
"each",
"directory",
"in",
"the",
"tree",
"."
] | def walk(self, func, arg):
"""
Walk this directory tree by calling the specified function
for each directory in the tree.
This behaves like the os.path.walk() function, but for in-memory
Node.FS.Dir objects. The function takes the same arguments as
the functions passed to os.path.walk():
func(arg, dirname, fnames)
Except that "dirname" will actually be the directory *Node*,
not the string. The '.' and '..' entries are excluded from
fnames. The fnames list may be modified in-place to filter the
subdirectories visited or otherwise impose a specific order.
The "arg" argument is always passed to func() and may be used
in any way (or ignored, passing None is common).
"""
entries = self.entries
names = list(entries.keys())
names.remove('.')
names.remove('..')
func(arg, self, names)
for dirname in [n for n in names if isinstance(entries[n], Dir)]:
entries[dirname].walk(func, arg) | [
"def",
"walk",
"(",
"self",
",",
"func",
",",
"arg",
")",
":",
"entries",
"=",
"self",
".",
"entries",
"names",
"=",
"list",
"(",
"entries",
".",
"keys",
"(",
")",
")",
"names",
".",
"remove",
"(",
"'.'",
")",
"names",
".",
"remove",
"(",
"'..'",
")",
"func",
"(",
"arg",
",",
"self",
",",
"names",
")",
"for",
"dirname",
"in",
"[",
"n",
"for",
"n",
"in",
"names",
"if",
"isinstance",
"(",
"entries",
"[",
"n",
"]",
",",
"Dir",
")",
"]",
":",
"entries",
"[",
"dirname",
"]",
".",
"walk",
"(",
"func",
",",
"arg",
")"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Node/FS.py#L2107-L2131 |
||
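A visitor sketch for use inside an SConscript; `env` is an SCons Environment, and pruning works by mutating `names` in place, exactly as the docstring above allows:

```python
def report(arg, dirnode, names):
    # dirnode is a Node.FS.Dir (not a string); names excludes '.' and '..'.
    print(dirnode.get_abspath(), names)
    names[:] = [n for n in names if n != 'build']  # prune subtrees in place

env.Dir('src').walk(report, None)  # assumption: env is an Environment
```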
gimli-org/gimli | 17aa2160de9b15ababd9ef99e89b1bc3277bbb23 | pygimli/core/matrix.py | python | __ElementMatrix_str | (self) | return s | Show entries of an ElementMatrix. | Show entries of an ElementMatrix. | [
"Show",
"entries",
"of",
"an",
"ElementMatrix",
"."
] | def __ElementMatrix_str(self):
"""Show entries of an ElementMatrix."""
import pygimli as pg
if self.mat().cols() == 0 and self.mat().rows() == 0:
return 'Empty ElementMatrix\n'
maxRowID = int(np.log10(max(self.rowIDs())))+2
s = '\n ' + ' ' * maxRowID
# print(self.mat())
# print(self.colIDs())
# print(self.rowIDs())
for i in range(self.mat().cols()):
s += str(self.colIDs()[i]).rjust(9)
s += '\n'
s += ' ' + '-'*self.mat().cols()*(9 + maxRowID) + '-\n'
for i in range(self.mat().rows()):
s += str(self.rowIDs()[i]).rjust(maxRowID) + " :"
for v in self.row(i):
s += pg.pf(v).rjust(9)
s += '\n'
return s | [
"def",
"__ElementMatrix_str",
"(",
"self",
")",
":",
"import",
"pygimli",
"as",
"pg",
"if",
"self",
".",
"mat",
"(",
")",
".",
"cols",
"(",
")",
"==",
"0",
"and",
"self",
".",
"mat",
"(",
")",
".",
"rows",
"(",
")",
"==",
"0",
":",
"return",
"'Empty ElementMatrix\\n'",
"maxRowID",
"=",
"int",
"(",
"np",
".",
"log10",
"(",
"max",
"(",
"self",
".",
"rowIDs",
"(",
")",
")",
")",
")",
"+",
"2",
"s",
"=",
"'\\n '",
"+",
"' '",
"*",
"maxRowID",
"# print(self.mat())",
"# print(self.colIDs())",
"# print(self.rowIDs())",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"mat",
"(",
")",
".",
"cols",
"(",
")",
")",
":",
"s",
"+=",
"str",
"(",
"self",
".",
"colIDs",
"(",
")",
"[",
"i",
"]",
")",
".",
"rjust",
"(",
"9",
")",
"s",
"+=",
"'\\n'",
"s",
"+=",
"' '",
"+",
"'-'",
"*",
"self",
".",
"mat",
"(",
")",
".",
"cols",
"(",
")",
"*",
"(",
"9",
"+",
"maxRowID",
")",
"+",
"'-\\n'",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"mat",
"(",
")",
".",
"rows",
"(",
")",
")",
":",
"s",
"+=",
"str",
"(",
"self",
".",
"rowIDs",
"(",
")",
"[",
"i",
"]",
")",
".",
"rjust",
"(",
"maxRowID",
")",
"+",
"\" :\"",
"for",
"v",
"in",
"self",
".",
"row",
"(",
"i",
")",
":",
"s",
"+=",
"pg",
".",
"pf",
"(",
"v",
")",
".",
"rjust",
"(",
"9",
")",
"s",
"+=",
"'\\n'",
"return",
"s"
] | https://github.com/gimli-org/gimli/blob/17aa2160de9b15ababd9ef99e89b1bc3277bbb23/pygimli/core/matrix.py#L66-L89 |
|
apiaryio/drafter | 4634ebd07f6c6f257cc656598ccd535492fdfb55 | tools/gyp/pylib/gyp/xcodeproj_file.py | python | XCConfigurationList.HasBuildSetting | (self, key) | return 1 | Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1. | Determines the state of a build setting in all XCBuildConfiguration
child objects. | [
"Determines",
"the",
"state",
"of",
"a",
"build",
"setting",
"in",
"all",
"XCBuildConfiguration",
"child",
"objects",
"."
] | def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1 | [
"def",
"HasBuildSetting",
"(",
"self",
",",
"key",
")",
":",
"has",
"=",
"None",
"value",
"=",
"None",
"for",
"configuration",
"in",
"self",
".",
"_properties",
"[",
"'buildConfigurations'",
"]",
":",
"configuration_has",
"=",
"configuration",
".",
"HasBuildSetting",
"(",
"key",
")",
"if",
"has",
"is",
"None",
":",
"has",
"=",
"configuration_has",
"elif",
"has",
"!=",
"configuration_has",
":",
"return",
"-",
"1",
"if",
"configuration_has",
":",
"configuration_value",
"=",
"configuration",
".",
"GetBuildSetting",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"configuration_value",
"elif",
"value",
"!=",
"configuration_value",
":",
"return",
"-",
"1",
"if",
"not",
"has",
":",
"return",
"0",
"return",
"1"
] | https://github.com/apiaryio/drafter/blob/4634ebd07f6c6f257cc656598ccd535492fdfb55/tools/gyp/pylib/gyp/xcodeproj_file.py#L1617-L1649 |
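A hedged sketch of how the tri-state result is usually consumed; config_list stands for an existing XCConfigurationList, and GetBuildSetting is its sibling accessor in the same module:

state = config_list.HasBuildSetting('ONLY_ACTIVE_ARCH')
if state == 1:
    # key set consistently in every child XCBuildConfiguration
    value = config_list.GetBuildSetting('ONLY_ACTIVE_ARCH')
elif state == 0:
    value = None   # unset in all children
else:              # -1: set in only some children, or the values disagree
    value = None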
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/cgitb.py | python | text | (einfo, context=5) | return head + ''.join(frames) + ''.join(exception) + '''
The above is a description of an error in a Python program. Here is
the original traceback:
%s
''' % ''.join(traceback.format_exception(etype, evalue, etb)) | Return a plain text document describing a given traceback. | Return a plain text document describing a given traceback. | [
"Return",
"a",
"plain",
"text",
"document",
"describing",
"a",
"given",
"traceback",
"."
] | def text(einfo, context=5):
"""Return a plain text document describing a given traceback."""
etype, evalue, etb = einfo
if type(etype) is types.ClassType:
etype = etype.__name__
pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
date = time.ctime(time.time())
head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''
frames = []
records = inspect.getinnerframes(etb, context)
for frame, file, lnum, func, lines, index in records:
file = file and os.path.abspath(file) or '?'
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
call = 'in ' + func + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.text.repr(value))
highlight = {}
def reader(lnum=[lnum]):
highlight[lnum[0]] = 1
try: return linecache.getline(file, lnum[0])
finally: lnum[0] += 1
vars = scanvars(reader, frame, locals)
rows = [' %s %s' % (file, call)]
if index is not None:
i = lnum - index
for line in lines:
num = '%5d ' % i
rows.append(num+line.rstrip())
i += 1
done, dump = {}, []
for name, where, value in vars:
if name in done: continue
done[name] = 1
if value is not __UNDEF__:
if where == 'global': name = 'global ' + name
elif where != 'local': name = where + name.split('.')[-1]
dump.append('%s = %s' % (name, pydoc.text.repr(value)))
else:
dump.append(name + ' undefined')
rows.append('\n'.join(dump))
frames.append('\n%s\n' % '\n'.join(rows))
exception = ['%s: %s' % (str(etype), str(evalue))]
if isinstance(evalue, BaseException):
for name in dir(evalue):
value = pydoc.text.repr(getattr(evalue, name))
exception.append('\n%s%s = %s' % (" "*4, name, value))
return head + ''.join(frames) + ''.join(exception) + '''
The above is a description of an error in a Python program. Here is
the original traceback:
%s
''' % ''.join(traceback.format_exception(etype, evalue, etb)) | [
"def",
"text",
"(",
"einfo",
",",
"context",
"=",
"5",
")",
":",
"etype",
",",
"evalue",
",",
"etb",
"=",
"einfo",
"if",
"type",
"(",
"etype",
")",
"is",
"types",
".",
"ClassType",
":",
"etype",
"=",
"etype",
".",
"__name__",
"pyver",
"=",
"'Python '",
"+",
"sys",
".",
"version",
".",
"split",
"(",
")",
"[",
"0",
"]",
"+",
"': '",
"+",
"sys",
".",
"executable",
"date",
"=",
"time",
".",
"ctime",
"(",
"time",
".",
"time",
"(",
")",
")",
"head",
"=",
"\"%s\\n%s\\n%s\\n\"",
"%",
"(",
"str",
"(",
"etype",
")",
",",
"pyver",
",",
"date",
")",
"+",
"'''\nA problem occurred in a Python script. Here is the sequence of\nfunction calls leading up to the error, in the order they occurred.\n'''",
"frames",
"=",
"[",
"]",
"records",
"=",
"inspect",
".",
"getinnerframes",
"(",
"etb",
",",
"context",
")",
"for",
"frame",
",",
"file",
",",
"lnum",
",",
"func",
",",
"lines",
",",
"index",
"in",
"records",
":",
"file",
"=",
"file",
"and",
"os",
".",
"path",
".",
"abspath",
"(",
"file",
")",
"or",
"'?'",
"args",
",",
"varargs",
",",
"varkw",
",",
"locals",
"=",
"inspect",
".",
"getargvalues",
"(",
"frame",
")",
"call",
"=",
"''",
"if",
"func",
"!=",
"'?'",
":",
"call",
"=",
"'in '",
"+",
"func",
"+",
"inspect",
".",
"formatargvalues",
"(",
"args",
",",
"varargs",
",",
"varkw",
",",
"locals",
",",
"formatvalue",
"=",
"lambda",
"value",
":",
"'='",
"+",
"pydoc",
".",
"text",
".",
"repr",
"(",
"value",
")",
")",
"highlight",
"=",
"{",
"}",
"def",
"reader",
"(",
"lnum",
"=",
"[",
"lnum",
"]",
")",
":",
"highlight",
"[",
"lnum",
"[",
"0",
"]",
"]",
"=",
"1",
"try",
":",
"return",
"linecache",
".",
"getline",
"(",
"file",
",",
"lnum",
"[",
"0",
"]",
")",
"finally",
":",
"lnum",
"[",
"0",
"]",
"+=",
"1",
"vars",
"=",
"scanvars",
"(",
"reader",
",",
"frame",
",",
"locals",
")",
"rows",
"=",
"[",
"' %s %s'",
"%",
"(",
"file",
",",
"call",
")",
"]",
"if",
"index",
"is",
"not",
"None",
":",
"i",
"=",
"lnum",
"-",
"index",
"for",
"line",
"in",
"lines",
":",
"num",
"=",
"'%5d '",
"%",
"i",
"rows",
".",
"append",
"(",
"num",
"+",
"line",
".",
"rstrip",
"(",
")",
")",
"i",
"+=",
"1",
"done",
",",
"dump",
"=",
"{",
"}",
",",
"[",
"]",
"for",
"name",
",",
"where",
",",
"value",
"in",
"vars",
":",
"if",
"name",
"in",
"done",
":",
"continue",
"done",
"[",
"name",
"]",
"=",
"1",
"if",
"value",
"is",
"not",
"__UNDEF__",
":",
"if",
"where",
"==",
"'global'",
":",
"name",
"=",
"'global '",
"+",
"name",
"elif",
"where",
"!=",
"'local'",
":",
"name",
"=",
"where",
"+",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"dump",
".",
"append",
"(",
"'%s = %s'",
"%",
"(",
"name",
",",
"pydoc",
".",
"text",
".",
"repr",
"(",
"value",
")",
")",
")",
"else",
":",
"dump",
".",
"append",
"(",
"name",
"+",
"' undefined'",
")",
"rows",
".",
"append",
"(",
"'\\n'",
".",
"join",
"(",
"dump",
")",
")",
"frames",
".",
"append",
"(",
"'\\n%s\\n'",
"%",
"'\\n'",
".",
"join",
"(",
"rows",
")",
")",
"exception",
"=",
"[",
"'%s: %s'",
"%",
"(",
"str",
"(",
"etype",
")",
",",
"str",
"(",
"evalue",
")",
")",
"]",
"if",
"isinstance",
"(",
"evalue",
",",
"BaseException",
")",
":",
"for",
"name",
"in",
"dir",
"(",
"evalue",
")",
":",
"value",
"=",
"pydoc",
".",
"text",
".",
"repr",
"(",
"getattr",
"(",
"evalue",
",",
"name",
")",
")",
"exception",
".",
"append",
"(",
"'\\n%s%s = %s'",
"%",
"(",
"\" \"",
"*",
"4",
",",
"name",
",",
"value",
")",
")",
"return",
"head",
"+",
"''",
".",
"join",
"(",
"frames",
")",
"+",
"''",
".",
"join",
"(",
"exception",
")",
"+",
"'''\n\nThe above is a description of an error in a Python program. Here is\nthe original traceback:\n\n%s\n'''",
"%",
"''",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"etype",
",",
"evalue",
",",
"etb",
")",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/cgitb.py#L193-L257 |
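Because cgitb is in the standard library, a runnable sketch is easy; the function consumes the (etype, evalue, etb) triple from sys.exc_info():

import cgitb
import sys

try:
    1 / 0
except Exception:
    # context=3 prints three source lines around each frame
    sys.stderr.write(cgitb.text(sys.exc_info(), context=3))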
|
wyrover/book-code | 7f4883d9030d553bc6bcfa3da685e34789839900 | 3rdparty/protobuf/python/google/protobuf/text_format.py | python | _Printer._TryPrintAsAnyMessage | (self, message) | Serializes if message is a google.protobuf.Any field. | Serializes if message is a google.protobuf.Any field. | [
"Serializes",
"if",
"message",
"is",
"a",
"google",
".",
"protobuf",
".",
"Any",
"field",
"."
] | def _TryPrintAsAnyMessage(self, message):
"""Serializes if message is a google.protobuf.Any field."""
packed_message = _BuildMessageFromTypeName(message.TypeName(),
self.descriptor_pool)
if packed_message:
packed_message.MergeFromString(message.value)
self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
self._PrintMessageFieldValue(packed_message)
self.out.write(' ' if self.as_one_line else '\n')
return True
else:
return False | [
"def",
"_TryPrintAsAnyMessage",
"(",
"self",
",",
"message",
")",
":",
"packed_message",
"=",
"_BuildMessageFromTypeName",
"(",
"message",
".",
"TypeName",
"(",
")",
",",
"self",
".",
"descriptor_pool",
")",
"if",
"packed_message",
":",
"packed_message",
".",
"MergeFromString",
"(",
"message",
".",
"value",
")",
"self",
".",
"out",
".",
"write",
"(",
"'%s[%s]'",
"%",
"(",
"self",
".",
"indent",
"*",
"' '",
",",
"message",
".",
"type_url",
")",
")",
"self",
".",
"_PrintMessageFieldValue",
"(",
"packed_message",
")",
"self",
".",
"out",
".",
"write",
"(",
"' '",
"if",
"self",
".",
"as_one_line",
"else",
"'\\n'",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/text_format.py#L287-L298 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/ogl/_drawn.py | python | DrawnShape.Translate | (self, x, y) | Translate the shape by the given amount. | Translate the shape by the given amount. | [
"Translate",
"the",
"shape",
"by",
"the",
"given",
"amount",
"."
] | def Translate(self, x, y):
"""Translate the shape by the given amount."""
for i in range(4):
if self._metafiles[i].IsValid():
self._metafiles[i].Translate(x, y)
self._metafiles[i].CalculateSize(self) | [
"def",
"Translate",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"for",
"i",
"in",
"range",
"(",
"4",
")",
":",
"if",
"self",
".",
"_metafiles",
"[",
"i",
"]",
".",
"IsValid",
"(",
")",
":",
"self",
".",
"_metafiles",
"[",
"i",
"]",
".",
"Translate",
"(",
"x",
",",
"y",
")",
"self",
".",
"_metafiles",
"[",
"i",
"]",
".",
"CalculateSize",
"(",
"self",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/ogl/_drawn.py#L675-L680 |
||
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBInstruction.EmulateWithFrame | (self, *args) | return _lldb.SBInstruction_EmulateWithFrame(self, *args) | EmulateWithFrame(self, SBFrame frame, uint32_t evaluate_options) -> bool | EmulateWithFrame(self, SBFrame frame, uint32_t evaluate_options) -> bool | [
"EmulateWithFrame",
"(",
"self",
"SBFrame",
"frame",
"uint32_t",
"evaluate_options",
")",
"-",
">",
"bool"
] | def EmulateWithFrame(self, *args):
"""EmulateWithFrame(self, SBFrame frame, uint32_t evaluate_options) -> bool"""
return _lldb.SBInstruction_EmulateWithFrame(self, *args) | [
"def",
"EmulateWithFrame",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_lldb",
".",
"SBInstruction_EmulateWithFrame",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L5227-L5229 |
|
arkenthera/electron-vibrancy | 383153ef9ccb23a6c7517150d6bb0794dff3115e | scripts/cpplint.py | python | NestingState.InnermostClass | (self) | return None | Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise. | Get class info on the top of the stack. | [
"Get",
"class",
"info",
"on",
"the",
"top",
"of",
"the",
"stack",
"."
] | def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None | [
"def",
"InnermostClass",
"(",
"self",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"stack",
")",
",",
"0",
",",
"-",
"1",
")",
":",
"classinfo",
"=",
"self",
".",
"stack",
"[",
"i",
"-",
"1",
"]",
"if",
"isinstance",
"(",
"classinfo",
",",
"_ClassInfo",
")",
":",
"return",
"classinfo",
"return",
"None"
] | https://github.com/arkenthera/electron-vibrancy/blob/383153ef9ccb23a6c7517150d6bb0794dff3115e/scripts/cpplint.py#L2287-L2297 |
|
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/learn/python/learn/experiment.py | python | Experiment.run_std_server | (self) | Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server. | Starts a TensorFlow server and joins the serving thread. | [
"Starts",
"a",
"TensorFlow",
"server",
"and",
"joins",
"the",
"serving",
"thread",
"."
] | def run_std_server(self):
"""Starts a TensorFlow server and joins the serving thread.
Typically used for parameter servers.
Raises:
ValueError: if not enough information is available in the estimator's
config to create a server.
"""
self._start_server().join() | [
"def",
"run_std_server",
"(",
"self",
")",
":",
"self",
".",
"_start_server",
"(",
")",
".",
"join",
"(",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/experiment.py#L613-L622 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/docs/docstring.py | python | LazyLoadedDocstring.__init__ | (self, *args, **kwargs) | The args and kwargs are the same as the underlying document
generation function. These just get proxied to the underlying
function. | The args and kwargs are the same as the underlying document
generation function. These just get proxied to the underlying
function. | [
"The",
"args",
"and",
"kwargs",
"are",
"the",
"same",
"as",
"the",
"underlying",
"document",
"generation",
"function",
".",
"These",
"just",
"get",
"proxied",
"to",
"the",
"underlying",
"function",
"."
] | def __init__(self, *args, **kwargs):
"""
The args and kwargs are the same as the underlying document
generation function. These just get proxied to the underlying
function.
"""
super(LazyLoadedDocstring, self).__init__()
self._gen_args = args
self._gen_kwargs = kwargs
self._docstring = None | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"LazyLoadedDocstring",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_gen_args",
"=",
"args",
"self",
".",
"_gen_kwargs",
"=",
"kwargs",
"self",
".",
"_docstring",
"=",
"None"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/docs/docstring.py#L27-L36 |
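A sketch of the lazy-docstring pattern this constructor enables; the str subclassing, the _generate hook, and the expandtabs() trigger (what help() ends up calling) are assumptions about the surrounding class, not shown in this record:

class LazyDocstring(str):
    """Sketch: defer expensive docstring generation until first use."""

    def __new__(cls, *args, **kwargs):
        # the underlying str is empty; real text is rendered on demand
        return super().__new__(cls)

    def __init__(self, *args, **kwargs):
        super().__init__()
        self._gen_args = args
        self._gen_kwargs = kwargs
        self._docstring = None

    def _generate(self, *args, **kwargs):
        return 'rendered documentation for %r' % (args,)  # stand-in generator

    def expandtabs(self, tabsize=8):
        if self._docstring is None:
            self._docstring = self._generate(*self._gen_args, **self._gen_kwargs)
        return self._docstring.expandtabs(tabsize)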
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/s3transfer/manager.py | python | TransferCoordinatorController.wait | (self) | Wait until there are no more inprogress transfers
This will not stop when failures are encountered, nor will it propagate
any of these errors from failed transfers, but it can be interrupted with
a KeyboardInterrupt. | Wait until there are no more inprogress transfers | [
"Wait",
"until",
"there",
"are",
"no",
"more",
"inprogress",
"transfers"
] | def wait(self):
"""Wait until there are no more inprogress transfers
This will not stop when failures are encountered and not propogate any
of these errors from failed transfers, but it can be interrupted with
a KeyboardInterrupt.
"""
try:
transfer_coordinator = None
for transfer_coordinator in self.tracked_transfer_coordinators:
transfer_coordinator.result()
except KeyboardInterrupt:
logger.debug('Received KeyboardInterrupt in wait()')
# If Keyboard interrupt is raised while waiting for
# the result, then exit out of the wait and raise the
# exception
if transfer_coordinator:
logger.debug(
'On KeyboardInterrupt was waiting for %s',
transfer_coordinator)
raise
except Exception:
# A general exception could have been thrown because
# of result(). We just want to ignore this and continue
# because we at least know that the transfer coordinator
# has completed.
pass | [
"def",
"wait",
"(",
"self",
")",
":",
"try",
":",
"transfer_coordinator",
"=",
"None",
"for",
"transfer_coordinator",
"in",
"self",
".",
"tracked_transfer_coordinators",
":",
"transfer_coordinator",
".",
"result",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"logger",
".",
"debug",
"(",
"'Received KeyboardInterrupt in wait()'",
")",
"# If Keyboard interrupt is raised while waiting for",
"# the result, then exit out of the wait and raise the",
"# exception",
"if",
"transfer_coordinator",
":",
"logger",
".",
"debug",
"(",
"'On KeyboardInterrupt was waiting for %s'",
",",
"transfer_coordinator",
")",
"raise",
"except",
"Exception",
":",
"# A general exception could have been thrown because",
"# of result(). We just want to ignore this and continue",
"# because we at least know that the transfer coordinator",
"# has completed.",
"pass"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/s3transfer/manager.py#L632-L658 |
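A self-contained sketch; it assumes TransferCoordinator from s3transfer.futures and its set_result/announce_done lifecycle, which are not shown in this record:

from s3transfer.futures import TransferCoordinator
from s3transfer.manager import TransferCoordinatorController

controller = TransferCoordinatorController()
coordinator = TransferCoordinator(transfer_id=1)
controller.add_transfer_coordinator(coordinator)

# Mark the transfer finished so wait() returns immediately.
coordinator.set_result(None)
coordinator.announce_done()
controller.wait()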
||
tfwu/FaceDetection-ConvNet-3D | f9251c48eb40c5aec8fba7455115c355466555be | python/build/lib.linux-x86_64-2.7/mxnet/lr_scheduler.py | python | MultiFactorScheduler.__call__ | (self, num_update) | return self.base_lr | Call to schedule current learning rate
Parameters
----------
num_update: int
the maximal number of updates applied to a weight. | Call to schedule current learning rate | [
"Call",
"to",
"schedule",
"current",
"learning",
"rate"
] | def __call__(self, num_update):
"""
Call to schedule current learning rate
Parameters
----------
num_update: int
the maximal number of updates applied to a weight.
"""
if self.cur_step_ind <= len(self.step)-1:
if num_update > self.step[self.cur_step_ind]:
self.count = self.step[self.cur_step_ind]
self.cur_step_ind += 1
self.base_lr *= self.factor
logging.info("Update[%d]: Change learning rate to %0.5e",
num_update, self.base_lr)
return self.base_lr | [
"def",
"__call__",
"(",
"self",
",",
"num_update",
")",
":",
"if",
"self",
".",
"cur_step_ind",
"<=",
"len",
"(",
"self",
".",
"step",
")",
"-",
"1",
":",
"if",
"num_update",
">",
"self",
".",
"step",
"[",
"self",
".",
"cur_step_ind",
"]",
":",
"self",
".",
"count",
"=",
"self",
".",
"step",
"[",
"self",
".",
"cur_step_ind",
"]",
"self",
".",
"cur_step_ind",
"+=",
"1",
"self",
".",
"base_lr",
"*=",
"self",
".",
"factor",
"logging",
".",
"info",
"(",
"\"Update[%d]: Change learning rate to %0.5e\"",
",",
"num_update",
",",
"self",
".",
"base_lr",
")",
"return",
"self",
".",
"base_lr"
] | https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/build/lib.linux-x86_64-2.7/mxnet/lr_scheduler.py#L108-L125 |
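A small demo of the schedule; the module path follows mainline MXNet, and note from the code above that each call steps past at most one boundary (an if, not a while), so the rate decays only as updates are queried in order:

import mxnet as mx

sched = mx.lr_scheduler.MultiFactorScheduler(step=[100, 200], factor=0.5)
sched.base_lr = 0.1   # normally set by the optimizer; explicit here for the demo
for update in (50, 150, 250, 300):
    print(update, sched(update))
# 0.1 until update 100, then 0.05 past 100, then 0.025 past 200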
|
Slicer/SlicerGitSVNArchive | 65e92bb16c2b32ea47a1a66bee71f238891ee1ca | Modules/Scripted/DICOMLib/DICOMPlugin.py | python | DICOMPlugin.load | (self,loadable) | return True | Accept a DICOMLoadable and perform the operation to convert
the referenced data into MRML nodes
Virtual: should be overridden by the subclass | Accept a DICOMLoadable and perform the operation to convert
the referenced data into MRML nodes
Virtual: should be overridden by the subclass | [
"Accept",
"a",
"DICOMLoadable",
"and",
"perform",
"the",
"operation",
"to",
"convert",
"the",
"referenced",
"data",
"into",
"MRML",
"nodes",
"Virtual",
":",
"should",
"be",
"overridden",
"by",
"the",
"subclass"
] | def load(self,loadable):
"""Accept a DICOMLoadable and perform the operation to convert
the referenced data into MRML nodes
Virtual: should be overridden by the subclass
"""
return True | [
"def",
"load",
"(",
"self",
",",
"loadable",
")",
":",
"return",
"True"
] | https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Modules/Scripted/DICOMLib/DICOMPlugin.py#L127-L132 |
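A sketch of the intended override, assuming the Slicer runtime (the slicer.mrmlScene singleton and its AddNewNodeByClass API) and a loadable exposing a name attribute:

class ExampleVolumePlugin(DICOMPlugin):
    def load(self, loadable):
        # Convert the referenced DICOM data into a MRML volume node.
        node = slicer.mrmlScene.AddNewNodeByClass(
            'vtkMRMLScalarVolumeNode', loadable.name)
        return node is not None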
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | TreeCtrl.GetChildrenCount | (*args, **kwargs) | return _controls_.TreeCtrl_GetChildrenCount(*args, **kwargs) | GetChildrenCount(self, TreeItemId item, bool recursively=True) -> size_t | GetChildrenCount(self, TreeItemId item, bool recursively=True) -> size_t | [
"GetChildrenCount",
"(",
"self",
"TreeItemId",
"item",
"bool",
"recursively",
"=",
"True",
")",
"-",
">",
"size_t"
] | def GetChildrenCount(*args, **kwargs):
"""GetChildrenCount(self, TreeItemId item, bool recursively=True) -> size_t"""
return _controls_.TreeCtrl_GetChildrenCount(*args, **kwargs) | [
"def",
"GetChildrenCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TreeCtrl_GetChildrenCount",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L5355-L5357 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/py2/scipy/linalg/_interpolative_backend.py | python | idd_frm | (n, w, x) | return _id.idd_frm(n, w, x) | Transform real vector via a composition of Rokhlin's random transform,
random subselection, and an FFT.
In contrast to :func:`idd_sfrm`, this routine works best when the length of
the transformed vector is the power-of-two integer output by
:func:`idd_frmi`, or when the length is not specified but instead
determined a posteriori from the output. The returned transformed vector is
randomly permuted.
:param n:
Greatest power-of-two integer satisfying `n <= x.size` as obtained from
:func:`idd_frmi`; `n` is also the length of the output vector.
:type n: int
:param w:
Initialization array constructed by :func:`idd_frmi`.
:type w: :class:`numpy.ndarray`
:param x:
Vector to be transformed.
:type x: :class:`numpy.ndarray`
:return:
Transformed vector.
:rtype: :class:`numpy.ndarray` | Transform real vector via a composition of Rokhlin's random transform,
random subselection, and an FFT. | [
"Transform",
"real",
"vector",
"via",
"a",
"composition",
"of",
"Rokhlin",
"s",
"random",
"transform",
"random",
"subselection",
"and",
"an",
"FFT",
"."
] | def idd_frm(n, w, x):
"""
Transform real vector via a composition of Rokhlin's random transform,
random subselection, and an FFT.
In contrast to :func:`idd_sfrm`, this routine works best when the length of
the transformed vector is the power-of-two integer output by
:func:`idd_frmi`, or when the length is not specified but instead
determined a posteriori from the output. The returned transformed vector is
randomly permuted.
:param n:
Greatest power-of-two integer satisfying `n <= x.size` as obtained from
:func:`idd_frmi`; `n` is also the length of the output vector.
:type n: int
:param w:
Initialization array constructed by :func:`idd_frmi`.
:type w: :class:`numpy.ndarray`
:param x:
Vector to be transformed.
:type x: :class:`numpy.ndarray`
:return:
Transformed vector.
:rtype: :class:`numpy.ndarray`
"""
return _id.idd_frm(n, w, x) | [
"def",
"idd_frm",
"(",
"n",
",",
"w",
",",
"x",
")",
":",
"return",
"_id",
".",
"idd_frm",
"(",
"n",
",",
"w",
",",
"x",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/linalg/_interpolative_backend.py#L84-L110 |
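A hedged sketch of the transform; it assumes idd_frmi in the same private module returns the (n, w) pair this docstring references:

import numpy as np
from scipy.linalg import _interpolative_backend as backend  # private module

x = np.random.rand(64)
n, w = backend.idd_frmi(x.size)   # initialization; the return shape is an assumption
y = backend.idd_frm(n, w, x)
print(y.shape)                    # (n,)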
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scikit-learn/py2/sklearn/ensemble/weight_boosting.py | python | AdaBoostClassifier.staged_predict_proba | (self, X) | Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute. | Predict class probabilities for X. | [
"Predict",
"class",
"probabilities",
"for",
"X",
"."
] | def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba | [
"def",
"staged_predict_proba",
"(",
"self",
",",
"X",
")",
":",
"X",
"=",
"self",
".",
"_validate_X_predict",
"(",
"X",
")",
"n_classes",
"=",
"self",
".",
"n_classes_",
"proba",
"=",
"None",
"norm",
"=",
"0.",
"for",
"weight",
",",
"estimator",
"in",
"zip",
"(",
"self",
".",
"estimator_weights_",
",",
"self",
".",
"estimators_",
")",
":",
"norm",
"+=",
"weight",
"if",
"self",
".",
"algorithm",
"==",
"'SAMME.R'",
":",
"# The weights are all 1. for SAMME.R",
"current_proba",
"=",
"_samme_proba",
"(",
"estimator",
",",
"n_classes",
",",
"X",
")",
"else",
":",
"# elif self.algorithm == \"SAMME\":",
"current_proba",
"=",
"estimator",
".",
"predict_proba",
"(",
"X",
")",
"*",
"weight",
"if",
"proba",
"is",
"None",
":",
"proba",
"=",
"current_proba",
"else",
":",
"proba",
"+=",
"current_proba",
"real_proba",
"=",
"np",
".",
"exp",
"(",
"(",
"1.",
"/",
"(",
"n_classes",
"-",
"1",
")",
")",
"*",
"(",
"proba",
"/",
"norm",
")",
")",
"normalizer",
"=",
"real_proba",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"normalizer",
"[",
"normalizer",
"==",
"0.0",
"]",
"=",
"1.0",
"real_proba",
"/=",
"normalizer",
"yield",
"real_proba"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/ensemble/weight_boosting.py#L774-L824 |
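Since this is public scikit-learn API, a monitoring sketch on a held-out split (import paths follow current scikit-learn; the copy recorded here is an older vendored version):

from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

clf = AdaBoostClassifier(n_estimators=20, random_state=0).fit(X_tr, y_tr)
for stage, proba in enumerate(clf.staged_predict_proba(X_te), start=1):
    print(stage, proba[0])   # class probabilities after `stage` boosting rounds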
||
Yijunmaverick/GenerativeFaceCompletion | f72dea0fa27c779fef7b65d2f01e82bcc23a0eb2 | scripts/cpp_lint.py | python | CheckCStyleCast | (filename, linenum, line, raw_line, cast_type, pattern,
error) | return True | Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise. | Checks for a C-style cast by looking for the pattern. | [
"Checks",
"for",
"a",
"C",
"-",
"style",
"cast",
"by",
"looking",
"for",
"the",
"pattern",
"."
] | def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with sizeof, since sizeof looks like a cast.
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly require all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True | [
"def",
"CheckCStyleCast",
"(",
"filename",
",",
"linenum",
",",
"line",
",",
"raw_line",
",",
"cast_type",
",",
"pattern",
",",
"error",
")",
":",
"match",
"=",
"Search",
"(",
"pattern",
",",
"line",
")",
"if",
"not",
"match",
":",
"return",
"False",
"# Exclude lines with sizeof, since sizeof looks like a cast.",
"sizeof_match",
"=",
"Match",
"(",
"r'.*sizeof\\s*$'",
",",
"line",
"[",
"0",
":",
"match",
".",
"start",
"(",
"1",
")",
"-",
"1",
"]",
")",
"if",
"sizeof_match",
":",
"return",
"False",
"# operator++(int) and operator--(int)",
"if",
"(",
"line",
"[",
"0",
":",
"match",
".",
"start",
"(",
"1",
")",
"-",
"1",
"]",
".",
"endswith",
"(",
"' operator++'",
")",
"or",
"line",
"[",
"0",
":",
"match",
".",
"start",
"(",
"1",
")",
"-",
"1",
"]",
".",
"endswith",
"(",
"' operator--'",
")",
")",
":",
"return",
"False",
"# A single unnamed argument for a function tends to look like old",
"# style cast. If we see those, don't issue warnings for deprecated",
"# casts, instead issue warnings for unnamed arguments where",
"# appropriate.",
"#",
"# These are things that we want warnings for, since the style guide",
"# explicitly require all parameters to be named:",
"# Function(int);",
"# Function(int) {",
"# ConstMember(int) const;",
"# ConstMember(int) const {",
"# ExceptionMember(int) throw (...);",
"# ExceptionMember(int) throw (...) {",
"# PureVirtual(int) = 0;",
"#",
"# These are functions of some sort, where the compiler would be fine",
"# if they had named parameters, but people often omit those",
"# identifiers to reduce clutter:",
"# (FunctionPointer)(int);",
"# (FunctionPointer)(int) = value;",
"# Function((function_pointer_arg)(int))",
"# <TemplateArgument(int)>;",
"# <(FunctionPointerTemplateArgument)(int)>;",
"remainder",
"=",
"line",
"[",
"match",
".",
"end",
"(",
"0",
")",
":",
"]",
"if",
"Match",
"(",
"r'^\\s*(?:;|const\\b|throw\\b|=|>|\\{|\\))'",
",",
"remainder",
")",
":",
"# Looks like an unnamed parameter.",
"# Don't warn on any kind of template arguments.",
"if",
"Match",
"(",
"r'^\\s*>'",
",",
"remainder",
")",
":",
"return",
"False",
"# Don't warn on assignments to function pointers, but keep warnings for",
"# unnamed parameters to pure virtual functions. Note that this pattern",
"# will also pass on assignments of \"0\" to function pointers, but the",
"# preferred values for those would be \"nullptr\" or \"NULL\".",
"matched_zero",
"=",
"Match",
"(",
"r'^\\s=\\s*(\\S+)\\s*;'",
",",
"remainder",
")",
"if",
"matched_zero",
"and",
"matched_zero",
".",
"group",
"(",
"1",
")",
"!=",
"'0'",
":",
"return",
"False",
"# Don't warn on function pointer declarations. For this we need",
"# to check what came before the \"(type)\" string.",
"if",
"Match",
"(",
"r'.*\\)\\s*$'",
",",
"line",
"[",
"0",
":",
"match",
".",
"start",
"(",
"0",
")",
"]",
")",
":",
"return",
"False",
"# Don't warn if the parameter is named with block comments, e.g.:",
"# Function(int /*unused_param*/);",
"if",
"'/*'",
"in",
"raw_line",
":",
"return",
"False",
"# Passed all filters, issue warning here.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/function'",
",",
"3",
",",
"'All parameters should be named in a function'",
")",
"return",
"True",
"# At this point, all that should be left is actual casts.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/casting'",
",",
"4",
",",
"'Using C-style cast. Use %s<%s>(...) instead'",
"%",
"(",
"cast_type",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"return",
"True"
] | https://github.com/Yijunmaverick/GenerativeFaceCompletion/blob/f72dea0fa27c779fef7b65d2f01e82bcc23a0eb2/scripts/cpp_lint.py#L4247-L4338 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/common/tensor.py | python | Tensor.squeeze | (self, axis=None) | return tensor_operator_registry.get('reshape')()(self, new_shape) | Remove the dimension of shape 1 from the Tensor
Args:
axis (Union[None, int, list(int), tuple(int)], optional): Selects a subset of the entries of
length one in the shape. If an axis is selected with shape entry greater than one,
an error is raised. Default is None.
Returns:
Tensor, with all or a subset of the dimensions of length 1 removed.
Raises:
TypeError: If input arguments have types not specified above.
ValueError: If axis is greater than one.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
See also:
:func:`mindspore.Tensor.expand_as`: Expand the dimension of target tensor to the dimension of input tensor.
:func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.ones((1,2,2), dtype=np.float32))
>>> print(x)
[[[1. 1.]
[1. 1.]]]
>>> print(x.shape)
(1, 2, 2)
>>> y = x.squeeze()
>>> print(y)
[[1. 1.]
[1. 1.]]
>>> print(y.shape)
(2, 2)
>>> y = x.squeeze(axis=0)
>>> print(y)
[[1. 1.]
[1. 1.]]
>>> print(y.shape)
(2, 2) | Remove the dimension of shape 1 from the Tensor | [
"Remove",
"the",
"dimension",
"of",
"shape",
"1",
"from",
"the",
"Tensor"
] | def squeeze(self, axis=None):
"""
Remove the dimension of shape 1 from the Tensor
Args:
axis (Union[None, int, list(int), tuple(int)], optional): Selects a subset of the entries of
length one in the shape. If an axis is selected with shape entry greater than one,
an error is raised. Default is None.
Returns:
Tensor, with all or a subset of the dimensions of length 1 removed.
Raises:
TypeError: If input arguments have types not specified above.
ValueError: If axis is greater than one.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
See also:
:func:`mindspore.Tensor.expand_as`: Expand the dimension of target tensor to the dimension of input tensor.
:func:`mindspore.Tensor.reshape`: Give a new shape to a tensor without changing its data.
Examples:
>>> import numpy as np
>>> from mindspore import Tensor
>>> x = Tensor(np.ones((1,2,2), dtype=np.float32))
>>> print(x)
[[[1. 1.]
[1. 1.]]]
>>> print(x.shape)
(1, 2, 2)
>>> y = x.squeeze()
>>> print(y)
[[1. 1.]
[1. 1.]]
>>> print(y.shape)
(2, 2)
>>> y = x.squeeze(axis=0)
>>> print(y)
[[1. 1.]
[1. 1.]]
>>> print(y.shape)
(2, 2)
"""
self._init_check()
if axis is None:
return tensor_operator_registry.get('squeeze')(self)
new_shape = validator.prepare_shape_for_squeeze(self.shape, axis)
return tensor_operator_registry.get('reshape')()(self, new_shape) | [
"def",
"squeeze",
"(",
"self",
",",
"axis",
"=",
"None",
")",
":",
"self",
".",
"_init_check",
"(",
")",
"if",
"axis",
"is",
"None",
":",
"return",
"tensor_operator_registry",
".",
"get",
"(",
"'squeeze'",
")",
"(",
"self",
")",
"new_shape",
"=",
"validator",
".",
"prepare_shape_for_squeeze",
"(",
"self",
".",
"shape",
",",
"axis",
")",
"return",
"tensor_operator_registry",
".",
"get",
"(",
"'reshape'",
")",
"(",
")",
"(",
"self",
",",
"new_shape",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/common/tensor.py#L905-L955 |
|
kevinlin311tw/caffe-cvprw15 | 45c2a1bf0368569c54e0be4edf8d34285cf79e70 | python/caffe/pycaffe.py | python | _Net_set_raw_scale | (self, input_, scale) | Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Take
input_: which input to assign this scale factor
scale: scale coefficient | Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255. | [
"Set",
"the",
"scale",
"of",
"raw",
"features",
"s",
".",
"t",
".",
"the",
"input",
"blob",
"=",
"input",
"*",
"scale",
".",
"While",
"Python",
"represents",
"images",
"in",
"[",
"0",
"1",
"]",
"certain",
"Caffe",
"models",
"like",
"CaffeNet",
"and",
"AlexNet",
"represent",
"images",
"in",
"[",
"0",
"255",
"]",
"so",
"the",
"raw_scale",
"of",
"these",
"models",
"must",
"be",
"255",
"."
] | def _Net_set_raw_scale(self, input_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Take
input_: which input to assign this scale factor
scale: scale coefficient
"""
if input_ not in self.inputs:
raise Exception('Input not in {}'.format(self.inputs))
self.raw_scale[input_] = scale | [
"def",
"_Net_set_raw_scale",
"(",
"self",
",",
"input_",
",",
"scale",
")",
":",
"if",
"input_",
"not",
"in",
"self",
".",
"inputs",
":",
"raise",
"Exception",
"(",
"'Input not in {}'",
".",
"format",
"(",
"self",
".",
"inputs",
")",
")",
"self",
".",
"raw_scale",
"[",
"input_",
"]",
"=",
"scale"
] | https://github.com/kevinlin311tw/caffe-cvprw15/blob/45c2a1bf0368569c54e0be4edf8d34285cf79e70/python/caffe/pycaffe.py#L245-L258 |
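A usage sketch under this repository's pycaffe wrapper, where the helper above is bound as a Net method; the file names are placeholders and the constructor signature follows that era's pycaffe:

import caffe

net = caffe.Net('deploy.prototxt', 'weights.caffemodel')  # placeholder paths
# CaffeNet/AlexNet expect pixels in [0, 255], while caffe.io loads images in [0, 1]
net.set_raw_scale('data', 255)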
||
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/node-10.15.3/deps/v8/third_party/jinja2/filters.py | python | do_float | (value, default=0.0) | Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter. | Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter. | [
"Convert",
"the",
"value",
"into",
"a",
"floating",
"point",
"number",
".",
"If",
"the",
"conversion",
"doesn",
"t",
"work",
"it",
"will",
"return",
"0",
".",
"0",
".",
"You",
"can",
"override",
"this",
"default",
"using",
"the",
"first",
"parameter",
"."
] | def do_float(value, default=0.0):
"""Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter.
"""
try:
return float(value)
except (TypeError, ValueError):
return default | [
"def",
"do_float",
"(",
"value",
",",
"default",
"=",
"0.0",
")",
":",
"try",
":",
"return",
"float",
"(",
"value",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"default"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/v8/third_party/jinja2/filters.py#L662-L670 |
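The function is exposed in templates as the built-in |float filter; a quick sketch with a real Jinja2 environment:

from jinja2 import Environment

env = Environment()
out = env.from_string("{{ '1.5'|float }} / {{ 'oops'|float(2.5) }}").render()
print(out)   # 1.5 / 2.5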