nwo (string, 5-86 chars) | sha (string, 40 chars) | path (string, 4-189 chars) | language (string, 1 distinct value) | identifier (string, 1-94 chars) | parameters (string, 2-4.03k chars) | argument_list (string, 1 distinct value) | return_statement (string, 0-11.5k chars) | docstring (string, 1-33.2k chars) | docstring_summary (string, 0-5.15k chars) | docstring_tokens (sequence) | function (string, 34-151k chars) | function_tokens (sequence) | url (string, 90-278 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wanderine/BROCCOLI | ff7613de40d97429ba76ee2948cb5e1d7dd991d0 | code/bids/fslinstaller.py | python | check_fsl_install | (folder) | return False | Check if this folder contains FSL install | Check if this folder contains FSL install | [
"Check",
"if",
"this",
"folder",
"contains",
"FSL",
"install"
] | def check_fsl_install(folder):
'''Check if this folder contains FSL install'''
from os import path
fsldir = '/'.join((folder,'fsl'))
if path.isdir(fsldir):
return True
return False | [
"def",
"check_fsl_install",
"(",
"folder",
")",
":",
"from",
"os",
"import",
"path",
"fsldir",
"=",
"'/'",
".",
"join",
"(",
"(",
"folder",
",",
"'fsl'",
")",
")",
"if",
"path",
".",
"isdir",
"(",
"fsldir",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/wanderine/BROCCOLI/blob/ff7613de40d97429ba76ee2948cb5e1d7dd991d0/code/bids/fslinstaller.py#L1595-L1601 |
|
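A minimal usage sketch for the `check_fsl_install` helper above; the `/usr/local` prefix is a hypothetical install location, not something taken from the record.

```python
# Returns True only when "<folder>/fsl" exists as a directory.
if check_fsl_install('/usr/local'):   # hypothetical prefix
    print('FSL found under /usr/local/fsl')
else:
    print('no FSL install in this prefix')
```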
rrwick/Unicycler | 96ffea71e3a78d63ade19d6124946773e65cf129 | unicycler/assembly_graph.py | python | build_reverse_links | (links) | return reverse_links | This function builds a dictionary of links going the other way. I.e. if given a dictionary
of start to end links, it will return a dictionary of end to start links. | This function builds a dictionary of links going the other way. I.e. if given a dictionary
of start to end links, it will return a dictionary of end to start links. | [
"This",
"function",
"builds",
"a",
"dictionary",
"of",
"links",
"going",
"the",
"other",
"way",
".",
"I",
".",
"e",
".",
"if",
"given",
"a",
"dictionary",
"of",
"start",
"to",
"end",
"links",
"it",
"will",
"return",
"a",
"dictionary",
"of",
"end",
"to",
"start",
"links",
"."
] | def build_reverse_links(links):
"""
This function builds a dictionary of links going the other way. I.e. if given a dictionary
of start to end links, it will return a dictionary of end to start links.
"""
reverse_links = {}
for start, ends in links.items():
for end in ends:
if end not in reverse_links:
reverse_links[end] = []
reverse_links[end].append(start)
return reverse_links | [
"def",
"build_reverse_links",
"(",
"links",
")",
":",
"reverse_links",
"=",
"{",
"}",
"for",
"start",
",",
"ends",
"in",
"links",
".",
"items",
"(",
")",
":",
"for",
"end",
"in",
"ends",
":",
"if",
"end",
"not",
"in",
"reverse_links",
":",
"reverse_links",
"[",
"end",
"]",
"=",
"[",
"]",
"reverse_links",
"[",
"end",
"]",
".",
"append",
"(",
"start",
")",
"return",
"reverse_links"
] | https://github.com/rrwick/Unicycler/blob/96ffea71e3a78d63ade19d6124946773e65cf129/unicycler/assembly_graph.py#L2483-L2494 |
|
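A small worked example of `build_reverse_links`, using made-up node names to show the start-to-end mapping being inverted.

```python
links = {'A': ['B', 'C'], 'B': ['C']}
reverse = build_reverse_links(links)
# Each end now maps back to every start that pointed at it:
# reverse == {'B': ['A'], 'C': ['A', 'B']}
```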
alexgkendall/caffe-posenet | 62aafbd7c45df91acdba14f5d1406d8295c2bc6f | scripts/cpp_lint.py | python | RemoveMultiLineComments | (filename, lines, error) | Removes multiline (c-style) comments from lines. | Removes multiline (c-style) comments from lines. | [
"Removes",
"multiline",
"(",
"c",
"-",
"style",
")",
"comments",
"from",
"lines",
"."
] | def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1 | [
"def",
"RemoveMultiLineComments",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"lineix",
"=",
"0",
"while",
"lineix",
"<",
"len",
"(",
"lines",
")",
":",
"lineix_begin",
"=",
"FindNextMultiLineCommentStart",
"(",
"lines",
",",
"lineix",
")",
"if",
"lineix_begin",
">=",
"len",
"(",
"lines",
")",
":",
"return",
"lineix_end",
"=",
"FindNextMultiLineCommentEnd",
"(",
"lines",
",",
"lineix_begin",
")",
"if",
"lineix_end",
">=",
"len",
"(",
"lines",
")",
":",
"error",
"(",
"filename",
",",
"lineix_begin",
"+",
"1",
",",
"'readability/multiline_comment'",
",",
"5",
",",
"'Could not find end of multi-line comment'",
")",
"return",
"RemoveMultiLineCommentsFromRange",
"(",
"lines",
",",
"lineix_begin",
",",
"lineix_end",
"+",
"1",
")",
"lineix",
"=",
"lineix_end",
"+",
"1"
] | https://github.com/alexgkendall/caffe-posenet/blob/62aafbd7c45df91acdba14f5d1406d8295c2bc6f/scripts/cpp_lint.py#L1151-L1164 |
||
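`FindNextMultiLineCommentStart`, `FindNextMultiLineCommentEnd`, and `RemoveMultiLineCommentsFromRange` are companion cpplint helpers that this record does not include. A simplified sketch of two of them, under the assumption that they behave as in upstream cpplint (the real start-finder also handles comments that close on the same line):

```python
def FindNextMultiLineCommentStart(lines, lineix):
    """Return the index of the next line starting a '/* ... */' comment."""
    while lineix < len(lines):
        if lines[lineix].strip().startswith('/*'):
            return lineix
        lineix += 1
    return len(lines)

def RemoveMultiLineCommentsFromRange(lines, begin, end):
    """Blank out lines[begin:end] so later checks keep correct line numbers."""
    for i in range(begin, end):
        lines[i] = '/**/'
```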
openvinotoolkit/openvino | dedcbeafa8b84cccdc55ca64b8da516682b381c7 | tools/pot/openvino/tools/pot/configs/config.py | python | Config.validate_algo_config | (self) | Validates the correctness of algorithm parameters in config | Validates the correctness of algorithm parameters in config | [
"Validates",
"the",
"correctness",
"of",
"algorithm",
"parameters",
"in",
"config"
] | def validate_algo_config(self):
"""
Validates the correctness of algorithm parameters in config
"""
range_estimator_parameters = {
'preset': None,
'min': {
'type': None,
'outlier_prob': None,
'granularity': None,
'clipping_value': None
},
'max': {
'type': None,
'outlier_prob': None,
'granularity': None,
'clipping_value': None
}
}
weights_params = {
'bits': None,
'mode': None,
'level_low': None,
'level_high': None,
'granularity': None,
'range_estimator': range_estimator_parameters
}
activations_params = deepcopy(weights_params)
activations_params['range_estimator']['min'].update({'aggregator': None})
activations_params['range_estimator']['max'].update({'aggregator': None})
ignored = {'ignored': {}}
ignored_content = {
'skip_model': None,
'scope': None,
'operations': None
}
if self.model.cascade:
for model in self.model.cascade:
ignored['ignored'].update({model.name: ignored_content})
else:
ignored['ignored'] = ignored_content
bias_correction_params = {
'stat_subset_size': None,
'shuffle_data': None,
'seed': None,
'apply_for_all_nodes': None,
'threshold': None
}
layerwise_finetuning_params = {
'num_samples_for_tuning': None,
'batch_size': None,
'optimizer': None,
'loss': None,
'tuning_iterations': None,
'random_seed': None,
'use_ranking_subset': None,
'calibration_indices_pool': None,
'calculate_grads_on_loss_increase_only': None,
'weight_decay': None
}
supported_params = {
'ActivationChannelAlignment': {
'stat_subset_size': None,
'shuffle_data': None,
'seed': None
},
'MinMaxQuantization': {
'preset': None,
'stat_subset_size': None,
'shuffle_data': None,
'seed': None,
'range_estimator': range_estimator_parameters,
'weights': weights_params,
'activations': activations_params,
'saturation_fix': None
},
'FastBiasCorrection': bias_correction_params,
'BiasCorrection': bias_correction_params,
'DefaultQuantization': {
'use_fast_bias': None,
'use_layerwise_tuning': None
},
'ParamsTuningSearch': {},
'DataFreeQuantization': {
'preset': None,
'weights': weights_params,
'activations': activations_params
},
'AccuracyAwareQuantization': {
'metric_subset_ratio': None,
'ranking_subset_size': None,
'max_iter_num': None,
'maximal_drop': None,
'drop_type': None,
'use_prev_if_drop_increase': None,
'metrics': None,
'base_algorithm': 'DefaultQuantization',
'annotation_free': None,
'tune_hyperparams': None,
'annotation_conf_threshold': None,
'convert_to_mixed_preset': None
},
'RangeOptimization': {
'stat_subset_size': None,
'result_filename': None,
'maxiter': None,
'lower_boxsize': None,
'upper_boxsize': None,
'zero_boxsize': None,
'optimization_scope': None,
'activation_ranges_to_set': None,
'metric_name': None,
'optimizer_name': None,
'stochastic': None,
'dump_model_prefix': None,
'error_function': None,
'opt_backend': None,
},
'TunableQuantization': {
'outlier_prob_choices': None
},
'MagnitudeSparsity': {
'sparsity_level': None,
'normed_threshold': None,
},
'BaseWeightSparsity': {
'use_fast_bias': None,
},
'WeightSparsity': {
'use_layerwise_tuning': None,
},
'OverflowCorrection': {
'stat_subset_size': None,
'shuffle_data': None,
'seed': None,
},
'Ranger': {
'stat_subset_size': None,
'shuffle_data': None,
'seed': None,
},
}
# completing supported parameters
for algo_name in supported_params:
supported_params[algo_name].update(ignored)
for algo_name in ['DefaultQuantization', 'WeightSparsity']:
supported_params[algo_name].update(layerwise_finetuning_params)
for algo_name in ['ActivationChannelAlignment', 'MinMaxQuantization', 'FastBiasCorrection', 'BiasCorrection']:
supported_params['DefaultQuantization'].update(supported_params[algo_name])
supported_params['ParamsTuningSearch'].update(supported_params[algo_name])
for algo_name in ['MagnitudeSparsity', 'FastBiasCorrection']:
supported_params['BaseWeightSparsity'].update(supported_params[algo_name])
for algo_name in ['BaseWeightSparsity']:
supported_params['WeightSparsity'].update(supported_params[algo_name])
supported_params['TunableQuantization'].update(supported_params['MinMaxQuantization'])
# check algorithm parameters
for algo in self['compression']['algorithms']:
algo_name = algo['name']
if algo_name in supported_params:
if algo_name == 'AccuracyAwareQuantization':
backup = deepcopy(supported_params['AccuracyAwareQuantization'])
base_algo = supported_params['AccuracyAwareQuantization']['base_algorithm']
if 'base_algorithm' in algo['params'] and algo['params']['base_algorithm'] in supported_params:
base_algo = algo['params']['base_algorithm']
supported_params['AccuracyAwareQuantization'].update(supported_params[base_algo])
check_params(algo_name, algo['params'], supported_params[algo_name])
if algo_name == 'AccuracyAwareQuantization':
supported_params['AccuracyAwareQuantization'] = backup | [
"def",
"validate_algo_config",
"(",
"self",
")",
":",
"range_estimator_parameters",
"=",
"{",
"'preset'",
":",
"None",
",",
"'min'",
":",
"{",
"'type'",
":",
"None",
",",
"'outlier_prob'",
":",
"None",
",",
"'granularity'",
":",
"None",
",",
"'clipping_value'",
":",
"None",
"}",
",",
"'max'",
":",
"{",
"'type'",
":",
"None",
",",
"'outlier_prob'",
":",
"None",
",",
"'granularity'",
":",
"None",
",",
"'clipping_value'",
":",
"None",
"}",
"}",
"weights_params",
"=",
"{",
"'bits'",
":",
"None",
",",
"'mode'",
":",
"None",
",",
"'level_low'",
":",
"None",
",",
"'level_high'",
":",
"None",
",",
"'granularity'",
":",
"None",
",",
"'range_estimator'",
":",
"range_estimator_parameters",
"}",
"activations_params",
"=",
"deepcopy",
"(",
"weights_params",
")",
"activations_params",
"[",
"'range_estimator'",
"]",
"[",
"'min'",
"]",
".",
"update",
"(",
"{",
"'aggregator'",
":",
"None",
"}",
")",
"activations_params",
"[",
"'range_estimator'",
"]",
"[",
"'max'",
"]",
".",
"update",
"(",
"{",
"'aggregator'",
":",
"None",
"}",
")",
"ignored",
"=",
"{",
"'ignored'",
":",
"{",
"}",
"}",
"ignored_content",
"=",
"{",
"'skip_model'",
":",
"None",
",",
"'scope'",
":",
"None",
",",
"'operations'",
":",
"None",
"}",
"if",
"self",
".",
"model",
".",
"cascade",
":",
"for",
"model",
"in",
"self",
".",
"model",
".",
"cascade",
":",
"ignored",
"[",
"'ignored'",
"]",
".",
"update",
"(",
"{",
"model",
".",
"name",
":",
"ignored_content",
"}",
")",
"else",
":",
"ignored",
"[",
"'ignored'",
"]",
"=",
"ignored_content",
"bias_correction_params",
"=",
"{",
"'stat_subset_size'",
":",
"None",
",",
"'shuffle_data'",
":",
"None",
",",
"'seed'",
":",
"None",
",",
"'apply_for_all_nodes'",
":",
"None",
",",
"'threshold'",
":",
"None",
"}",
"layerwise_finetuning_params",
"=",
"{",
"'num_samples_for_tuning'",
":",
"None",
",",
"'batch_size'",
":",
"None",
",",
"'optimizer'",
":",
"None",
",",
"'loss'",
":",
"None",
",",
"'tuning_iterations'",
":",
"None",
",",
"'random_seed'",
":",
"None",
",",
"'use_ranking_subset'",
":",
"None",
",",
"'calibration_indices_pool'",
":",
"None",
",",
"'calculate_grads_on_loss_increase_only'",
":",
"None",
",",
"'weight_decay'",
":",
"None",
"}",
"supported_params",
"=",
"{",
"'ActivationChannelAlignment'",
":",
"{",
"'stat_subset_size'",
":",
"None",
",",
"'shuffle_data'",
":",
"None",
",",
"'seed'",
":",
"None",
"}",
",",
"'MinMaxQuantization'",
":",
"{",
"'preset'",
":",
"None",
",",
"'stat_subset_size'",
":",
"None",
",",
"'shuffle_data'",
":",
"None",
",",
"'seed'",
":",
"None",
",",
"'range_estimator'",
":",
"range_estimator_parameters",
",",
"'weights'",
":",
"weights_params",
",",
"'activations'",
":",
"activations_params",
",",
"'saturation_fix'",
":",
"None",
"}",
",",
"'FastBiasCorrection'",
":",
"bias_correction_params",
",",
"'BiasCorrection'",
":",
"bias_correction_params",
",",
"'DefaultQuantization'",
":",
"{",
"'use_fast_bias'",
":",
"None",
",",
"'use_layerwise_tuning'",
":",
"None",
"}",
",",
"'ParamsTuningSearch'",
":",
"{",
"}",
",",
"'DataFreeQuantization'",
":",
"{",
"'preset'",
":",
"None",
",",
"'weights'",
":",
"weights_params",
",",
"'activations'",
":",
"activations_params",
"}",
",",
"'AccuracyAwareQuantization'",
":",
"{",
"'metric_subset_ratio'",
":",
"None",
",",
"'ranking_subset_size'",
":",
"None",
",",
"'max_iter_num'",
":",
"None",
",",
"'maximal_drop'",
":",
"None",
",",
"'drop_type'",
":",
"None",
",",
"'use_prev_if_drop_increase'",
":",
"None",
",",
"'metrics'",
":",
"None",
",",
"'base_algorithm'",
":",
"'DefaultQuantization'",
",",
"'annotation_free'",
":",
"None",
",",
"'tune_hyperparams'",
":",
"None",
",",
"'annotation_conf_threshold'",
":",
"None",
",",
"'convert_to_mixed_preset'",
":",
"None",
"}",
",",
"'RangeOptimization'",
":",
"{",
"'stat_subset_size'",
":",
"None",
",",
"'result_filename'",
":",
"None",
",",
"'maxiter'",
":",
"None",
",",
"'lower_boxsize'",
":",
"None",
",",
"'upper_boxsize'",
":",
"None",
",",
"'zero_boxsize'",
":",
"None",
",",
"'optimization_scope'",
":",
"None",
",",
"'activation_ranges_to_set'",
":",
"None",
",",
"'metric_name'",
":",
"None",
",",
"'optimizer_name'",
":",
"None",
",",
"'stochastic'",
":",
"None",
",",
"'dump_model_prefix'",
":",
"None",
",",
"'error_function'",
":",
"None",
",",
"'opt_backend'",
":",
"None",
",",
"}",
",",
"'TunableQuantization'",
":",
"{",
"'outlier_prob_choices'",
":",
"None",
"}",
",",
"'MagnitudeSparsity'",
":",
"{",
"'sparsity_level'",
":",
"None",
",",
"'normed_threshold'",
":",
"None",
",",
"}",
",",
"'BaseWeightSparsity'",
":",
"{",
"'use_fast_bias'",
":",
"None",
",",
"}",
",",
"'WeightSparsity'",
":",
"{",
"'use_layerwise_tuning'",
":",
"None",
",",
"}",
",",
"'OverflowCorrection'",
":",
"{",
"'stat_subset_size'",
":",
"None",
",",
"'shuffle_data'",
":",
"None",
",",
"'seed'",
":",
"None",
",",
"}",
",",
"'Ranger'",
":",
"{",
"'stat_subset_size'",
":",
"None",
",",
"'shuffle_data'",
":",
"None",
",",
"'seed'",
":",
"None",
",",
"}",
",",
"}",
"# completing supported parameters",
"for",
"algo_name",
"in",
"supported_params",
":",
"supported_params",
"[",
"algo_name",
"]",
".",
"update",
"(",
"ignored",
")",
"for",
"algo_name",
"in",
"[",
"'DefaultQuantization'",
",",
"'WeightSparsity'",
"]",
":",
"supported_params",
"[",
"algo_name",
"]",
".",
"update",
"(",
"layerwise_finetuning_params",
")",
"for",
"algo_name",
"in",
"[",
"'ActivationChannelAlignment'",
",",
"'MinMaxQuantization'",
",",
"'FastBiasCorrection'",
",",
"'BiasCorrection'",
"]",
":",
"supported_params",
"[",
"'DefaultQuantization'",
"]",
".",
"update",
"(",
"supported_params",
"[",
"algo_name",
"]",
")",
"supported_params",
"[",
"'ParamsTuningSearch'",
"]",
".",
"update",
"(",
"supported_params",
"[",
"algo_name",
"]",
")",
"for",
"algo_name",
"in",
"[",
"'MagnitudeSparsity'",
",",
"'FastBiasCorrection'",
"]",
":",
"supported_params",
"[",
"'BaseWeightSparsity'",
"]",
".",
"update",
"(",
"supported_params",
"[",
"algo_name",
"]",
")",
"for",
"algo_name",
"in",
"[",
"'BaseWeightSparsity'",
"]",
":",
"supported_params",
"[",
"'WeightSparsity'",
"]",
".",
"update",
"(",
"supported_params",
"[",
"algo_name",
"]",
")",
"supported_params",
"[",
"'TunableQuantization'",
"]",
".",
"update",
"(",
"supported_params",
"[",
"'MinMaxQuantization'",
"]",
")",
"# check algorithm parameters",
"for",
"algo",
"in",
"self",
"[",
"'compression'",
"]",
"[",
"'algorithms'",
"]",
":",
"algo_name",
"=",
"algo",
"[",
"'name'",
"]",
"if",
"algo_name",
"in",
"supported_params",
":",
"if",
"algo_name",
"==",
"'AccuracyAwareQuantization'",
":",
"backup",
"=",
"deepcopy",
"(",
"supported_params",
"[",
"'AccuracyAwareQuantization'",
"]",
")",
"base_algo",
"=",
"supported_params",
"[",
"'AccuracyAwareQuantization'",
"]",
"[",
"'base_algorithm'",
"]",
"if",
"'base_algorithm'",
"in",
"algo",
"[",
"'params'",
"]",
"and",
"algo",
"[",
"'params'",
"]",
"[",
"'base_algorithm'",
"]",
"in",
"supported_params",
":",
"base_algo",
"=",
"algo",
"[",
"'params'",
"]",
"[",
"'base_algorithm'",
"]",
"supported_params",
"[",
"'AccuracyAwareQuantization'",
"]",
".",
"update",
"(",
"supported_params",
"[",
"base_algo",
"]",
")",
"check_params",
"(",
"algo_name",
",",
"algo",
"[",
"'params'",
"]",
",",
"supported_params",
"[",
"algo_name",
"]",
")",
"if",
"algo_name",
"==",
"'AccuracyAwareQuantization'",
":",
"supported_params",
"[",
"'AccuracyAwareQuantization'",
"]",
"=",
"backup"
] | https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/tools/pot/openvino/tools/pot/configs/config.py#L123-L303 |
||
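For orientation, a sketch of the kind of `compression.algorithms` entry that `validate_algo_config` checks; the algorithm name is one of the keys in `supported_params` above, while the parameter values are made up.

```python
# Illustrative POT config fragment (values are assumptions, not from the record):
config['compression']['algorithms'] = [{
    'name': 'DefaultQuantization',
    'params': {
        'preset': 'performance',   # hypothetical preset value
        'stat_subset_size': 300,   # key merged in from MinMaxQuantization above
    },
}]
```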
baidu/bigflow | 449245016c0df7d1252e85581e588bfc60cefad3 | bigflow_python/python/bigflow/pcollection.py | python | PCollection.aggregate | (self, zero, aggregate_fn, combine_fn, *side_inputs, **options) | return transforms.aggregate(self, zero, aggregate_fn, combine_fn, *side_inputs, **options) | Equivalent to
:func:`bigflow.transforms.aggregate(self, aggregate_fn, combine_fn, *side_inputs, **options)
<bigflow.transforms.aggregate>`
Args:
pcollection (PCollection): the input PCollection
zero (value or function): the initial value, or a function that returns the initial value
accumulate_fn (function): the aggregation function
*side_inputs: the SideInputs taking part in the computation
**options: configurable options
Returns:
PObject: the aggregation result | Equivalent to
:func:`bigflow.transforms.aggregate(self, aggregate_fn, combine_fn, *side_inputs, **options)
<bigflow.transforms.aggregate>` | [
"Equivalent",
"to",
":",
"func",
":",
"bigflow",
".",
"transforms",
".",
"aggregate",
"(",
"self",
"aggregate_fn",
"combine_fn",
"*",
"side_inputs",
"**",
"options",
")",
"<bigflow",
".",
"transforms",
".",
"aggregate",
">"
] | def aggregate(self, zero, aggregate_fn, combine_fn, *side_inputs, **options):
"""
Equivalent to
:func:`bigflow.transforms.aggregate(self, aggregate_fn, combine_fn, *side_inputs, **options)
<bigflow.transforms.aggregate>`
Args:
pcollection (PCollection): the input PCollection
zero (value or function): the initial value, or a function that returns the initial value
accumulate_fn (function): the aggregation function
*side_inputs: the SideInputs taking part in the computation
**options: configurable options
Returns:
PObject: the aggregation result
"""
return transforms.aggregate(self, zero, aggregate_fn, combine_fn, *side_inputs, **options) | [
"def",
"aggregate",
"(",
"self",
",",
"zero",
",",
"aggregate_fn",
",",
"combine_fn",
",",
"*",
"side_inputs",
",",
"*",
"*",
"options",
")",
":",
"return",
"transforms",
".",
"aggregate",
"(",
"self",
",",
"zero",
",",
"aggregate_fn",
",",
"combine_fn",
",",
"*",
"side_inputs",
",",
"*",
"*",
"options",
")"
] | https://github.com/baidu/bigflow/blob/449245016c0df7d1252e85581e588bfc60cefad3/bigflow_python/python/bigflow/pcollection.py#L64-L82 |
|
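An illustrative call to the `aggregate` method above, computing a sum; assume `p` is a `PCollection` of numbers produced by some Bigflow pipeline.

```python
total = p.aggregate(0,
                    lambda acc, x: acc + x,  # aggregate_fn: fold one element in
                    lambda a, b: a + b)      # combine_fn: merge partial results
# `total` is a PObject holding the sum of the elements.
```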
domino-team/openwrt-cc | 8b181297c34d14d3ca521cc9f31430d561dbc688 | package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | python | _RegistryQuery | (key, value=None) | return text | r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure. | r"""Use reg.exe to read a particular key through _RegistryQueryBase. | [
"r",
"Use",
"reg",
".",
"exe",
"to",
"read",
"a",
"particular",
"key",
"through",
"_RegistryQueryBase",
"."
] | def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text | [
"def",
"_RegistryQuery",
"(",
"key",
",",
"value",
"=",
"None",
")",
":",
"text",
"=",
"None",
"try",
":",
"text",
"=",
"_RegistryQueryBase",
"(",
"'Sysnative'",
",",
"key",
",",
"value",
")",
"except",
"OSError",
",",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"text",
"=",
"_RegistryQueryBase",
"(",
"'System32'",
",",
"key",
",",
"value",
")",
"else",
":",
"raise",
"return",
"text"
] | https://github.com/domino-team/openwrt-cc/blob/8b181297c34d14d3ca521cc9f31430d561dbc688/package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py#L141-L166 |
|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/pipes.py | python | Template.open_r | (self, file) | return os.popen(cmd, 'r') | t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively. | t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively. | [
"t",
".",
"open_r",
"(",
"file",
")",
"and",
"t",
".",
"open_w",
"(",
"file",
")",
"implement",
"t",
".",
"open",
"(",
"file",
"r",
")",
"and",
"t",
".",
"open",
"(",
"file",
"w",
")",
"respectively",
"."
] | def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if not self.steps:
return open(file, 'r')
if self.steps[-1][1] == SINK:
raise ValueError, \
'Template.open_r: pipeline ends with SINK'
cmd = self.makepipeline(file, '')
return os.popen(cmd, 'r') | [
"def",
"open_r",
"(",
"self",
",",
"file",
")",
":",
"if",
"not",
"self",
".",
"steps",
":",
"return",
"open",
"(",
"file",
",",
"'r'",
")",
"if",
"self",
".",
"steps",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"==",
"SINK",
":",
"raise",
"ValueError",
",",
"'Template.open_r: pipeline ends width SINK'",
"cmd",
"=",
"self",
".",
"makepipeline",
"(",
"file",
",",
"''",
")",
"return",
"os",
".",
"popen",
"(",
"cmd",
",",
"'r'",
")"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/pipes.py#L162-L171 |
|
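A usage sketch for `Template.open_r` from the Python 2 `pipes` module; `input.txt` is a placeholder file name.

```python
import pipes  # Python 2 standard library
t = pipes.Template()
t.append('tr a-z A-Z', '--')  # '--' marks a step that reads stdin and writes stdout
f = t.open_r('input.txt')     # file object yielding the upper-cased file contents
print f.read()
f.close()
```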
Z3Prover/z3 | d745d03afdfdf638d66093e2bfbacaf87187f35b | src/api/python/z3/z3.py | python | Optimize.statistics | (self) | return Statistics(Z3_optimize_get_statistics(self.ctx.ref(), self.optimize), self.ctx) | Return statistics for the last check. | Return statistics for the last check. | [
"Return",
"statistics",
"for",
"the",
"last",
"check",
"."
] | def statistics(self):
"""Return statistics for the last check`.
"""
return Statistics(Z3_optimize_get_statistics(self.ctx.ref(), self.optimize), self.ctx) | [
"def",
"statistics",
"(",
"self",
")",
":",
"return",
"Statistics",
"(",
"Z3_optimize_get_statistics",
"(",
"self",
".",
"ctx",
".",
"ref",
"(",
")",
",",
"self",
".",
"optimize",
")",
",",
"self",
".",
"ctx",
")"
] | https://github.com/Z3Prover/z3/blob/d745d03afdfdf638d66093e2bfbacaf87187f35b/src/api/python/z3/z3.py#L7979-L7982 |
|
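A short example of reading statistics off an `Optimize` instance; the constraint is arbitrary.

```python
from z3 import Optimize, Int

opt = Optimize()
x = Int('x')
opt.add(x > 2)
opt.minimize(x)
opt.check()              # run the solver so statistics are populated
print(opt.statistics())  # e.g. decision counts, memory usage
```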
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py | python | cudnn_rnn_opaque_params_size | (rnn_mode,
num_layers,
num_units,
input_size,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=CUDNN_RNN_UNIDIRECTION,
dtype=dtypes.float32,
dropout=0,
seed=0,
num_proj=None,
name=None) | return gen_cudnn_rnn_ops.cudnn_rnn_params_size(
rnn_mode=rnn_mode,
num_layers=num_layers,
num_units=num_units,
input_size=input_size,
num_proj=num_proj,
T=dtype,
S=dtypes.int32,
dropout=dropout,
seed=seed,
seed2=seed2,
input_mode=input_mode,
direction=direction,
name=name)[0] | Returns opaque params size for specific Cudnn config.
Args:
rnn_mode: a string specifies the mode, under which this RNN model runs.
Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
num_layers: the number of layers for the RNN model.
num_units: the number of units within the RNN model.
input_size: the size of the input, it could be different from the num_units.
input_mode: indicate whether there is a linear projection between the input
and the actual computation before the first layer. It could be
'linear_input', 'skip_input' or 'auto_select'. 'linear_input' (default)
always applies a linear projection of input onto RNN hidden state.
(standard RNN behavior). 'skip_input' is only allowed when input_size ==
num_units; 'auto_select' implies 'skip_input' when input_size ==
num_units; otherwise, it implies 'linear_input'.
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dtype: one of tf.float32 or tf.float64.
dropout: whether to enable dropout. When it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See
`tf.compat.v1.set_random_seed` for behavior.
num_proj: The output dimensionality for the projection matrices.
If None or 0, no projection is performed.
name: name of the operation.
Returns:
an int, size of Cudnn opaque params.
Raises:
ValueError: if rnn_mode or direction is invalid. | Returns opaque params size for specific Cudnn config. | [
"Returns",
"opaque",
"params",
"size",
"for",
"specific",
"Cudnn",
"config",
"."
] | def cudnn_rnn_opaque_params_size(rnn_mode,
num_layers,
num_units,
input_size,
input_mode=CUDNN_INPUT_LINEAR_MODE,
direction=CUDNN_RNN_UNIDIRECTION,
dtype=dtypes.float32,
dropout=0,
seed=0,
num_proj=None,
name=None):
"""Returns opaque params size for specific Cudnn config.
Args:
rnn_mode: a string specifies the mode, under which this RNN model runs.
Could be either 'lstm', 'gru', 'rnn_tanh' or 'rnn_relu'.
num_layers: the number of layers for the RNN model.
num_units: the number of units within the RNN model.
input_size: the size of the input, it could be different from the num_units.
input_mode: indicate whether there is a linear projection between the input
and the actual computation before the first layer. It could be
'linear_input', 'skip_input' or 'auto_select'. 'linear_input' (default)
always applies a linear projection of input onto RNN hidden state.
(standard RNN behavior). 'skip_input' is only allowed when input_size ==
num_units; 'auto_select' implies 'skip_input' when input_size ==
num_units; otherwise, it implies 'linear_input'.
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dtype: one of tf.float32 or tf.float64.
dropout: whether to enable dropout. When it is 0, dropout is disabled.
seed: the op seed used for initializing dropout. See
`tf.compat.v1.set_random_seed` for behavior.
num_proj: The output dimensionality for the projection matrices.
If None or 0, no projection is performed.
name: name of the operation.
Returns:
an int, size of Cudnn opaque params.
Raises:
ValueError: if rnn_mode or direction is invalid.
"""
_check_rnn_mode(rnn_mode)
check_direction(direction)
check_input_mode(input_mode)
seed, seed2 = random_seed.get_seed(seed)
return gen_cudnn_rnn_ops.cudnn_rnn_params_size(
rnn_mode=rnn_mode,
num_layers=num_layers,
num_units=num_units,
input_size=input_size,
num_proj=num_proj,
T=dtype,
S=dtypes.int32,
dropout=dropout,
seed=seed,
seed2=seed2,
input_mode=input_mode,
direction=direction,
name=name)[0] | [
"def",
"cudnn_rnn_opaque_params_size",
"(",
"rnn_mode",
",",
"num_layers",
",",
"num_units",
",",
"input_size",
",",
"input_mode",
"=",
"CUDNN_INPUT_LINEAR_MODE",
",",
"direction",
"=",
"CUDNN_RNN_UNIDIRECTION",
",",
"dtype",
"=",
"dtypes",
".",
"float32",
",",
"dropout",
"=",
"0",
",",
"seed",
"=",
"0",
",",
"num_proj",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"_check_rnn_mode",
"(",
"rnn_mode",
")",
"check_direction",
"(",
"direction",
")",
"check_input_mode",
"(",
"input_mode",
")",
"seed",
",",
"seed2",
"=",
"random_seed",
".",
"get_seed",
"(",
"seed",
")",
"return",
"gen_cudnn_rnn_ops",
".",
"cudnn_rnn_params_size",
"(",
"rnn_mode",
"=",
"rnn_mode",
",",
"num_layers",
"=",
"num_layers",
",",
"num_units",
"=",
"num_units",
",",
"input_size",
"=",
"input_size",
",",
"num_proj",
"=",
"num_proj",
",",
"T",
"=",
"dtype",
",",
"S",
"=",
"dtypes",
".",
"int32",
",",
"dropout",
"=",
"dropout",
",",
"seed",
"=",
"seed",
",",
"seed2",
"=",
"seed2",
",",
"input_mode",
"=",
"input_mode",
",",
"direction",
"=",
"direction",
",",
"name",
"=",
"name",
")",
"[",
"0",
"]"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py#L1585-L1643 |
|
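A hedged usage sketch: this is TensorFlow 1.x `contrib` API, requires a CUDA-enabled build, and the result is a tensor that still has to be evaluated in a session.

```python
# Sketch only; argument values are illustrative.
params_size = cudnn_rnn_opaque_params_size(
    rnn_mode='lstm', num_layers=2, num_units=128, input_size=64)
# `params_size` is an int32 scalar Tensor; obtain the value with session.run(params_size).
```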
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/propgrid.py | python | PropertyGridInterface.SetPropertyHelpString | (*args, **kwargs) | return _propgrid.PropertyGridInterface_SetPropertyHelpString(*args, **kwargs) | SetPropertyHelpString(self, PGPropArg id, String helpString) | SetPropertyHelpString(self, PGPropArg id, String helpString) | [
"SetPropertyHelpString",
"(",
"self",
"PGPropArg",
"id",
"String",
"helpString",
")"
] | def SetPropertyHelpString(*args, **kwargs):
"""SetPropertyHelpString(self, PGPropArg id, String helpString)"""
return _propgrid.PropertyGridInterface_SetPropertyHelpString(*args, **kwargs) | [
"def",
"SetPropertyHelpString",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_propgrid",
".",
"PropertyGridInterface_SetPropertyHelpString",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L1433-L1435 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/_pydecimal.py | python | Decimal._round_05up | (self, prec) | Round down unless digit prec-1 is 0 or 5. | Round down unless digit prec-1 is 0 or 5. | [
"Round",
"down",
"unless",
"digit",
"prec",
"-",
"1",
"is",
"0",
"or",
"5",
"."
] | def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec) | [
"def",
"_round_05up",
"(",
"self",
",",
"prec",
")",
":",
"if",
"prec",
"and",
"self",
".",
"_int",
"[",
"prec",
"-",
"1",
"]",
"not",
"in",
"'05'",
":",
"return",
"self",
".",
"_round_down",
"(",
"prec",
")",
"else",
":",
"return",
"-",
"self",
".",
"_round_down",
"(",
"prec",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/_pydecimal.py#L1812-L1817 |
||
microsoft/TSS.MSR | 0f2516fca2cd9929c31d5450e39301c9bde43688 | TSS.Py/src/TpmTypes.py | python | TPMS_SIGNATURE_ECSCHNORR.fromTpm | (buf) | return buf.createObj(TPMS_SIGNATURE_ECSCHNORR) | Returns new TPMS_SIGNATURE_ECSCHNORR object constructed from its
marshaled representation in the given TpmBuffer buffer | Returns new TPMS_SIGNATURE_ECSCHNORR object constructed from its
marshaled representation in the given TpmBuffer buffer | [
"Returns",
"new",
"TPMS_SIGNATURE_ECSCHNORR",
"object",
"constructed",
"from",
"its",
"marshaled",
"representation",
"in",
"the",
"given",
"TpmBuffer",
"buffer"
] | def fromTpm(buf):
""" Returns new TPMS_SIGNATURE_ECSCHNORR object constructed from its
marshaled representation in the given TpmBuffer buffer
"""
return buf.createObj(TPMS_SIGNATURE_ECSCHNORR) | [
"def",
"fromTpm",
"(",
"buf",
")",
":",
"return",
"buf",
".",
"createObj",
"(",
"TPMS_SIGNATURE_ECSCHNORR",
")"
] | https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L7721-L7725 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | third_party/catapult/third_party/gsutil/third_party/boto/boto/cloudhsm/layer1.py | python | CloudHSMConnection.list_available_zones | (self) | return self.make_request(action='ListAvailableZones',
body=json.dumps(params)) | Lists the Availability Zones that have available AWS CloudHSM
capacity. | Lists the Availability Zones that have available AWS CloudHSM
capacity. | [
"Lists",
"the",
"Availability",
"Zones",
"that",
"have",
"available",
"AWS",
"CloudHSM",
"capacity",
"."
] | def list_available_zones(self):
"""
Lists the Availability Zones that have available AWS CloudHSM
capacity.
"""
params = {}
return self.make_request(action='ListAvailableZones',
body=json.dumps(params)) | [
"def",
"list_available_zones",
"(",
"self",
")",
":",
"params",
"=",
"{",
"}",
"return",
"self",
".",
"make_request",
"(",
"action",
"=",
"'ListAvailableZones'",
",",
"body",
"=",
"json",
".",
"dumps",
"(",
"params",
")",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/cloudhsm/layer1.py#L268-L277 |
|
hanpfei/chromium-net | 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f | tools/generate_stubs/generate_stubs.py | python | CreateWindowsDefForSigFiles | (sig_files, out_dir, module_name) | For all signature files, create a single windows def file.
Args:
sig_files: Array of strings with the paths to each signature file.
out_dir: String holding path to directory where the generated def goes.
module_name: Name of the output DLL or LIB which will link in the def file. | For all signature files, create a single windows def file. | [
"For",
"all",
"signature",
"files",
"create",
"a",
"single",
"windows",
"def",
"file",
"."
] | def CreateWindowsDefForSigFiles(sig_files, out_dir, module_name):
"""For all signature files, create a single windows def file.
Args:
sig_files: Array of strings with the paths to each signature file.
out_dir: String holding path to directory where the generated def goes.
module_name: Name of the output DLL or LIB which will link in the def file.
"""
signatures = []
for input_path in sig_files:
infile = open(input_path, 'r')
try:
signatures += ParseSignatures(infile)
finally:
infile.close()
def_file_path = os.path.join(
out_dir, os.path.splitext(os.path.basename(module_name))[0] + '.def')
outfile = open(def_file_path, 'w')
try:
WriteWindowsDefFile(module_name, signatures, outfile)
finally:
outfile.close() | [
"def",
"CreateWindowsDefForSigFiles",
"(",
"sig_files",
",",
"out_dir",
",",
"module_name",
")",
":",
"signatures",
"=",
"[",
"]",
"for",
"input_path",
"in",
"sig_files",
":",
"infile",
"=",
"open",
"(",
"input_path",
",",
"'r'",
")",
"try",
":",
"signatures",
"+=",
"ParseSignatures",
"(",
"infile",
")",
"finally",
":",
"infile",
".",
"close",
"(",
")",
"def_file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"module_name",
")",
")",
"[",
"0",
"]",
"+",
"'.def'",
")",
"outfile",
"=",
"open",
"(",
"def_file_path",
",",
"'w'",
")",
"try",
":",
"WriteWindowsDefFile",
"(",
"module_name",
",",
"signatures",
",",
"outfile",
")",
"finally",
":",
"outfile",
".",
"close",
"(",
")"
] | https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/generate_stubs/generate_stubs.py#L1043-L1066 |
||
Yijunmaverick/GenerativeFaceCompletion | f72dea0fa27c779fef7b65d2f01e82bcc23a0eb2 | python/caffe/io.py | python | Transformer.set_mean | (self, in_, mean) | Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable) | Set the mean to subtract for centering the data. | [
"Set",
"the",
"mean",
"to",
"subtract",
"for",
"centering",
"the",
"data",
"."
] | def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean | [
"def",
"set_mean",
"(",
"self",
",",
"in_",
",",
"mean",
")",
":",
"self",
".",
"__check_input",
"(",
"in_",
")",
"ms",
"=",
"mean",
".",
"shape",
"if",
"mean",
".",
"ndim",
"==",
"1",
":",
"# broadcast channels",
"if",
"ms",
"[",
"0",
"]",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'Mean channels incompatible with input.'",
")",
"mean",
"=",
"mean",
"[",
":",
",",
"np",
".",
"newaxis",
",",
"np",
".",
"newaxis",
"]",
"else",
":",
"# elementwise mean",
"if",
"len",
"(",
"ms",
")",
"==",
"2",
":",
"ms",
"=",
"(",
"1",
",",
")",
"+",
"ms",
"if",
"len",
"(",
"ms",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Mean shape invalid'",
")",
"if",
"ms",
"!=",
"self",
".",
"inputs",
"[",
"in_",
"]",
"[",
"1",
":",
"]",
":",
"raise",
"ValueError",
"(",
"'Mean shape incompatible with input shape.'",
")",
"self",
".",
"mean",
"[",
"in_",
"]",
"=",
"mean"
] | https://github.com/Yijunmaverick/GenerativeFaceCompletion/blob/f72dea0fa27c779fef7b65d2f01e82bcc23a0eb2/python/caffe/io.py#L235-L259 |
||
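A typical call for the `set_mean` method above, assuming `transformer` has already been constructed with a 3-channel `'data'` input; the per-channel values are illustrative ImageNet-style BGR means.

```python
import numpy as np

# A 1-D mean is broadcast across the spatial dimensions (the first branch above):
transformer.set_mean('data', np.array([104.0, 117.0, 123.0]))
```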
espressomd/espresso | 7e29f9052e710fe1ebf0f5d2a8076b32921fbc6a | doc/tutorials/convert.py | python | split_matplotlib_cells | (nb) | If a cell imports matplotlib, split the cell to keep the
import statement separate from the code that uses matplotlib.
This prevents a known bug in the Jupyter backend which causes
the plot object to be represented as a string instead of a canvas
when created in the cell where matplotlib is imported for the
first time (https://github.com/jupyter/notebook/issues/3523). | If a cell imports matplotlib, split the cell to keep the
import statement separate from the code that uses matplotlib.
This prevents a known bug in the Jupyter backend which causes
the plot object to be represented as a string instead of a canvas
when created in the cell where matplotlib is imported for the
first time (https://github.com/jupyter/notebook/issues/3523). | [
"If",
"a",
"cell",
"imports",
"matplotlib",
"split",
"the",
"cell",
"to",
"keep",
"the",
"import",
"statement",
"separate",
"from",
"the",
"code",
"that",
"uses",
"matplotlib",
".",
"This",
"prevents",
"a",
"known",
"bug",
"in",
"the",
"Jupyter",
"backend",
"which",
"causes",
"the",
"plot",
"object",
"to",
"be",
"represented",
"as",
"a",
"string",
"instead",
"of",
"a",
"canvas",
"when",
"created",
"in",
"the",
"cell",
"where",
"matplotlib",
"is",
"imported",
"for",
"the",
"first",
"time",
"(",
"https",
":",
"//",
"github",
".",
"com",
"/",
"jupyter",
"/",
"notebook",
"/",
"issues",
"/",
"3523",
")",
"."
] | def split_matplotlib_cells(nb):
"""
If a cell imports matplotlib, split the cell to keep the
import statement separate from the code that uses matplotlib.
This prevents a known bug in the Jupyter backend which causes
the plot object to be represented as a string instead of a canvas
when created in the cell where matplotlib is imported for the
first time (https://github.com/jupyter/notebook/issues/3523).
"""
for i in range(len(nb['cells']) - 1, -1, -1):
cell = nb['cells'][i]
if cell['cell_type'] == 'code' and 'matplotlib' in cell['source']:
code = iw.protect_ipython_magics(cell['source'])
# split cells after matplotlib imports
mapping = iw.delimit_statements(code)
tree = ast.parse(code)
visitor = iw.GetMatplotlibPyplot()
visitor.visit(tree)
if visitor.matplotlib_first:
code = iw.deprotect_ipython_magics(code)
lines = code.split('\n')
lineno_end = mapping[visitor.matplotlib_first]
split_code = '\n'.join(lines[lineno_end:]).lstrip('\n')
if split_code:
new_cell = nbformat.v4.new_code_cell(source=split_code)
nb['cells'].insert(i + 1, new_cell)
lines = lines[:lineno_end]
nb['cells'][i]['source'] = '\n'.join(lines).rstrip('\n') | [
"def",
"split_matplotlib_cells",
"(",
"nb",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"nb",
"[",
"'cells'",
"]",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"cell",
"=",
"nb",
"[",
"'cells'",
"]",
"[",
"i",
"]",
"if",
"cell",
"[",
"'cell_type'",
"]",
"==",
"'code'",
"and",
"'matplotlib'",
"in",
"cell",
"[",
"'source'",
"]",
":",
"code",
"=",
"iw",
".",
"protect_ipython_magics",
"(",
"cell",
"[",
"'source'",
"]",
")",
"# split cells after matplotlib imports",
"mapping",
"=",
"iw",
".",
"delimit_statements",
"(",
"code",
")",
"tree",
"=",
"ast",
".",
"parse",
"(",
"code",
")",
"visitor",
"=",
"iw",
".",
"GetMatplotlibPyplot",
"(",
")",
"visitor",
".",
"visit",
"(",
"tree",
")",
"if",
"visitor",
".",
"matplotlib_first",
":",
"code",
"=",
"iw",
".",
"deprotect_ipython_magics",
"(",
"code",
")",
"lines",
"=",
"code",
".",
"split",
"(",
"'\\n'",
")",
"lineno_end",
"=",
"mapping",
"[",
"visitor",
".",
"matplotlib_first",
"]",
"split_code",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
"[",
"lineno_end",
":",
"]",
")",
".",
"lstrip",
"(",
"'\\n'",
")",
"if",
"split_code",
":",
"new_cell",
"=",
"nbformat",
".",
"v4",
".",
"new_code_cell",
"(",
"source",
"=",
"split_code",
")",
"nb",
"[",
"'cells'",
"]",
".",
"insert",
"(",
"i",
"+",
"1",
",",
"new_cell",
")",
"lines",
"=",
"lines",
"[",
":",
"lineno_end",
"]",
"nb",
"[",
"'cells'",
"]",
"[",
"i",
"]",
"[",
"'source'",
"]",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
".",
"rstrip",
"(",
"'\\n'",
")"
] | https://github.com/espressomd/espresso/blob/7e29f9052e710fe1ebf0f5d2a8076b32921fbc6a/doc/tutorials/convert.py#L94-L121 |
||
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | gpu/command_buffer/build_gles2_cmd_buffer.py | python | PUTnHandler.WriteImmediateCmdSetHeader | (self, func, file) | | Overridden from TypeHandler. | Overridden from TypeHandler. | [
"Overridden",
"from",
"TypeHandler",
"."
] | def WriteImmediateCmdSetHeader(self, func, file):
"""Overrriden from TypeHandler."""
file.Write(" void SetHeader(GLsizei count) {\n")
file.Write(
" header.SetCmdByTotalSize<ValueType>(ComputeSize(count));\n")
file.Write(" }\n")
file.Write("\n") | [
"def",
"WriteImmediateCmdSetHeader",
"(",
"self",
",",
"func",
",",
"file",
")",
":",
"file",
".",
"Write",
"(",
"\" void SetHeader(GLsizei count) {\\n\"",
")",
"file",
".",
"Write",
"(",
"\" header.SetCmdByTotalSize<ValueType>(ComputeSize(count));\\n\"",
")",
"file",
".",
"Write",
"(",
"\" }\\n\"",
")",
"file",
".",
"Write",
"(",
"\"\\n\"",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/gpu/command_buffer/build_gles2_cmd_buffer.py#L5142-L5148 |
||
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | registerHTTPPostCallbacks | () | By default, libxml submits HTTP output requests using the
"PUT" method. Calling this method changes the HTTP output
method to use the "POST" method instead. | By default, libxml submits HTTP output requests using the
"PUT" method. Calling this method changes the HTTP output
method to use the "POST" method instead. | [
"By",
"default",
"libxml",
"submits",
"HTTP",
"output",
"requests",
"using",
"the",
"PUT",
"method",
".",
"Calling",
"this",
"method",
"changes",
"the",
"HTTP",
"output",
"method",
"to",
"use",
"the",
"POST",
"method",
"instead",
"."
] | def registerHTTPPostCallbacks():
"""By default, libxml submits HTTP output requests using the
"PUT" method. Calling this method changes the HTTP output
method to use the "POST" method instead. """
libxml2mod.xmlRegisterHTTPPostCallbacks() | [
"def",
"registerHTTPPostCallbacks",
"(",
")",
":",
"libxml2mod",
".",
"xmlRegisterHTTPPostCallbacks",
"(",
")"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L1917-L1921 |
||
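Calling it is a one-liner; after this, libxml2 submits HTTP output requests with POST instead of PUT.

```python
import libxml2

libxml2.registerHTTPPostCallbacks()  # switch HTTP output method from PUT to POST
```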
LLNL/Caliper | 60e06980fc65057e1da01296e6eebbbed30f59c8 | examples/scripts/topdown/topdown.py | python | determine_boundedness | (row) | return boundedness | Determine the boundedness of a single row with topdown metrics | Determine the boundedness of a single row with topdown metrics | [
"Determine",
"the",
"boundedness",
"of",
"a",
"single",
"row",
"with",
"topdown",
"metrics"
] | def determine_boundedness(row):
""" Determine the boundedness of a single row with topdown metrics """
boundedness = []
level_1 = max_column(row, ['retiring',
'bad_speculation',
'frontend_bound',
'backend_bound'])
if str(row[level_1]) != 'nan' and str(row[level_1]) != 'inf':
boundedness.append(level_1 + ' ' + percentage_string(row[level_1]))
if level_1 == 'bad_speculation':
level_2 = max_column(row, ['branch_mispredict',
'machine_clear'])
boundedness.append(level_2 + ' ' + percentage_string(row[level_2]))
elif level_1 == 'frontend_bound':
level_2 = max_column(row, ['frontend_latency',
'frontend_bandwidth'])
boundedness.append(level_2 + ' ' + percentage_string(row[level_2]))
elif level_1 == 'backend_bound':
level_2 = max_column(row, ['core_bound',
'memory_bound'])
boundedness.append(level_2 + ' ' + percentage_string(row[level_2]))
if level_2 == 'memory_bound':
level_3 = max_column(row, ['l1_bound',
'l2_bound',
'l3_bound',
'mem_bound',
'uncore_bound'])
boundedness.append(level_3 + ' ' + percentage_string(row[level_3]))
if len(boundedness) == 0:
boundedness.append('undetermined')
return boundedness | [
"def",
"determine_boundedness",
"(",
"row",
")",
":",
"boundedness",
"=",
"[",
"]",
"level_1",
"=",
"max_column",
"(",
"row",
",",
"[",
"'retiring'",
",",
"'bad_speculation'",
",",
"'frontend_bound'",
",",
"'backend_bound'",
"]",
")",
"if",
"str",
"(",
"row",
"[",
"level_1",
"]",
")",
"!=",
"'nan'",
"and",
"str",
"(",
"row",
"[",
"level_1",
"]",
")",
"!=",
"'inf'",
":",
"boundedness",
".",
"append",
"(",
"level_1",
"+",
"' '",
"+",
"percentage_string",
"(",
"row",
"[",
"level_1",
"]",
")",
")",
"if",
"level_1",
"==",
"'bad_speculation'",
":",
"level_2",
"=",
"max_column",
"(",
"row",
",",
"[",
"'branch_mispredict'",
",",
"'machine_clear'",
"]",
")",
"boundedness",
".",
"append",
"(",
"level_2",
"+",
"' '",
"+",
"percentage_string",
"(",
"row",
"[",
"level_2",
"]",
")",
")",
"elif",
"level_1",
"==",
"'frontend_bound'",
":",
"level_2",
"=",
"max_column",
"(",
"row",
",",
"[",
"'frontend_latency'",
",",
"'frontend_bandwidth'",
"]",
")",
"boundedness",
".",
"append",
"(",
"level_2",
"+",
"' '",
"+",
"percentage_string",
"(",
"row",
"[",
"level_2",
"]",
")",
")",
"elif",
"level_1",
"==",
"'backend_bound'",
":",
"level_2",
"=",
"max_column",
"(",
"row",
",",
"[",
"'core_bound'",
",",
"'memory_bound'",
"]",
")",
"boundedness",
".",
"append",
"(",
"level_2",
"+",
"' '",
"+",
"percentage_string",
"(",
"row",
"[",
"level_2",
"]",
")",
")",
"if",
"level_2",
"==",
"'memory_bound'",
":",
"level_3",
"=",
"max_column",
"(",
"row",
",",
"[",
"'l1_bound'",
",",
"'l2_bound'",
",",
"'l3_bound'",
",",
"'mem_bound'",
",",
"'uncore_bound'",
"]",
")",
"boundedness",
".",
"append",
"(",
"level_3",
"+",
"' '",
"+",
"percentage_string",
"(",
"row",
"[",
"level_3",
"]",
")",
")",
"if",
"len",
"(",
"boundedness",
")",
"==",
"0",
":",
"boundedness",
".",
"append",
"(",
"'undetermined'",
")",
"return",
"boundedness"
] | https://github.com/LLNL/Caliper/blob/60e06980fc65057e1da01296e6eebbbed30f59c8/examples/scripts/topdown/topdown.py#L136-L171 |
|
yuxng/DA-RNN | 77fbb50b4272514588a10a9f90b7d5f8d46974fb | lib/datasets/rgbd_scene.py | python | rgbd_scene.gt_roidb | (self) | return gt_roidb | Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls. | Return the database of ground-truth regions of interest. | [
"Return",
"the",
"database",
"of",
"ground",
"-",
"truth",
"regions",
"of",
"interest",
"."
] | def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_rgbd_scene_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb | [
"def",
"gt_roidb",
"(",
"self",
")",
":",
"cache_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache_path",
",",
"self",
".",
"name",
"+",
"'_gt_roidb.pkl'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cache_file",
")",
":",
"with",
"open",
"(",
"cache_file",
",",
"'rb'",
")",
"as",
"fid",
":",
"roidb",
"=",
"cPickle",
".",
"load",
"(",
"fid",
")",
"print",
"'{} gt roidb loaded from {}'",
".",
"format",
"(",
"self",
".",
"name",
",",
"cache_file",
")",
"return",
"roidb",
"gt_roidb",
"=",
"[",
"self",
".",
"_load_rgbd_scene_annotation",
"(",
"index",
")",
"for",
"index",
"in",
"self",
".",
"image_index",
"]",
"with",
"open",
"(",
"cache_file",
",",
"'wb'",
")",
"as",
"fid",
":",
"cPickle",
".",
"dump",
"(",
"gt_roidb",
",",
"fid",
",",
"cPickle",
".",
"HIGHEST_PROTOCOL",
")",
"print",
"'wrote gt roidb to {}'",
".",
"format",
"(",
"cache_file",
")",
"return",
"gt_roidb"
] | https://github.com/yuxng/DA-RNN/blob/77fbb50b4272514588a10a9f90b7d5f8d46974fb/lib/datasets/rgbd_scene.py#L115-L136 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/losses/losses_impl.py | python | mean_squared_error | (
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS) | Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None. | Adds a Sum-of-Squares loss to the training procedure. | [
"Adds",
"a",
"Sum",
"-",
"of",
"-",
"Squares",
"loss",
"to",
"the",
"training",
"procedure",
"."
] | def mean_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
"""
if labels is None:
raise ValueError("labels must not be None.")
if predictions is None:
raise ValueError("predictions must not be None.")
with ops.name_scope(scope, "mean_squared_error",
(predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.squared_difference(predictions, labels)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction) | [
"def",
"mean_squared_error",
"(",
"labels",
",",
"predictions",
",",
"weights",
"=",
"1.0",
",",
"scope",
"=",
"None",
",",
"loss_collection",
"=",
"ops",
".",
"GraphKeys",
".",
"LOSSES",
",",
"reduction",
"=",
"Reduction",
".",
"SUM_BY_NONZERO_WEIGHTS",
")",
":",
"if",
"labels",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"labels must not be None.\"",
")",
"if",
"predictions",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"predictions must not be None.\"",
")",
"with",
"ops",
".",
"name_scope",
"(",
"scope",
",",
"\"mean_squared_error\"",
",",
"(",
"predictions",
",",
"labels",
",",
"weights",
")",
")",
"as",
"scope",
":",
"predictions",
"=",
"math_ops",
".",
"to_float",
"(",
"predictions",
")",
"labels",
"=",
"math_ops",
".",
"to_float",
"(",
"labels",
")",
"predictions",
".",
"get_shape",
"(",
")",
".",
"assert_is_compatible_with",
"(",
"labels",
".",
"get_shape",
"(",
")",
")",
"losses",
"=",
"math_ops",
".",
"squared_difference",
"(",
"predictions",
",",
"labels",
")",
"return",
"compute_weighted_loss",
"(",
"losses",
",",
"weights",
",",
"scope",
",",
"loss_collection",
",",
"reduction",
"=",
"reduction",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/losses/losses_impl.py#L533-L577 |
||
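A toy TensorFlow 1.x-style usage sketch for the loss above.

```python
import tensorflow as tf  # TF 1.x

labels = tf.constant([[0.0], [1.0]])
predictions = tf.constant([[0.5], [0.5]])
loss = tf.losses.mean_squared_error(labels, predictions)  # scalar Tensor
with tf.Session() as sess:
    print(sess.run(loss))  # 0.25: mean of (0.5**2, 0.5**2)
```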
osquery/osquery | fd529718e48348853f708d56990720c3c84c7152 | tools/formatting/git-clang-format.py | python | compute_diff | (commit, files) | return p | Return a subprocess object producing the diff from `commit`.
The return value's `stdin` file object will produce a patch with the
differences between the working directory and `commit`, filtered on `files`
(if non-empty). Zero context lines are used in the patch. | Return a subprocess object producing the diff from `commit`. | [
"Return",
"a",
"subprocess",
"object",
"producing",
"the",
"diff",
"from",
"commit",
"."
] | def compute_diff(commit, files):
"""Return a subprocess object producing the diff from `commit`.
The return value's `stdin` file object will produce a patch with the
differences between the working directory and `commit`, filtered on `files`
(if non-empty). Zero context lines are used in the patch."""
cmd = ['git', 'diff-index', '-p', '-U0', commit, '--']
cmd.extend(files)
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf8')
p.stdin.close()
return p | https://github.com/osquery/osquery/blob/fd529718e48348853f708d56990720c3c84c7152/tools/formatting/git-clang-format.py#L272-L285 |
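A hedged usage sketch for the function above: it must run inside a git checkout, and the commit name and file list here are placeholders.

```python
# Assuming compute_diff() from above is in scope and cwd is a git repo.
p = compute_diff('HEAD', ['README.md'])
patch = p.stdout.read()   # zero-context unified diff, decoded as utf8
p.stdout.close()
if p.wait() == 0:
    print(patch or '(no changes)')
```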
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/plat-mac/findertools.py | python | _getwindowposition | (folder_alias) | | Get the size of a Finder window for a folder, specified by path. | Get the size of a Finder window for a folder, specified by path. | def _getwindowposition(folder_alias):
"""Get the size of a Finder window for folder, Specify by path."""
finder = _getfinder()
args = {}
attrs = {}
aeobj_0 = aetypes.ObjectSpecifier(want=aetypes.Type('cfol'),
form="alis", seld=folder_alias, fr=None)
aeobj_1 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('cwnd'), fr=aeobj_0)
aeobj_2 = aetypes.ObjectSpecifier(want=aetypes.Type('prop'),
form="prop", seld=aetypes.Type('ptsz'), fr=aeobj_1)
args['----'] = aeobj_2
_reply, args, attrs = finder.send('core', 'getd', args, attrs)
if 'errn' in args:
raise Error, aetools.decodeerror(args)
if '----' in args:
return args['----'] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/plat-mac/findertools.py#L515-L531 |
MegEngine/MegEngine | ce9ad07a27ec909fb8db4dd67943d24ba98fb93a | imperative/python/megengine/hub/hub.py | python | load_serialized_obj_from_url | (url: str, model_dir=None) | return state_dict | Loads MegEngine serialized object from the given URL.
If the object is already present in ``model_dir``, it's deserialized and
returned. If no ``model_dir`` is specified, it will be ``MGE_HOME/serialized``.
Args:
url: url to serialized object.
model_dir: dir to cache target serialized file.
Returns:
loaded object. | Loads MegEngine serialized object from the given URL. | def load_serialized_obj_from_url(url: str, model_dir=None) -> Any:
"""Loads MegEngine serialized object from the given URL.
If the object is already present in ``model_dir``, it's deserialized and
returned. If no ``model_dir`` is specified, it will be ``MGE_HOME/serialized``.
Args:
url: url to serialized object.
model_dir: dir to cache target serialized file.
Returns:
loaded object.
"""
if model_dir is None:
model_dir = os.path.join(_get_megengine_home(), "serialized")
os.makedirs(model_dir, exist_ok=True)
parts = urlparse(url)
filename = os.path.basename(parts.path)
# use hash as prefix to avoid filename conflict from different urls
sha256 = hashlib.sha256()
sha256.update(url.encode())
digest = sha256.hexdigest()[:6]
filename = digest + "_" + filename
cached_file = os.path.join(model_dir, filename)
logger.info(
"load_serialized_obj_from_url: download to or using cached %s", cached_file
)
if not os.path.exists(cached_file):
if is_distributed():
logger.warning(
"Downloading serialized object in DISTRIBUTED mode\n"
" File may be downloaded multiple times. We recommend\n"
" users to download in single process first."
)
download_from_url(url, cached_file)
state_dict = _mge_load_serialized(cached_file)
return state_dict | https://github.com/MegEngine/MegEngine/blob/ce9ad07a27ec909fb8db4dd67943d24ba98fb93a/imperative/python/megengine/hub/hub.py#L228-L268 |
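The cache-naming scheme above is self-contained enough to demonstrate on its own; this sketch reproduces just the hash-prefix step, with a made-up URL.

```python
import hashlib
import os
from urllib.parse import urlparse

def cache_filename(url):
    # A 6-hex-digit SHA-256 prefix of the URL keeps identically named
    # files from different URLs from colliding in the cache directory.
    digest = hashlib.sha256(url.encode()).hexdigest()[:6]
    return digest + "_" + os.path.basename(urlparse(url).path)

print(cache_filename("https://example.com/models/resnet18.pkl"))
# prints something like 'a1b2c3_resnet18.pkl' (prefix depends on the URL)
```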
|
gv22ga/dlib-face-recognition-android | 42d6305cbd85833f2b85bb79b70ab9ab004153c9 | tools/lint/cpplint.py | python | FindNextMultiLineCommentEnd | (lines, lineix) | return len(lines) | We are inside a comment, find the end marker. | We are inside a comment, find the end marker. | def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines) | https://github.com/gv22ga/dlib-face-recognition-android/blob/42d6305cbd85833f2b85bb79b70ab9ab004153c9/tools/lint/cpplint.py#L1270-L1276 |
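Assuming the function above is in scope, a quick check of its behavior on a small buffer:

```python
lines = [
    'int x; /* start of a',
    '   multi-line comment',
    '   end */',
    'int y;',
]
# The scan finds the first line whose stripped text ends with '*/';
# if none exists it returns len(lines).
print(FindNextMultiLineCommentEnd(lines, 0))  # -> 2
print(FindNextMultiLineCommentEnd(lines, 3))  # -> 4 (no end marker found)
```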
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/PIL/TiffImagePlugin.py | python | TiffImageFile._load_libtiff | (self) | return Image.Image.load(self) | Overload method triggered when we detect a compressed tiff
Calls out to libtiff | Overload method triggered when we detect a compressed tiff
Calls out to libtiff | def _load_libtiff(self):
""" Overload method triggered when we detect a compressed tiff
Calls out to libtiff """
pixel = Image.Image.load(self)
if self.tile is None:
raise OSError("cannot load this image")
if not self.tile:
return pixel
self.load_prepare()
if not len(self.tile) == 1:
raise OSError("Not exactly one tile")
# (self._compression, (extents tuple),
# 0, (rawmode, self._compression, fp))
extents = self.tile[0][1]
args = list(self.tile[0][3])
# To be nice on memory footprint, if there's a
# file descriptor, use that instead of reading
# into a string in python.
# libtiff closes the file descriptor, so pass in a dup.
try:
fp = hasattr(self.fp, "fileno") and os.dup(self.fp.fileno())
# flush the file descriptor, prevents error on pypy 2.4+
# should also eliminate the need for fp.tell
# in _seek
if hasattr(self.fp, "flush"):
self.fp.flush()
except OSError:
# io.BytesIO have a fileno, but returns an IOError if
# it doesn't use a file descriptor.
fp = False
if fp:
args[2] = fp
decoder = Image._getdecoder(
self.mode, "libtiff", tuple(args), self.decoderconfig
)
try:
decoder.setimage(self.im, extents)
except ValueError:
raise OSError("Couldn't set the image")
close_self_fp = self._exclusive_fp and not self._is_animated
if hasattr(self.fp, "getvalue"):
# We've got a stringio like thing passed in. Yay for all in memory.
# The decoder needs the entire file in one shot, so there's not
# a lot we can do here other than give it the entire file.
# unless we could do something like get the address of the
# underlying string for stringio.
#
# Rearranging for supporting byteio items, since they have a fileno
# that returns an IOError if there's no underlying fp. Easier to
# deal with here by reordering.
if DEBUG:
print("have getvalue. just sending in a string from getvalue")
n, err = decoder.decode(self.fp.getvalue())
elif fp:
# we've got an actual file on disk, pass in the fp.
if DEBUG:
print("have fileno, calling fileno version of the decoder.")
if not close_self_fp:
self.fp.seek(0)
# 4 bytes, otherwise the trace might error out
n, err = decoder.decode(b"fpfp")
else:
# we have something else.
if DEBUG:
print("don't have fileno or getvalue. just reading")
self.fp.seek(0)
# UNDONE -- so much for that buffer size thing.
n, err = decoder.decode(self.fp.read())
self.tile = []
self.readonly = 0
self.load_end()
# libtiff closed the duplicated fp; we need to close self.fp, if possible
if close_self_fp:
self.fp.close()
self.fp = None # might be shared
if err < 0:
raise OSError(err)
return Image.Image.load(self) | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/PIL/TiffImagePlugin.py#L1093-L1184 |
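The descriptor handling above follows a general idiom worth isolating: because libtiff closes the descriptor it is given, the code hands it `os.dup(fd)` rather than the original. A minimal sketch of that idiom, independent of PIL:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"\x00" * 16)
    path = tmp.name

f = open(path, "rb")
dup_fd = os.dup(f.fileno())  # give the C library the duplicate, not f.fileno()
os.close(dup_fd)             # stands in for the library closing its copy
f.seek(0)                    # the original Python file object is still valid
print(len(f.read()))         # -> 16
f.close()
os.remove(path)
```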
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/richtext.py | python | TextBoxAttr.SetVerticalAlignment | (*args, **kwargs) | return _richtext.TextBoxAttr_SetVerticalAlignment(*args, **kwargs) | SetVerticalAlignment(self, int verticalAlignment) | SetVerticalAlignment(self, int verticalAlignment) | def SetVerticalAlignment(*args, **kwargs):
"""SetVerticalAlignment(self, int verticalAlignment)"""
return _richtext.TextBoxAttr_SetVerticalAlignment(*args, **kwargs) | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L620-L622 |
|
raspberrypi/tools | 13474ee775d0c5ec8a7da4fb0a9fa84187abfc87 | arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/share/gdb/python/gdb/command/explore.py | python | Explorer.return_to_enclosing_type_prompt | () | A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the enclosing type.
Useful when exploring types. | A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the enclosing type.
Useful when exploring types. | def return_to_enclosing_type_prompt():
"""A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the enclosing type.
Useful when exploring types.
"""
raw_input("\nPress enter to return to enclosing type: ") | [
"def",
"return_to_enclosing_type_prompt",
"(",
")",
":",
"raw_input",
"(",
"\"\\nPress enter to return to enclosing type: \"",
")"
] | https://github.com/raspberrypi/tools/blob/13474ee775d0c5ec8a7da4fb0a9fa84187abfc87/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/share/gdb/python/gdb/command/explore.py#L181-L186 |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/core/sorting.py | python | decons_obs_group_ids | (comp_ids, obs_ids, shape, labels, xnull) | return [i8copy(lab[i]) for lab in labels] | reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through | reconstruct labels from observed group ids | def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/sorting.py#L152-L174 |
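The fast path above works because observed group ids are a mixed-radix packing of the per-level labels. A self-contained sketch of that encode/decode round trip, independent of pandas internals:

```python
import numpy as np

def encode_group_index(labels, shape):
    # Pack each level's label as a digit with base shape[level].
    ids = np.zeros_like(labels[0], dtype='i8')
    for lab, size in zip(labels, shape):
        ids = ids * size + lab
    return ids

def decode_group_index(ids, shape):
    # Peel digits off from the least significant level.
    out = []
    for size in reversed(shape):
        out.append(ids % size)
        ids = ids // size
    return out[::-1]

labels = [np.array([0, 1, 2, 1]), np.array([3, 0, 2, 2])]
shape = (3, 4)
ids = encode_group_index(labels, shape)
print(ids)                             # [ 3  4 10  6]
print(decode_group_index(ids, shape))  # recovers both label arrays
```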
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/keras/_impl/keras/engine/training.py | python | Model._test_loop | (self, f, ins, batch_size=None, verbose=0, steps=None) | return outs | Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs. | Abstract method to loop over some data in batches. | def _test_loop(self, f, ins, batch_size=None, verbose=0, steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
num_samples = self._check_num_samples(ins, batch_size, steps, 'steps')
outs = []
if steps is not None:
if verbose == 1:
progbar = Progbar(target=steps)
for step in range(steps):
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
for _ in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step)
for i in range(len(outs)):
outs[i] /= steps
else:
if verbose == 1:
progbar = Progbar(target=num_samples)
batches = _make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], float):
# Do not slice the training phase flag.
ins_batch = _slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = _slice_arrays(ins, batch_ids)
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i in range(len(outs)):
outs[i] /= num_samples
if len(outs) == 1:
return outs[0]
return outs | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/keras/_impl/keras/engine/training.py#L1296-L1366 |
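Stripped of Keras bookkeeping, the loop above is a size-weighted average over batches; weighting each partial result by its slice length makes a ragged final batch average out correctly. A standalone sketch:

```python
import numpy as np

def average_over_batches(batch_eval, num_samples, batch_size):
    total = 0.0
    for start in range(0, num_samples, batch_size):
        end = min(start + batch_size, num_samples)
        # Weight the per-batch value by the batch length, as the loop above
        # does with `batch_outs * len(batch_ids)`.
        total += batch_eval(start, end) * (end - start)
    return total / num_samples

data = np.arange(10, dtype=float)
print(average_over_batches(lambda s, e: data[s:e].mean(), len(data), 4))
# -> 4.5, the exact mean despite the final batch having only 2 samples
```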
|
SFTtech/openage | d6a08c53c48dc1e157807471df92197f6ca9e04d | openage/convert/entity_object/conversion/aoc/genie_unit.py | python | GenieGameEntityGroup.get_head_unit_id | (self) | return head_unit["id0"].get_value() | Return the obj_id of the first unit in the line. | Return the obj_id of the first unit in the line. | def get_head_unit_id(self):
"""
Return the obj_id of the first unit in the line.
"""
head_unit = self.get_head_unit()
return head_unit["id0"].get_value() | [
"def",
"get_head_unit_id",
"(",
"self",
")",
":",
"head_unit",
"=",
"self",
".",
"get_head_unit",
"(",
")",
"return",
"head_unit",
"[",
"\"id0\"",
"]",
".",
"get_value",
"(",
")"
] | https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/entity_object/conversion/aoc/genie_unit.py#L508-L513 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/swagger_spec_validator/validator20.py | python | validate_property_default | (property_spec, deref) | Validates that default values for definitions are of the property type.
Enforces presence of "type" in case of "default" presence.
:param property_spec: schema object (#/definitions/<def_name>/properties/<property_name>
:param deref: callable that dereferences $refs
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError` | Validates that default values for definitions are of the property type.
Enforces presence of "type" in case of "default" presence. | [
"Validates",
"that",
"default",
"values",
"for",
"definitions",
"are",
"of",
"the",
"property",
"type",
".",
"Enforces",
"presence",
"of",
"type",
"in",
"case",
"of",
"default",
"presence",
"."
] | def validate_property_default(property_spec, deref):
"""
Validates that default values for definitions are of the property type.
Enforces presence of "type" in case of "default" presence.
:param property_spec: schema object (#/definitions/<def_name>/properties/<property_name>
:param deref: callable that dereferences $refs
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
"""
deref_property_spec = deref(property_spec)
if 'default' in deref_property_spec:
if deref_property_spec['default'] is None and deref_property_spec.get('x-nullable', False) is True:
# In case x-nullable property is set to true, null is a valid default
return
validate_value_type(schema=property_spec, value=deref_property_spec['default'], deref=deref) | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/swagger_spec_validator/validator20.py#L423-L439 |
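A hedged usage sketch: with no `$ref`s to resolve, the `deref` callable can be the identity function. The schema dictionaries are invented for illustration.

```python
identity = lambda spec: spec  # no $refs in these toy schemas

ok_spec = {'type': 'integer', 'default': 3}
nullable_spec = {'type': 'string', 'x-nullable': True, 'default': None}

validate_property_default(ok_spec, identity)        # passes silently
validate_property_default(nullable_spec, identity)  # null default is allowed
```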
raspberrypi/tools | 13474ee775d0c5ec8a7da4fb0a9fa84187abfc87 | arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/share/gdb/python/gdb/command/explore.py | python | Explorer.is_scalar_type | (type) | return type.code in Explorer._SCALAR_TYPE_LIST | Checks whether a type is a scalar type.
A type is a scalar type of its type is
gdb.TYPE_CODE_CHAR or
gdb.TYPE_CODE_INT or
gdb.TYPE_CODE_BOOL or
gdb.TYPE_CODE_FLT or
gdb.TYPE_CODE_VOID or
gdb.TYPE_CODE_ENUM.
Arguments:
type: The type to be checked.
Returns:
'True' if 'type' is a scalar type. 'False' otherwise. | Checks whether a type is a scalar type.
A type is a scalar type of its type is
gdb.TYPE_CODE_CHAR or
gdb.TYPE_CODE_INT or
gdb.TYPE_CODE_BOOL or
gdb.TYPE_CODE_FLT or
gdb.TYPE_CODE_VOID or
gdb.TYPE_CODE_ENUM. | def is_scalar_type(type):
"""Checks whether a type is a scalar type.
A type is a scalar type of its type is
gdb.TYPE_CODE_CHAR or
gdb.TYPE_CODE_INT or
gdb.TYPE_CODE_BOOL or
gdb.TYPE_CODE_FLT or
gdb.TYPE_CODE_VOID or
gdb.TYPE_CODE_ENUM.
Arguments:
type: The type to be checked.
Returns:
'True' if 'type' is a scalar type. 'False' otherwise.
"""
return type.code in Explorer._SCALAR_TYPE_LIST | https://github.com/raspberrypi/tools/blob/13474ee775d0c5ec8a7da4fb0a9fa84187abfc87/arm-bcm2708/arm-rpi-4.9.3-linux-gnueabihf/share/gdb/python/gdb/command/explore.py#L140-L156 |
|
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/resource_variable_ops.py | python | BaseResourceVariable.scatter_nd_update | (self, indices, updates, name=None) | return self._lazy_read(gen_state_ops.resource_scatter_nd_update(
self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
name=name)) | Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor with
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_update(indices, updates)
with tf.compat.v1.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered update has completed. | Applies sparse assignment to individual values or slices in a Variable. | def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor with
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_update(indices, updates)
with tf.compat.v1.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered update has completed.
"""
return self._lazy_read(gen_state_ops.resource_scatter_nd_update(
self.handle, indices, ops.convert_to_tensor(updates, self.dtype),
name=name)) | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/resource_variable_ops.py#L1132-L1180 |
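The docstring's worked example maps directly onto NumPy fancy-index assignment; this sketch re-runs the same numbers without TensorFlow.

```python
import numpy as np

ref = np.array([1, 2, 3, 4, 5, 6, 7, 8])
indices = np.array([4, 3, 1, 7])
updates = np.array([9, 10, 11, 12])
ref[indices] = updates          # scatter assignment into the listed slots
print(ref)                      # [ 1 11  3 10  9  6  7 12]
```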
|
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TNEANetAFltI.IsDeleted | (self) | return _snap.TNEANetAFltI_IsDeleted(self) | IsDeleted(TNEANetAFltI self) -> bool
Parameters:
self: TNEANetAFltI const * | IsDeleted(TNEANetAFltI self) -> bool | def IsDeleted(self):
"""
IsDeleted(TNEANetAFltI self) -> bool
Parameters:
self: TNEANetAFltI const *
"""
return _snap.TNEANetAFltI_IsDeleted(self) | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L21183-L21191 |
|
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/buildscripts/linter/git.py | python | get_files_to_check_working_tree | (filter_function) | return valid_files | Get a list of files to check from the working tree.
This will pick up files not managed by git. | Get a list of files to check from the working tree. | def get_files_to_check_working_tree(filter_function):
# type: (Callable[[str], bool]) -> List[str]
"""
Get a list of files to check from the working tree.
This will pick up files not managed by git.
"""
repos = get_repos()
valid_files = list(
itertools.chain.from_iterable(
[r.get_working_tree_candidates(filter_function) for r in repos]))
return valid_files | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/linter/git.py#L130-L143 |
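A hedged usage sketch: the filter function simply decides per path whether a candidate is kept; this assumes the surrounding buildscripts package and a git checkout are available.

```python
def is_python_file(path):
    # Keep only Python sources from the working-tree candidates.
    return path.endswith('.py')

for path in get_files_to_check_working_tree(is_python_file):
    print(path)
```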
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/more-itertools/py3/more_itertools/more.py | python | difference | (iterable, func=sub, *, initial=None) | return chain(first, starmap(func, zip(b, a))) | This function is the inverse of :func:`itertools.accumulate`. By default
it will compute the first difference of *iterable* using
:func:`operator.sub`:
>>> from itertools import accumulate
>>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
*func* defaults to :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120]
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
If the *initial* keyword is set, the first element will be skipped when
computing successive differences.
>>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
>>> list(difference(it, initial=10))
[1, 2, 3] | This function is the inverse of :func:`itertools.accumulate`. By default
it will compute the first difference of *iterable* using
:func:`operator.sub`: | def difference(iterable, func=sub, *, initial=None):
"""This function is the inverse of :func:`itertools.accumulate`. By default
it will compute the first difference of *iterable* using
:func:`operator.sub`:
>>> from itertools import accumulate
>>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
>>> list(difference(iterable))
[0, 1, 2, 3, 4]
*func* defaults to :func:`operator.sub`, but other functions can be
specified. They will be applied as follows::
A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
For example, to do progressive division:
>>> iterable = [1, 2, 6, 24, 120]
>>> func = lambda x, y: x // y
>>> list(difference(iterable, func))
[1, 2, 3, 4, 5]
If the *initial* keyword is set, the first element will be skipped when
computing successive differences.
>>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
>>> list(difference(it, initial=10))
[1, 2, 3]
"""
a, b = tee(iterable)
try:
first = [next(b)]
except StopIteration:
return iter([])
if initial is not None:
first = []
return chain(first, starmap(func, zip(b, a))) | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/more-itertools/py3/more_itertools/more.py#L2648-L2687 |
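A quick round trip confirming the inverse relationship stated in the docstring, assuming `difference` is importable (e.g. from `more_itertools`):

```python
from itertools import accumulate

original = [3, 1, 4, 1, 5]
summed = list(accumulate(original))   # [3, 4, 8, 9, 14]
print(list(difference(summed)))       # [3, 1, 4, 1, 5]
```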
|
yrnkrn/zapcc | c6a8aa30006d997eff0d60fd37b0e62b8aa0ea50 | tools/clang/bindings/python/clang/cindex.py | python | Cursor.get_field_offsetof | (self) | return conf.lib.clang_Cursor_getOffsetOfField(self) | Returns the offset of the FIELD_DECL pointed to by this Cursor. | Returns the offset of the FIELD_DECL pointed to by this Cursor. | def get_field_offsetof(self):
"""Returns the offsetof the FIELD_DECL pointed by this Cursor."""
return conf.lib.clang_Cursor_getOffsetOfField(self) | [
"def",
"get_field_offsetof",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_Cursor_getOffsetOfField",
"(",
"self",
")"
] | https://github.com/yrnkrn/zapcc/blob/c6a8aa30006d997eff0d60fd37b0e62b8aa0ea50/tools/clang/bindings/python/clang/cindex.py#L1839-L1841 |
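A hedged sketch of using this binding; it requires the clang Python bindings with a matching libclang installed, and the struct source is invented. `get_field_offsetof` reports the offset in bits.

```python
from clang.cindex import CursorKind, Index

src = 'struct S { char a; int b; };'
tu = Index.create().parse('t.c', unsaved_files=[('t.c', src)])
struct = next(c for c in tu.cursor.get_children()
              if c.kind == CursorKind.STRUCT_DECL)
for field in struct.get_children():
    if field.kind == CursorKind.FIELD_DECL:
        print(field.spelling, field.get_field_offsetof())
# Typically prints: a 0 / b 32 (int aligned to 4 bytes)
```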
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py | python | is_appengine_sandbox | () | return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27" | Reports if the app is running in the first generation sandbox.
The second generation runtimes are technically still in a sandbox, but it
is much less restrictive, so generally you shouldn't need to check for it.
see https://cloud.google.com/appengine/docs/standard/runtimes | Reports if the app is running in the first generation sandbox. | def is_appengine_sandbox():
"""Reports if the app is running in the first generation sandbox.
The second generation runtimes are technically still in a sandbox, but it
is much less restrictive, so generally you shouldn't need to check for it.
see https://cloud.google.com/appengine/docs/standard/runtimes
"""
return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27" | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py#L12-L19 |
|
microsoft/CNTK | e9396480025b9ca457d26b6f33dd07c474c6aa04 | bindings/python/cntk/ops/sequence/__init__.py | python | input_variable | (shape, dtype=default_override_or(np.float32), needs_gradient=False, is_sparse=False,
sequence_axis=Axis.default_dynamic_axis(), name='') | return input_variable(shape=shape, dtype=dtype, needs_gradient=needs_gradient, is_sparse=is_sparse, dynamic_axes=[Axis.default_batch_axis(), sequence_axis], name=name) | input_variable(shape, dtype=np.float32, needs_gradient=False, is_sparse=False, sequence_axis=Axis.default_dynamic_axis(), name='')
It creates an input in the network: a place where data,
such as features and labels, should be provided.
Args:
shape (tuple or int): the shape of the input tensor
dtype (np.float32 or np.float64 or np.float16): data type. Default is np.float32.
needs_gradient (bool, optional): whether to back-propagate to it or not. False by default.
is_sparse (bool, optional): whether the variable is sparse (`False` by default)
sequence_axis (:class:`~cntk.axis.Axis`): a dynamic axis (e.g., default_dynamic_axis())
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.variables.Variable` | input_variable(shape, dtype=np.float32, needs_gradient=False, is_sparse=False, sequence_axis=Axis.default_dynamic_axis(), name='') | def input_variable(shape, dtype=default_override_or(np.float32), needs_gradient=False, is_sparse=False,
sequence_axis=Axis.default_dynamic_axis(), name=''):
'''input_variable(shape, dtype=np.float32, needs_gradient=False, is_sparse=False, sequence_axis=Axis.default_dynamic_axis(), name='')
It creates an input in the network: a place where data,
such as features and labels, should be provided.
Args:
shape (tuple or int): the shape of the input tensor
dtype (np.float32 or np.float64 or np.float16): data type. Default is np.float32.
needs_gradient (bool, optional): whether to back-propagate to it or not. False by default.
is_sparse (bool, optional): whether the variable is sparse (`False` by default)
sequence_axis (:class:`~cntk.axis.Axis`): a dynamic axis (e.g., default_dynamic_axis())
name (str, optional): the name of the Function instance in the network
Returns:
:class:`~cntk.variables.Variable`
'''
from ... import input_variable
return input_variable(shape=shape, dtype=dtype, needs_gradient=needs_gradient, is_sparse=is_sparse, dynamic_axes=[Axis.default_batch_axis(), sequence_axis], name=name) | https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/ops/sequence/__init__.py#L47-L66 |
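A hedged usage sketch, assuming a working CNTK 2.x installation: creating a variable-length sequence input of 3-dimensional feature vectors.

```python
import cntk as C

features = C.sequence.input_variable(shape=(3,), name='features')
print(features.shape)         # (3,)
print(features.dynamic_axes)  # default batch axis plus a sequence axis
```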
|
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/estimators/dnn.py | python | DNNRegressor.predict | (self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True) | return super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable) | Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions. | Returns predictions for given features. | def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable) | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/estimators/dnn.py#L714-L749 |
|
google/llvm-propeller | 45c226984fe8377ebfb2ad7713c680d652ba678d | llvm/utils/benchmark/mingw.py | python | find_in_path | (file, path=None) | return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path))) | Attempts to find an executable in the path | Attempts to find an executable in the path | def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if type(path) is type(''):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path))) | https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/llvm/utils/benchmark/mingw.py#L86-L97 |
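Assuming `find_in_path` from above is in scope, a small usage sketch; note it returns every match on PATH, not just the first.

```python
matches = find_in_path('python3')
print(matches[0] if matches else 'not found')  # e.g. '/usr/bin/python3'
```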
|
yushroom/FishEngine | a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9 | Script/reflect/clang/cindex.py | python | CursorKind.is_expression | (self) | return conf.lib.clang_isExpression(self) | Test if this is an expression kind. | Test if this is an expression kind. | def is_expression(self):
"""Test if this is an expression kind."""
return conf.lib.clang_isExpression(self) | https://github.com/yushroom/FishEngine/blob/a4b9fb9b0a6dc202f7990e75f4b7d8d5163209d9/Script/reflect/clang/cindex.py#L609-L611 |
|
include-what-you-use/include-what-you-use | 208fbfffa5d69364b9f78e427caa443441279283 | iwyu-check-license-header.py | python | main | (filenames, add_if_missing) | return len(errors) | Entry point.
Checks license header of all filenames provided.
Returns zero if all license headers are OK, non-zero otherwise. | Entry point. | def main(filenames, add_if_missing):
""" Entry point.
Checks license header of all filenames provided.
Returns zero if all license headers are OK, non-zero otherwise.
"""
errors = []
for filename in filenames:
if os.path.isdir(filename):
continue
checker = File.parse(filename)
if not checker:
# TODO: Consider printing a warning here in verbose mode.
continue
if not checker.check_license_header():
errors.extend(checker.errors)
if add_if_missing and not checker.has_license_header():
checker.add_license_header()
for err in errors:
print(err)
return len(errors) | https://github.com/include-what-you-use/include-what-you-use/blob/208fbfffa5d69364b9f78e427caa443441279283/iwyu-check-license-header.py#L244-L269 |
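A hedged driver sketch for the entry point above: since `main` returns the error count, it doubles as a process exit status (0 means every header passed).

```python
import sys

if __name__ == '__main__':
    sys.exit(main(sys.argv[1:], add_if_missing=False))
```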
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/aui.py | python | AuiMDIClientWindow.SetActiveChild | (*args, **kwargs) | return _aui.AuiMDIClientWindow_SetActiveChild(*args, **kwargs) | SetActiveChild(self, AuiMDIChildFrame pChildFrame) | SetActiveChild(self, AuiMDIChildFrame pChildFrame) | def SetActiveChild(*args, **kwargs):
"""SetActiveChild(self, AuiMDIChildFrame pChildFrame)"""
return _aui.AuiMDIClientWindow_SetActiveChild(*args, **kwargs) | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L1643-L1645 |
|
acbull/Unbiased_LambdaMart | 7c39abe5caa18ca07df2d23c2db392916d92956c | Unbias_LightGBM/python-package/lightgbm/basic.py | python | _InnerPredictor.__pred_for_np2d | (self, mat, num_iteration, predict_type) | return preds, mat.shape[0] | Predict for a 2-D numpy matrix. | Predict for a 2-D numpy matrix. | def __pred_for_np2d(self, mat, num_iteration, predict_type):
"""
Predict for a 2-D numpy matrix.
"""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else:
"""change non-float data to float data, need to copy"""
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0],
predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0] | [
"def",
"__pred_for_np2d",
"(",
"self",
",",
"mat",
",",
"num_iteration",
",",
"predict_type",
")",
":",
"if",
"len",
"(",
"mat",
".",
"shape",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Input numpy.ndarray or list must be 2 dimensional'",
")",
"if",
"mat",
".",
"dtype",
"==",
"np",
".",
"float32",
"or",
"mat",
".",
"dtype",
"==",
"np",
".",
"float64",
":",
"data",
"=",
"np",
".",
"array",
"(",
"mat",
".",
"reshape",
"(",
"mat",
".",
"size",
")",
",",
"dtype",
"=",
"mat",
".",
"dtype",
",",
"copy",
"=",
"False",
")",
"else",
":",
"\"\"\"change non-float data to float data, need to copy\"\"\"",
"data",
"=",
"np",
".",
"array",
"(",
"mat",
".",
"reshape",
"(",
"mat",
".",
"size",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"ptr_data",
",",
"type_ptr_data",
",",
"_",
"=",
"c_float_array",
"(",
"data",
")",
"n_preds",
"=",
"self",
".",
"__get_num_preds",
"(",
"num_iteration",
",",
"mat",
".",
"shape",
"[",
"0",
"]",
",",
"predict_type",
")",
"preds",
"=",
"np",
".",
"zeros",
"(",
"n_preds",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"out_num_preds",
"=",
"ctypes",
".",
"c_int64",
"(",
"0",
")",
"_safe_call",
"(",
"_LIB",
".",
"LGBM_BoosterPredictForMat",
"(",
"self",
".",
"handle",
",",
"ptr_data",
",",
"ctypes",
".",
"c_int",
"(",
"type_ptr_data",
")",
",",
"ctypes",
".",
"c_int",
"(",
"mat",
".",
"shape",
"[",
"0",
"]",
")",
",",
"ctypes",
".",
"c_int",
"(",
"mat",
".",
"shape",
"[",
"1",
"]",
")",
",",
"ctypes",
".",
"c_int",
"(",
"C_API_IS_ROW_MAJOR",
")",
",",
"ctypes",
".",
"c_int",
"(",
"predict_type",
")",
",",
"ctypes",
".",
"c_int",
"(",
"num_iteration",
")",
",",
"c_str",
"(",
"self",
".",
"pred_parameter",
")",
",",
"ctypes",
".",
"byref",
"(",
"out_num_preds",
")",
",",
"preds",
".",
"ctypes",
".",
"data_as",
"(",
"ctypes",
".",
"POINTER",
"(",
"ctypes",
".",
"c_double",
")",
")",
")",
")",
"if",
"n_preds",
"!=",
"out_num_preds",
".",
"value",
":",
"raise",
"ValueError",
"(",
"\"Wrong length for predict results\"",
")",
"return",
"preds",
",",
"mat",
".",
"shape",
"[",
"0",
"]"
] | https://github.com/acbull/Unbiased_LambdaMart/blob/7c39abe5caa18ca07df2d23c2db392916d92956c/Unbias_LightGBM/python-package/lightgbm/basic.py#L477-L508 |
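
A minimal usage sketch for the record above, assuming a stock LightGBM install (this record is from a fork whose public surface matches upstream here); __pred_for_np2d is private and is reached through Booster.predict on a 2-D numpy array, with placeholder data:

    import numpy as np
    import lightgbm as lgb

    X = np.random.rand(100, 5)             # 100 rows, 5 features (placeholder data)
    y = np.random.randint(0, 2, 100)
    booster = lgb.train({"objective": "binary"}, lgb.Dataset(X, y))
    preds = booster.predict(X[:10])        # 2-D ndarray input dispatches to __pred_for_np2d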
|
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | xmlDoc.htmlSaveFile | (self, filename) | return ret | Dump an HTML document to a file. If @filename is "-" the
stdout file is used. | Dump an HTML document to a file. If | [
"Dump",
"an",
"HTML",
"document",
"to",
"a",
"file",
".",
"If"
] | def htmlSaveFile(self, filename):
"""Dump an HTML document to a file. If @filename is "-" the
stdout file is used. """
ret = libxml2mod.htmlSaveFile(filename, self._o)
return ret | [
"def",
"htmlSaveFile",
"(",
"self",
",",
"filename",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"htmlSaveFile",
"(",
"filename",
",",
"self",
".",
"_o",
")",
"return",
"ret"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L4051-L4055 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/dataset/vision/py_transforms.py | python | CenterCrop.__call__ | (self, img) | return util.center_crop(img, self.size) | Call method.
Args:
img (PIL Image): Image to be center cropped.
Returns:
PIL Image, cropped image. | Call method. | [
"Call",
"method",
"."
] | def __call__(self, img):
"""
Call method.
Args:
img (PIL Image): Image to be center cropped.
Returns:
PIL Image, cropped image.
"""
return util.center_crop(img, self.size) | [
"def",
"__call__",
"(",
"self",
",",
"img",
")",
":",
"return",
"util",
".",
"center_crop",
"(",
"img",
",",
"self",
".",
"size",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/vision/py_transforms.py#L711-L721 |
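
A minimal usage sketch, assuming a MindSpore 1.x-era install and a PIL image (the file name is a placeholder); the transform is constructed with a target size and then called on the image:

    from PIL import Image
    import mindspore.dataset.vision.py_transforms as py_vision

    img = Image.open("sample.jpg")     # placeholder path
    crop = py_vision.CenterCrop(224)   # target size in pixels
    cropped = crop(img)                # invokes the __call__ shown above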
|
indutny/candor | 48e7260618f5091c80a3416828e2808cad3ea22e | tools/gyp/pylib/gyp/generator/android.py | python | AndroidMkWriter.ComputeAndroidModule | (self, spec) | return make.StringToMakefileVariable(name) | Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names. | Return the Android module name used for a gyp spec. | [
"Return",
"the",
"Android",
"module",
"name",
"used",
"for",
"a",
"gyp",
"spec",
"."
] | def ComputeAndroidModule(self, spec):
"""Return the Android module name used for a gyp spec.
We use the complete qualified target name to avoid collisions between
duplicate targets in different directories. We also add a suffix to
distinguish gyp-generated module names.
"""
if self.type == 'shared_library':
# For reasons of convention, the Android build system requires that all
# shared library modules are named 'libfoo' when generating -l flags.
prefix = 'lib_'
else:
prefix = ''
if spec['toolset'] == 'host':
suffix = '_host_gyp'
else:
suffix = '_gyp'
if self.path:
name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
else:
name = '%s%s%s' % (prefix, self.target, suffix)
return make.StringToMakefileVariable(name) | [
"def",
"ComputeAndroidModule",
"(",
"self",
",",
"spec",
")",
":",
"if",
"self",
".",
"type",
"==",
"'shared_library'",
":",
"# For reasons of convention, the Android build system requires that all",
"# shared library modules are named 'libfoo' when generating -l flags.",
"prefix",
"=",
"'lib_'",
"else",
":",
"prefix",
"=",
"''",
"if",
"spec",
"[",
"'toolset'",
"]",
"==",
"'host'",
":",
"suffix",
"=",
"'_host_gyp'",
"else",
":",
"suffix",
"=",
"'_gyp'",
"if",
"self",
".",
"path",
":",
"name",
"=",
"'%s%s_%s%s'",
"%",
"(",
"prefix",
",",
"self",
".",
"path",
",",
"self",
".",
"target",
",",
"suffix",
")",
"else",
":",
"name",
"=",
"'%s%s%s'",
"%",
"(",
"prefix",
",",
"self",
".",
"target",
",",
"suffix",
")",
"return",
"make",
".",
"StringToMakefileVariable",
"(",
"name",
")"
] | https://github.com/indutny/candor/blob/48e7260618f5091c80a3416828e2808cad3ea22e/tools/gyp/pylib/gyp/generator/android.py#L571-L596 |
|
Dobiasd/frugally-deep | 99d9378c6ef537a209bcb2a102e953899a6ab0e3 | keras_export/convert_model.py | python | singleton_list_to_value | (value_or_values) | return value_or_values | Leaves non-list values untouched.
Raises an Exception in case the input list does not have exactly one element. | Leaves non-list values untouched.
Raises an Exception in case the input list does not have exactly one element. | [
"Leaves",
"non",
"-",
"list",
"values",
"untouched",
".",
"Raises",
"an",
"Exception",
"in",
"case",
"the",
"input",
"list",
"does",
"not",
"have",
"exactly",
"one",
"element",
"."
] | def singleton_list_to_value(value_or_values):
"""
Leaves non-list values untouched.
Raises an Exception in case the input list does not have exactly one element.
"""
if isinstance(value_or_values, list):
assert len(value_or_values) == 1
return value_or_values[0]
return value_or_values | [
"def",
"singleton_list_to_value",
"(",
"value_or_values",
")",
":",
"if",
"isinstance",
"(",
"value_or_values",
",",
"list",
")",
":",
"assert",
"len",
"(",
"value_or_values",
")",
"==",
"1",
"return",
"value_or_values",
"[",
"0",
"]",
"return",
"value_or_values"
] | https://github.com/Dobiasd/frugally-deep/blob/99d9378c6ef537a209bcb2a102e953899a6ab0e3/keras_export/convert_model.py#L755-L763 |
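
A quick behavioral check, assuming the function above is in scope:

    assert singleton_list_to_value([42]) == 42        # unwraps a one-element list
    assert singleton_list_to_value("abc") == "abc"    # non-list values pass through
    # singleton_list_to_value([1, 2]) would trip the internal assert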
|
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/layers/sequence_lod.py | python | sequence_unpad | (x, length, name=None) | return out | :api_attr: Static Graph
**Note**:
**The input of the OP is Tensor and the output is LoDTensor. For padding operation, See:** :ref:`api_fluid_layers_sequence_pad`
The OP removes the padding data from the input based on the length information and returns a LoDTensor.
.. code-block:: text
Case 1:
Given input Variable **x**:
x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0]],
in which there are 3 sequences padded to length 5, and the actual length
specified by input Variable **length**:
length.data = [2, 3, 4],
after unpadding, the output Variable will be:
out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
out.lod = [[0, 2, 5, 9]]
Args:
x(Variable): A Tensor which contains padding data, and its shape size can not be less than 2.
Supported data types: float32, float64, int32, int64.
length(Variable): A 1D Tensor that stores the actual length of each sample, and the Tensor
has the same shape with the 0th dimension of the X . Supported data types: int64.
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoDTensor whose recursive sequence length is consistent with the information of the length parameter and it has the same data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.fluid as fluid
import numpy
# pad data
x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = paddle.assign(numpy.array([0.0], dtype=numpy.float32))
pad_data, len = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value)
# unpad data
unpad_data = paddle.static.nn.sequence_unpad(x=pad_data, length=len) | :api_attr: Static Graph | [
":",
"api_attr",
":",
"Static",
"Graph"
] | def sequence_unpad(x, length, name=None):
"""
:api_attr: Static Graph
**Note**:
**The input of the OP is Tensor and the output is LoDTensor. For padding operation, See:** :ref:`api_fluid_layers_sequence_pad`
The OP removes the padding data from the input based on the length information and returns a LoDTensor.
.. code-block:: text
Case 1:
Given input Variable **x**:
x.data = [[ 1.0, 2.0, 3.0, 4.0, 5.0],
[ 6.0, 7.0, 8.0, 9.0, 10.0],
[11.0, 12.0, 13.0, 14.0, 15.0]],
in which there are 3 sequences padded to length 5, and the actual length
specified by input Variable **length**:
length.data = [2, 3, 4],
after unpadding, the output Variable will be:
out.data = [[1.0, 2.0, 6.0, 7.0, 8.0, 11.0, 12.0, 13.0, 14.0]]
out.lod = [[0, 2, 5, 9]]
Args:
x(Variable): A Tensor which contains padding data, and its shape size can not be less than 2.
Supported data types: float32, float64, int32, int64.
length(Variable): A 1D Tensor that stores the actual length of each sample, and the Tensor
has the same shape with the 0th dimension of the X . Supported data types: int64.
name(str|None): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
Variable: A LoDTensor whose recursive sequence length is consistent with the information of the length parameter and it has the same data type with input.
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
import paddle.fluid as fluid
import numpy
# pad data
x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1)
pad_value = paddle.assign(numpy.array([0.0], dtype=numpy.float32))
pad_data, len = paddle.static.nn.sequence_pad(x=x, pad_value=pad_value)
# unpad data
unpad_data = paddle.static.nn.sequence_unpad(x=pad_data, length=len)
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
helper = LayerHelper('sequence_unpad', **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'fluid.layers.sequence_unpad')
check_variable_and_dtype(length, 'length', ['int64'],
'fluid.layers.sequence_unpad')
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
length.stop_gradient = True
helper.append_op(
type='sequence_unpad',
inputs={'X': x,
'Length': length},
outputs={'Out': out})
return out | [
"def",
"sequence_unpad",
"(",
"x",
",",
"length",
",",
"name",
"=",
"None",
")",
":",
"assert",
"not",
"in_dygraph_mode",
"(",
")",
",",
"(",
"\"sequence layer is not supported in dygraph mode yet.\"",
")",
"helper",
"=",
"LayerHelper",
"(",
"'sequence_unpad'",
",",
"*",
"*",
"locals",
"(",
")",
")",
"check_variable_and_dtype",
"(",
"x",
",",
"'x'",
",",
"[",
"'float32'",
",",
"'float64'",
",",
"'int32'",
",",
"'int64'",
"]",
",",
"'fluid.layers.sequence_unpad'",
")",
"check_variable_and_dtype",
"(",
"length",
",",
"'length'",
",",
"[",
"'int64'",
"]",
",",
"'fluid.layers.sequence_unpad'",
")",
"dtype",
"=",
"helper",
".",
"input_dtype",
"(",
"input_param_name",
"=",
"'x'",
")",
"out",
"=",
"helper",
".",
"create_variable_for_type_inference",
"(",
"dtype",
")",
"length",
".",
"stop_gradient",
"=",
"True",
"helper",
".",
"append_op",
"(",
"type",
"=",
"'sequence_unpad'",
",",
"inputs",
"=",
"{",
"'X'",
":",
"x",
",",
"'Length'",
":",
"length",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"out",
"}",
")",
"return",
"out"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/layers/sequence_lod.py#L1023-L1097 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/psutil/_pssunos.py | python | cpu_times | () | return scputimes(*[sum(x) for x in zip(*ret)]) | Return system-wide CPU times as a named tuple | Return system-wide CPU times as a named tuple | [
"Return",
"system",
"-",
"wide",
"CPU",
"times",
"as",
"a",
"named",
"tuple"
] | def cpu_times():
"""Return system-wide CPU times as a named tuple"""
ret = cext.per_cpu_times()
return scputimes(*[sum(x) for x in zip(*ret)]) | [
"def",
"cpu_times",
"(",
")",
":",
"ret",
"=",
"cext",
".",
"per_cpu_times",
"(",
")",
"return",
"scputimes",
"(",
"*",
"[",
"sum",
"(",
"x",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"ret",
")",
"]",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/psutil/_pssunos.py#L172-L175 |
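
The record above is the SunOS backend; the public call is identical on any platform (field names beyond user, system and idle vary):

    import psutil

    times = psutil.cpu_times()                    # named tuple of system-wide CPU times
    print(times.user, times.system, times.idle)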
|
zlgopen/awtk | 2c49e854a78749d9092907c027a7fba9062be549 | 3rd/mbedtls/scripts/assemble_changelog.py | python | ChangeLog.write | (self, filename) | Write the changelog to the specified file. | Write the changelog to the specified file. | [
"Write",
"the",
"changelog",
"to",
"the",
"specified",
"file",
"."
] | def write(self, filename):
"""Write the changelog to the specified file.
"""
with open(filename, 'wb') as out:
out.write(self.header)
out.write(self.top_version_title)
for title, body in self.categories.items():
if not body:
continue
out.write(self.format.format_category(title, body))
out.write(self.trailer) | [
"def",
"write",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"out",
":",
"out",
".",
"write",
"(",
"self",
".",
"header",
")",
"out",
".",
"write",
"(",
"self",
".",
"top_version_title",
")",
"for",
"title",
",",
"body",
"in",
"self",
".",
"categories",
".",
"items",
"(",
")",
":",
"if",
"not",
"body",
":",
"continue",
"out",
".",
"write",
"(",
"self",
".",
"format",
".",
"format_category",
"(",
"title",
",",
"body",
")",
")",
"out",
".",
"write",
"(",
"self",
".",
"trailer",
")"
] | https://github.com/zlgopen/awtk/blob/2c49e854a78749d9092907c027a7fba9062be549/3rd/mbedtls/scripts/assemble_changelog.py#L244-L254 |
ycm-core/ycmd | fc0fb7e5e15176cc5a2a30c80956335988c6b59a | ycmd/completers/cs/cs_completer.py | python | CsharpCompleter.ServerIsReady | ( self ) | return self._CheckAllRunning( lambda i: i.ServerIsReady() ) | Check if our OmniSharp server is ready (loaded solution file). | Check if our OmniSharp server is ready (loaded solution file). | [
"Check",
"if",
"our",
"OmniSharp",
"server",
"is",
"ready",
"(",
"loaded",
"solution",
"file",
")",
"."
] | def ServerIsReady( self ):
""" Check if our OmniSharp server is ready (loaded solution file)."""
return self._CheckAllRunning( lambda i: i.ServerIsReady() ) | [
"def",
"ServerIsReady",
"(",
"self",
")",
":",
"return",
"self",
".",
"_CheckAllRunning",
"(",
"lambda",
"i",
":",
"i",
".",
"ServerIsReady",
"(",
")",
")"
] | https://github.com/ycm-core/ycmd/blob/fc0fb7e5e15176cc5a2a30c80956335988c6b59a/ycmd/completers/cs/cs_completer.py#L367-L369 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/pandas/py2/pandas/io/pytables.py | python | Table.read_coordinates | (self, where=None, start=None, stop=None, **kwargs) | return Index(coords) | select coordinates (row numbers) from a table; return the
coordinates object | select coordinates (row numbers) from a table; return the
coordinates object | [
"select",
"coordinates",
"(",
"row",
"numbers",
")",
"from",
"a",
"table",
";",
"return",
"the",
"coordinates",
"object"
] | def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords) | [
"def",
"read_coordinates",
"(",
"self",
",",
"where",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# validate the version",
"self",
".",
"validate_version",
"(",
"where",
")",
"# infer the data kind",
"if",
"not",
"self",
".",
"infer_axes",
"(",
")",
":",
"return",
"False",
"# create the selection",
"self",
".",
"selection",
"=",
"Selection",
"(",
"self",
",",
"where",
"=",
"where",
",",
"start",
"=",
"start",
",",
"stop",
"=",
"stop",
",",
"*",
"*",
"kwargs",
")",
"coords",
"=",
"self",
".",
"selection",
".",
"select_coords",
"(",
")",
"if",
"self",
".",
"selection",
".",
"filter",
"is",
"not",
"None",
":",
"for",
"field",
",",
"op",
",",
"filt",
"in",
"self",
".",
"selection",
".",
"filter",
".",
"format",
"(",
")",
":",
"data",
"=",
"self",
".",
"read_column",
"(",
"field",
",",
"start",
"=",
"coords",
".",
"min",
"(",
")",
",",
"stop",
"=",
"coords",
".",
"max",
"(",
")",
"+",
"1",
")",
"coords",
"=",
"coords",
"[",
"op",
"(",
"data",
".",
"iloc",
"[",
"coords",
"-",
"coords",
".",
"min",
"(",
")",
"]",
",",
"filt",
")",
".",
"values",
"]",
"return",
"Index",
"(",
"coords",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/io/pytables.py#L3791-L3814 |
|
bh107/bohrium | 5b83e7117285fefc7779ed0e9acb0f8e74c7e068 | bridge/py_api/bohrium_api/messaging.py | python | gpu_disable | () | return msg("GPU: disable") | Disable the GPU backend in the current runtime stack | Disable the GPU backend in the current runtime stack | [
"Disable",
"the",
"GPU",
"backend",
"in",
"the",
"current",
"runtime",
"stack"
] | def gpu_disable():
"""Disable the GPU backend in the current runtime stack"""
return msg("GPU: disable") | [
"def",
"gpu_disable",
"(",
")",
":",
"return",
"msg",
"(",
"\"GPU: disable\"",
")"
] | https://github.com/bh107/bohrium/blob/5b83e7117285fefc7779ed0e9acb0f8e74c7e068/bridge/py_api/bohrium_api/messaging.py#L19-L21 |
|
tensorflow/io | 92b44e180674a8af0e12e405530f7343e3e693e4 | tensorflow_io/python/experimental/serialization_ops.py | python | process_entry | (data, name) | return process_primitive(data["type"], name) | process_entry | process_entry | [
"process_entry"
] | def process_entry(data, name):
"""process_entry"""
if data["type"] == "record":
return process_record(data, name)
if data["type"] == "enum":
assert False
if data["type"] == "array":
assert False
if data["type"] == "map":
assert False
if data["type"] == "fixed":
assert False
if isinstance(data["type"], list):
return process_union(data, name)
return process_primitive(data["type"], name) | [
"def",
"process_entry",
"(",
"data",
",",
"name",
")",
":",
"if",
"data",
"[",
"\"type\"",
"]",
"==",
"\"record\"",
":",
"return",
"process_record",
"(",
"data",
",",
"name",
")",
"if",
"data",
"[",
"\"type\"",
"]",
"==",
"\"enum\"",
":",
"assert",
"False",
"if",
"data",
"[",
"\"type\"",
"]",
"==",
"\"array\"",
":",
"assert",
"False",
"if",
"data",
"[",
"\"type\"",
"]",
"==",
"\"map\"",
":",
"assert",
"False",
"if",
"data",
"[",
"\"type\"",
"]",
"==",
"\"fixed\"",
":",
"assert",
"False",
"if",
"isinstance",
"(",
"data",
"[",
"\"type\"",
"]",
",",
"list",
")",
":",
"return",
"process_union",
"(",
"data",
",",
"name",
")",
"return",
"process_primitive",
"(",
"data",
"[",
"\"type\"",
"]",
",",
"name",
")"
] | https://github.com/tensorflow/io/blob/92b44e180674a8af0e12e405530f7343e3e693e4/tensorflow_io/python/experimental/serialization_ops.py#L111-L125 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/core/_internal.py | python | _ctypes.strides | (self) | return self.strides_as(_getintp_ctype()) | (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array. | (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array. | [
"(",
"c_intp",
"*",
"self",
".",
"ndim",
")",
":",
"A",
"ctypes",
"array",
"of",
"length",
"self",
".",
"ndim",
"where",
"the",
"basetype",
"is",
"the",
"same",
"as",
"for",
"the",
"shape",
"attribute",
".",
"This",
"ctypes",
"array",
"contains",
"the",
"strides",
"information",
"from",
"the",
"underlying",
"array",
".",
"This",
"strides",
"information",
"is",
"important",
"for",
"showing",
"how",
"many",
"bytes",
"must",
"be",
"jumped",
"to",
"get",
"to",
"the",
"next",
"element",
"in",
"the",
"array",
"."
] | def strides(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
"""
return self.strides_as(_getintp_ctype()) | [
"def",
"strides",
"(",
"self",
")",
":",
"return",
"self",
".",
"strides_as",
"(",
"_getintp_ctype",
"(",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/core/_internal.py#L336-L344 |
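
A small sketch using only documented numpy behavior; ndarray.ctypes.strides mirrors ndarray.strides as a ctypes array:

    import numpy as np

    a = np.arange(12, dtype=np.int32).reshape(3, 4)
    print(a.strides)              # (16, 4): bytes to jump per dimension in C order
    print(a.ctypes.strides[:])    # same values through the ctypes view above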
|
OKCoin/websocket | 50c806cf1e9a84984c6cf2efd94a937738cfc35c | python/websocket/_core.py | python | WebSocket.getsubprotocol | (self) | get subprotocol | get subprotocol | [
"get",
"subprotocol"
] | def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None | [
"def",
"getsubprotocol",
"(",
"self",
")",
":",
"if",
"self",
".",
"handshake_response",
":",
"return",
"self",
".",
"handshake_response",
".",
"subprotocol",
"else",
":",
"return",
"None"
] | https://github.com/OKCoin/websocket/blob/50c806cf1e9a84984c6cf2efd94a937738cfc35c/python/websocket/_core.py#L203-L210 |
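
A usage sketch in websocket-client style, which this vendored module follows; the URL and subprotocol are placeholders:

    import websocket

    ws = websocket.create_connection(
        "ws://example.com/socket", subprotocols=["chat"])
    print(ws.getsubprotocol())    # negotiated subprotocol, or None
    ws.close()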
||
scribusproject/scribus | 41ec7c775a060912cf251682a8b1437f753f80f4 | codegen/cheetah/Cheetah/Templates/_SkeletonPage.py | python | _SkeletonPage.formHTMLTag | (self, tagName, attributes={}) | return ''.join(tagTxt) | returns a string containing an HTML <tag> | returns a string containing an HTML <tag> | [
"returns",
"a",
"string",
"containing",
"an",
"HTML",
"<tag",
">"
] | def formHTMLTag(self, tagName, attributes={}):
"""returns a string containing an HTML <tag> """
tagTxt = ['<', tagName.lower()]
for name, val in attributes.items():
tagTxt += [' ', name.lower(), '="', str(val), '"']
tagTxt.append('>')
return ''.join(tagTxt) | [
"def",
"formHTMLTag",
"(",
"self",
",",
"tagName",
",",
"attributes",
"=",
"{",
"}",
")",
":",
"tagTxt",
"=",
"[",
"'<'",
",",
"tagName",
".",
"lower",
"(",
")",
"]",
"for",
"name",
",",
"val",
"in",
"attributes",
".",
"items",
"(",
")",
":",
"tagTxt",
"+=",
"[",
"' '",
",",
"name",
".",
"lower",
"(",
")",
",",
"'=\"'",
",",
"str",
"(",
"val",
")",
",",
"'\"'",
"]",
"tagTxt",
".",
"append",
"(",
"'>'",
")",
"return",
"''",
".",
"join",
"(",
"tagTxt",
")"
] | https://github.com/scribusproject/scribus/blob/41ec7c775a060912cf251682a8b1437f753f80f4/codegen/cheetah/Cheetah/Templates/_SkeletonPage.py#L194-L200 |
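
The record above is a plain string builder; restated as a standalone function for illustration (the name form_html_tag is illustrative, not Cheetah API):

    def form_html_tag(tag_name, attributes=None):
        # same logic as _SkeletonPage.formHTMLTag above
        parts = ['<', tag_name.lower()]
        for name, val in (attributes or {}).items():
            parts += [' ', name.lower(), '="', str(val), '"']
        parts.append('>')
        return ''.join(parts)

    print(form_html_tag('A', {'HREF': 'index.html'}))   # -> <a href="index.html">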
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/dataview.py | python | PreDataViewListCtrl | (*args, **kwargs) | return val | PreDataViewListCtrl() -> DataViewListCtrl | PreDataViewListCtrl() -> DataViewListCtrl | [
"PreDataViewListCtrl",
"()",
"-",
">",
"DataViewListCtrl"
] | def PreDataViewListCtrl(*args, **kwargs):
"""PreDataViewListCtrl() -> DataViewListCtrl"""
val = _dataview.new_PreDataViewListCtrl(*args, **kwargs)
return val | [
"def",
"PreDataViewListCtrl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"val",
"=",
"_dataview",
".",
"new_PreDataViewListCtrl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"val"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/dataview.py#L2211-L2214 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/AWSPythonSDK/1.5.8/botocore/paginate.py | python | PageIterator._convert_deprecated_starting_token | (self, deprecated_token) | return dict(zip(self._input_token, deprecated_token)) | This attempts to convert a deprecated starting token into the new
style. | This attempts to convert a deprecated starting token into the new
style. | [
"This",
"attempts",
"to",
"convert",
"a",
"deprecated",
"starting",
"token",
"into",
"the",
"new",
"style",
"."
] | def _convert_deprecated_starting_token(self, deprecated_token):
"""
This attempts to convert a deprecated starting token into the new
style.
"""
len_deprecated_token = len(deprecated_token)
len_input_token = len(self._input_token)
if len_deprecated_token > len_input_token:
raise ValueError("Bad starting token: %s" % self._starting_token)
elif len_deprecated_token < len_input_token:
log.debug("Old format starting token does not contain all input "
"tokens. Setting the rest, in order, as None.")
for i in range(len_input_token - len_deprecated_token):
deprecated_token.append(None)
return dict(zip(self._input_token, deprecated_token)) | [
"def",
"_convert_deprecated_starting_token",
"(",
"self",
",",
"deprecated_token",
")",
":",
"len_deprecated_token",
"=",
"len",
"(",
"deprecated_token",
")",
"len_input_token",
"=",
"len",
"(",
"self",
".",
"_input_token",
")",
"if",
"len_deprecated_token",
">",
"len_input_token",
":",
"raise",
"ValueError",
"(",
"\"Bad starting token: %s\"",
"%",
"self",
".",
"_starting_token",
")",
"elif",
"len_deprecated_token",
"<",
"len_input_token",
":",
"log",
".",
"debug",
"(",
"\"Old format starting token does not contain all input \"",
"\"tokens. Setting the rest, in order, as None.\"",
")",
"for",
"i",
"in",
"range",
"(",
"len_input_token",
"-",
"len_deprecated_token",
")",
":",
"deprecated_token",
".",
"append",
"(",
"None",
")",
"return",
"dict",
"(",
"zip",
"(",
"self",
".",
"_input_token",
",",
"deprecated_token",
")",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/paginate.py#L534-L548 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/generic.py | python | NDFrame.tshift | (
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) | return self._constructor(new_data).__finalize__(self) | Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown | Shift the time index, using the index's frequency if available. | [
"Shift",
"the",
"time",
"index",
"using",
"the",
"index",
"s",
"frequency",
"if",
"available",
"."
] | def tshift(
self: FrameOrSeries, periods: int = 1, freq=None, axis=0
) -> FrameOrSeries:
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative.
freq : DateOffset, timedelta, or str, default None
Increment to use from the tseries module
or time rule expressed as a string (e.g. 'EOM').
axis : {0 or 'index', 1 or 'columns', None}, default 0
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
elif orig_freq is not None:
msg = (
f"Given freq {freq.rule_code} does not match"
f" PeriodIndex freq {orig_freq.rule_code}"
)
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self) | [
"def",
"tshift",
"(",
"self",
":",
"FrameOrSeries",
",",
"periods",
":",
"int",
"=",
"1",
",",
"freq",
"=",
"None",
",",
"axis",
"=",
"0",
")",
"->",
"FrameOrSeries",
":",
"index",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"getattr",
"(",
"index",
",",
"\"freq\"",
",",
"None",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"getattr",
"(",
"index",
",",
"\"inferred_freq\"",
",",
"None",
")",
"if",
"freq",
"is",
"None",
":",
"msg",
"=",
"\"Freq was not given and was not set in the index\"",
"raise",
"ValueError",
"(",
"msg",
")",
"if",
"periods",
"==",
"0",
":",
"return",
"self",
"if",
"isinstance",
"(",
"freq",
",",
"str",
")",
":",
"freq",
"=",
"to_offset",
"(",
"freq",
")",
"block_axis",
"=",
"self",
".",
"_get_block_manager_axis",
"(",
"axis",
")",
"if",
"isinstance",
"(",
"index",
",",
"PeriodIndex",
")",
":",
"orig_freq",
"=",
"to_offset",
"(",
"index",
".",
"freq",
")",
"if",
"freq",
"==",
"orig_freq",
":",
"new_data",
"=",
"self",
".",
"_data",
".",
"copy",
"(",
")",
"new_data",
".",
"axes",
"[",
"block_axis",
"]",
"=",
"index",
".",
"shift",
"(",
"periods",
")",
"elif",
"orig_freq",
"is",
"not",
"None",
":",
"msg",
"=",
"(",
"f\"Given freq {freq.rule_code} does not match\"",
"f\" PeriodIndex freq {orig_freq.rule_code}\"",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"new_data",
"=",
"self",
".",
"_data",
".",
"copy",
"(",
")",
"new_data",
".",
"axes",
"[",
"block_axis",
"]",
"=",
"index",
".",
"shift",
"(",
"periods",
",",
"freq",
")",
"return",
"self",
".",
"_constructor",
"(",
"new_data",
")",
".",
"__finalize__",
"(",
"self",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/generic.py#L9088-L9148 |
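
A minimal sketch of the public behavior; tshift shipped with pandas 1.0 and was later deprecated in favor of shift(freq=...):

    import pandas as pd

    idx = pd.date_range("2020-01-01", periods=3, freq="D")
    ser = pd.Series([1, 2, 3], index=idx)
    shifted = ser.tshift(1)        # index moves forward one day, values unchanged
    # modern equivalent: ser.shift(1, freq="D")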
|
trailofbits/llvm-sanitizer-tutorial | d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99 | llvm/tools/clang/bindings/python/clang/cindex.py | python | Type.get_declaration | (self) | return conf.lib.clang_getTypeDeclaration(self) | Return the cursor for the declaration of the given type. | Return the cursor for the declaration of the given type. | [
"Return",
"the",
"cursor",
"for",
"the",
"declaration",
"of",
"the",
"given",
"type",
"."
] | def get_declaration(self):
"""
Return the cursor for the declaration of the given type.
"""
return conf.lib.clang_getTypeDeclaration(self) | [
"def",
"get_declaration",
"(",
"self",
")",
":",
"return",
"conf",
".",
"lib",
".",
"clang_getTypeDeclaration",
"(",
"self",
")"
] | https://github.com/trailofbits/llvm-sanitizer-tutorial/blob/d29dfeec7f51fbf234fd0080f28f2b30cd0b6e99/llvm/tools/clang/bindings/python/clang/cindex.py#L2336-L2340 |
|
H-uru/Plasma | c2140ea046e82e9c199e257a7f2e7edb42602871 | Scripts/Python/islmPodMap.py | python | islmPodMap.__del__ | (self) | unload the dialog that we loaded | unload the dialog that we loaded | [
"unload",
"the",
"dialog",
"that",
"we",
"loaded"
] | def __del__(self):
"unload the dialog that we loaded"
PtUnloadDialog(Vignette.value) | [
"def",
"__del__",
"(",
"self",
")",
":",
"PtUnloadDialog",
"(",
"Vignette",
".",
"value",
")"
] | https://github.com/H-uru/Plasma/blob/c2140ea046e82e9c199e257a7f2e7edb42602871/Scripts/Python/islmPodMap.py#L94-L96 |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/stc.py | python | StyledTextCtrl.ScrollToColumn | (*args, **kwargs) | return _stc.StyledTextCtrl_ScrollToColumn(*args, **kwargs) | ScrollToColumn(self, int column)
Scroll enough to make the given column visible | ScrollToColumn(self, int column) | [
"ScrollToColumn",
"(",
"self",
"int",
"column",
")"
] | def ScrollToColumn(*args, **kwargs):
"""
ScrollToColumn(self, int column)
Scroll enough to make the given column visible
"""
return _stc.StyledTextCtrl_ScrollToColumn(*args, **kwargs) | [
"def",
"ScrollToColumn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_stc",
".",
"StyledTextCtrl_ScrollToColumn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/stc.py#L6613-L6619 |
|
Kitware/ParaView | f760af9124ff4634b23ebbeab95a4f56e0261955 | ThirdParty/cinema/paraview/tpl/cinema_python/adaptors/paraview/cinemareader.py | python | layer2img | (layer) | return img | converts a layer to vtkImageData. | converts a layer to vtkImageData. | [
"converts",
"a",
"layer",
"to",
"vtkImageData",
"."
] | def layer2img(layer):
"""converts a layer to vtkImageData."""
if not layer:
return None
img = vtk.vtkImageData()
dims = [0, 0, 0]
if layer.hasValueArray():
nvalues = layer.getValueArray()
# now, the numpy array's shape matches the 2D image
dims[0] = nvalues.shape[1]
dims[1] = nvalues.shape[0]
img.SetDimensions(dims[0], dims[1], 1)
nvalues = nvalues.reshape(dims[0]*dims[1])
vvalues = dsa.numpyTovtkDataArray(nvalues, "Values")
img.GetPointData().SetScalars(vvalues)
elif layer.hasColorArray():
ncolors = layer.getColorArray()
# now, the numpy array's shape matches the 2D image
dims[0] = ncolors.shape[1]
dims[1] = ncolors.shape[0]
img.SetDimensions(dims[0], dims[1], 1)
ncolors = ncolors.reshape((dims[0]*dims[1], -1))
vcolors = dsa.numpyTovtkDataArray(ncolors, "Colors")
img.GetPointData().SetScalars(vcolors)
ndepth = layer.getDepth()
if ndepth is None:
raise RuntimeError("Missing 'depth'")
ndepth = ndepth.reshape(dims[0]*dims[1])
vdepth = dsa.numpyTovtkDataArray(ndepth, "Depth")
img.GetPointData().AddArray(vdepth)
nluminance = layer.getLuminance()
if nluminance is not None:
nluminance = nluminance.reshape((dims[0]*dims[1], -1))
vluminance = dsa.numpyTovtkDataArray(nluminance, "Luminance")
img.GetPointData().AddArray(vluminance)
# from paraview.vtk.vtkIOLegacy import vtkDataSetWriter
# writer = vtkDataSetWriter()
# writer.SetInputDataObject(img)
# writer.SetFileName("/tmp/layer.vtk")
# writer.Update()
# del writer
return img | [
"def",
"layer2img",
"(",
"layer",
")",
":",
"if",
"not",
"layer",
":",
"return",
"None",
"img",
"=",
"vtk",
".",
"vtkImageData",
"(",
")",
"dims",
"=",
"[",
"0",
",",
"0",
",",
"0",
"]",
"if",
"layer",
".",
"hasValueArray",
"(",
")",
":",
"nvalues",
"=",
"layer",
".",
"getValueArray",
"(",
")",
"# now, the numpy array's shape matches the 2D image",
"dims",
"[",
"0",
"]",
"=",
"nvalues",
".",
"shape",
"[",
"1",
"]",
"dims",
"[",
"1",
"]",
"=",
"nvalues",
".",
"shape",
"[",
"0",
"]",
"img",
".",
"SetDimensions",
"(",
"dims",
"[",
"0",
"]",
",",
"dims",
"[",
"1",
"]",
",",
"1",
")",
"nvalues",
"=",
"nvalues",
".",
"reshape",
"(",
"dims",
"[",
"0",
"]",
"*",
"dims",
"[",
"1",
"]",
")",
"vvalues",
"=",
"dsa",
".",
"numpyTovtkDataArray",
"(",
"nvalues",
",",
"\"Values\"",
")",
"img",
".",
"GetPointData",
"(",
")",
".",
"SetScalars",
"(",
"vvalues",
")",
"elif",
"layer",
".",
"hasColorArray",
"(",
")",
":",
"ncolors",
"=",
"layer",
".",
"getColorArray",
"(",
")",
"# now, the numpy array's shape matches the 2D image",
"dims",
"[",
"0",
"]",
"=",
"ncolors",
".",
"shape",
"[",
"1",
"]",
"dims",
"[",
"1",
"]",
"=",
"ncolors",
".",
"shape",
"[",
"0",
"]",
"img",
".",
"SetDimensions",
"(",
"dims",
"[",
"0",
"]",
",",
"dims",
"[",
"1",
"]",
",",
"1",
")",
"ncolors",
"=",
"ncolors",
".",
"reshape",
"(",
"(",
"dims",
"[",
"0",
"]",
"*",
"dims",
"[",
"1",
"]",
",",
"-",
"1",
")",
")",
"vcolors",
"=",
"dsa",
".",
"numpyTovtkDataArray",
"(",
"ncolors",
",",
"\"Colors\"",
")",
"img",
".",
"GetPointData",
"(",
")",
".",
"SetScalars",
"(",
"vcolors",
")",
"ndepth",
"=",
"layer",
".",
"getDepth",
"(",
")",
"if",
"ndepth",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"\"Missing 'depth'\"",
")",
"ndepth",
"=",
"ndepth",
".",
"reshape",
"(",
"dims",
"[",
"0",
"]",
"*",
"dims",
"[",
"1",
"]",
")",
"vdepth",
"=",
"dsa",
".",
"numpyTovtkDataArray",
"(",
"ndepth",
",",
"\"Depth\"",
")",
"img",
".",
"GetPointData",
"(",
")",
".",
"AddArray",
"(",
"vdepth",
")",
"nluminance",
"=",
"layer",
".",
"getLuminance",
"(",
")",
"if",
"nluminance",
"is",
"not",
"None",
":",
"nluminance",
"=",
"nluminance",
".",
"reshape",
"(",
"(",
"dims",
"[",
"0",
"]",
"*",
"dims",
"[",
"1",
"]",
",",
"-",
"1",
")",
")",
"vluminance",
"=",
"dsa",
".",
"numpyTovtkDataArray",
"(",
"nluminance",
",",
"\"Luminance\"",
")",
"img",
".",
"GetPointData",
"(",
")",
".",
"AddArray",
"(",
"vluminance",
")",
"# from paraview.vtk.vtkIOLegacy import vtkDataSetWriter",
"# writer = vtkDataSetWriter()",
"# writer.SetInputDataObject(img)",
"# writer.SetFileName(\"/tmp/layer.vtk\")",
"# writer.Update()",
"# del writer",
"return",
"img"
] | https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/ThirdParty/cinema/paraview/tpl/cinema_python/adaptors/paraview/cinemareader.py#L16-L63 |
|
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBBreakpoint.GetHitCount | (self) | return _lldb.SBBreakpoint_GetHitCount(self) | GetHitCount(self) -> uint32_t | GetHitCount(self) -> uint32_t | [
"GetHitCount",
"(",
"self",
")",
"-",
">",
"uint32_t"
] | def GetHitCount(self):
"""GetHitCount(self) -> uint32_t"""
return _lldb.SBBreakpoint_GetHitCount(self) | [
"def",
"GetHitCount",
"(",
"self",
")",
":",
"return",
"_lldb",
".",
"SBBreakpoint_GetHitCount",
"(",
"self",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L1494-L1496 |
|
intel/caffe | 3f494b442ee3f9d17a07b09ecbd5fa2bbda00836 | scripts/cpp_lint.py | python | ProcessFileData | (filename, file_extension, lines, error,
extra_check_functions=[]) | Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error | Performs lint checks and reports any errors to the given error function. | [
"Performs",
"lint",
"checks",
"and",
"reports",
"any",
"errors",
"to",
"the",
"given",
"error",
"function",
"."
] | def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error) | [
"def",
"ProcessFileData",
"(",
"filename",
",",
"file_extension",
",",
"lines",
",",
"error",
",",
"extra_check_functions",
"=",
"[",
"]",
")",
":",
"lines",
"=",
"(",
"[",
"'// marker so line numbers and indices both start at 1'",
"]",
"+",
"lines",
"+",
"[",
"'// marker so line numbers end in a known way'",
"]",
")",
"include_state",
"=",
"_IncludeState",
"(",
")",
"function_state",
"=",
"_FunctionState",
"(",
")",
"nesting_state",
"=",
"_NestingState",
"(",
")",
"ResetNolintSuppressions",
"(",
")",
"CheckForCopyright",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"if",
"file_extension",
"==",
"'h'",
":",
"CheckForHeaderGuard",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"RemoveMultiLineComments",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"clean_lines",
"=",
"CleansedLines",
"(",
"lines",
")",
"for",
"line",
"in",
"xrange",
"(",
"clean_lines",
".",
"NumLines",
"(",
")",
")",
":",
"ProcessLine",
"(",
"filename",
",",
"file_extension",
",",
"clean_lines",
",",
"line",
",",
"include_state",
",",
"function_state",
",",
"nesting_state",
",",
"error",
",",
"extra_check_functions",
")",
"nesting_state",
".",
"CheckCompletedBlocks",
"(",
"filename",
",",
"error",
")",
"CheckForIncludeWhatYouUse",
"(",
"filename",
",",
"clean_lines",
",",
"include_state",
",",
"error",
")",
"# We check here rather than inside ProcessLine so that we see raw",
"# lines rather than \"cleaned\" lines.",
"CheckForBadCharacters",
"(",
"filename",
",",
"lines",
",",
"error",
")",
"CheckForNewlineAtEOF",
"(",
"filename",
",",
"lines",
",",
"error",
")"
] | https://github.com/intel/caffe/blob/3f494b442ee3f9d17a07b09ecbd5fa2bbda00836/scripts/cpp_lint.py#L4648-L4691 |
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/protobuf/py3/google/protobuf/internal/containers.py | python | RepeatedScalarFieldContainer.__init__ | (self, message_listener, type_checker) | Args:
message_listener: A MessageListener implementation. The
RepeatedScalarFieldContainer will call this object's Modified() method
when it is modified.
type_checker: A type_checkers.ValueChecker instance to run on elements
inserted into this container. | Args: | [
"Args",
":"
] | def __init__(self, message_listener, type_checker):
"""Args:
message_listener: A MessageListener implementation. The
RepeatedScalarFieldContainer will call this object's Modified() method
when it is modified.
type_checker: A type_checkers.ValueChecker instance to run on elements
inserted into this container.
"""
super(RepeatedScalarFieldContainer, self).__init__(message_listener)
self._type_checker = type_checker | [
"def",
"__init__",
"(",
"self",
",",
"message_listener",
",",
"type_checker",
")",
":",
"super",
"(",
"RepeatedScalarFieldContainer",
",",
"self",
")",
".",
"__init__",
"(",
"message_listener",
")",
"self",
".",
"_type_checker",
"=",
"type_checker"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py3/google/protobuf/internal/containers.py#L247-L257 |
martinmoene/span-lite | 8f7935ff4e502ee023990d356d6578b8293eda74 | script/create-vcpkg.py | python | to_ref | ( version ) | return cfg_ref_prefix + version | Add prefix to version/tag, like v1.2.3 | Add prefix to version/tag, like v1.2.3 | [
"Add",
"prefix",
"to",
"version",
"/",
"tag",
"like",
"v1",
".",
"2",
".",
"3"
] | def to_ref( version ):
"""Add prefix to version/tag, like v1.2.3"""
return cfg_ref_prefix + version | [
"def",
"to_ref",
"(",
"version",
")",
":",
"return",
"cfg_ref_prefix",
"+",
"version"
] | https://github.com/martinmoene/span-lite/blob/8f7935ff4e502ee023990d356d6578b8293eda74/script/create-vcpkg.py#L110-L112 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/jedi/jedi/evaluate/context/iterable.py | python | SequenceLiteralContext.exact_key_items | (self) | Returns a generator of tuples like dict.items(), where the key is
resolved (as a string) and the values are still lazy contexts. | Returns a generator of tuples like dict.items(), where the key is
resolved (as a string) and the values are still lazy contexts. | [
"Returns",
"a",
"generator",
"of",
"tuples",
"like",
"dict",
".",
"items",
"()",
"where",
"the",
"key",
"is",
"resolved",
"(",
"as",
"a",
"string",
")",
"and",
"the",
"values",
"are",
"still",
"lazy",
"contexts",
"."
] | def exact_key_items(self):
"""
Returns a generator of tuples like dict.items(), where the key is
resolved (as a string) and the values are still lazy contexts.
"""
for key_node, value in self._items():
for key in self._defining_context.eval_node(key_node):
if is_string(key):
yield key.get_safe_value(), LazyTreeContext(self._defining_context, value) | [
"def",
"exact_key_items",
"(",
"self",
")",
":",
"for",
"key_node",
",",
"value",
"in",
"self",
".",
"_items",
"(",
")",
":",
"for",
"key",
"in",
"self",
".",
"_defining_context",
".",
"eval_node",
"(",
"key_node",
")",
":",
"if",
"is_string",
"(",
"key",
")",
":",
"yield",
"key",
".",
"get_safe_value",
"(",
")",
",",
"LazyTreeContext",
"(",
"self",
".",
"_defining_context",
",",
"value",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/jedi/jedi/evaluate/context/iterable.py#L388-L396 |
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/tools/Editra/plugins/PyShell/PyShell/__init__.py | python | PyShell.CreateItem | (self, parent) | return EdPyShellBox(parent) | Returns a PyShell Panel | Returns a PyShell Panel | [
"Returns",
"a",
"PyShell",
"Panel"
] | def CreateItem(self, parent):
"""Returns a PyShell Panel"""
util.Log("[PyShell][info] Creating PyShell instance for Shelf")
return EdPyShellBox(parent) | [
"def",
"CreateItem",
"(",
"self",
",",
"parent",
")",
":",
"util",
".",
"Log",
"(",
"\"[PyShell][info] Creating PyShell instance for Shelf\"",
")",
"return",
"EdPyShellBox",
"(",
"parent",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/plugins/PyShell/PyShell/__init__.py#L50-L53 |
|
LiquidPlayer/LiquidCore | 9405979363f2353ac9a71ad8ab59685dd7f919c9 | deps/boost_1_66_0/libs/metaparse/tools/benchmark/benchmark.py | python | byte_to_gb | (byte) | return byte / (1024.0 * 1024 * 1024) | Convert bytes to GB | Convert bytes to GB | [
"Convert",
"bytes",
"to",
"GB"
] | def byte_to_gb(byte):
"""Convert bytes to GB"""
return byte / (1024.0 * 1024 * 1024) | [
"def",
"byte_to_gb",
"(",
"byte",
")",
":",
"return",
"byte",
"/",
"(",
"1024.0",
"*",
"1024",
"*",
"1024",
")"
] | https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/boost_1_66_0/libs/metaparse/tools/benchmark/benchmark.py#L210-L212 |
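
A quick check, assuming the function above is in scope (despite the name it converts to binary GB, i.e. GiB):

    assert byte_to_gb(1024 ** 3) == 1.0    # one GiB worth of bytes
    print(byte_to_gb(5 * 1024 ** 3))       # -> 5.0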
|
livecode/livecode | 4606a10ea10b16d5071d0f9f263ccdd7ede8b31d | gyp/pylib/gyp/win_tool.py | python | WinTool.ExecClCompile | (self, project_dir, selected_files) | return subprocess.call(cmd, shell=True, cwd=BASE_DIR) | Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files. | Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files. | [
"Executed",
"by",
"msvs",
"-",
"ninja",
"projects",
"when",
"the",
"ClCompile",
"target",
"is",
"used",
"to",
"build",
"selected",
"C",
"/",
"C",
"++",
"files",
"."
] | def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR) | [
"def",
"ExecClCompile",
"(",
"self",
",",
"project_dir",
",",
"selected_files",
")",
":",
"project_dir",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"project_dir",
",",
"BASE_DIR",
")",
"selected_files",
"=",
"selected_files",
".",
"split",
"(",
"';'",
")",
"ninja_targets",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"project_dir",
",",
"filename",
")",
"+",
"'^^'",
"for",
"filename",
"in",
"selected_files",
"]",
"cmd",
"=",
"[",
"'ninja.exe'",
"]",
"cmd",
".",
"extend",
"(",
"ninja_targets",
")",
"return",
"subprocess",
".",
"call",
"(",
"cmd",
",",
"shell",
"=",
"True",
",",
"cwd",
"=",
"BASE_DIR",
")"
] | https://github.com/livecode/livecode/blob/4606a10ea10b16d5071d0f9f263ccdd7ede8b31d/gyp/pylib/gyp/win_tool.py#L300-L309 |
|
BlzFans/wke | b0fa21158312e40c5fbd84682d643022b6c34a93 | cygwin/lib/python2.6/logging/handlers.py | python | SocketHandler.__init__ | (self, host, port) | Initializes the handler with a specific host address and port.
The attribute 'closeOnError' is set to 1 - which means that if
a socket error occurs, the socket is silently closed and then
reopened on the next logging call. | Initializes the handler with a specific host address and port. | [
"Initializes",
"the",
"handler",
"with",
"a",
"specific",
"host",
"address",
"and",
"port",
"."
] | def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
The attribute 'closeOnError' is set to 1 - which means that if
a socket error occurs, the socket is silently closed and then
reopened on the next logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = 0
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0 | [
"def",
"__init__",
"(",
"self",
",",
"host",
",",
"port",
")",
":",
"logging",
".",
"Handler",
".",
"__init__",
"(",
"self",
")",
"self",
".",
"host",
"=",
"host",
"self",
".",
"port",
"=",
"port",
"self",
".",
"sock",
"=",
"None",
"self",
".",
"closeOnError",
"=",
"0",
"self",
".",
"retryTime",
"=",
"None",
"#",
"# Exponential backoff parameters.",
"#",
"self",
".",
"retryStart",
"=",
"1.0",
"self",
".",
"retryMax",
"=",
"30.0",
"self",
".",
"retryFactor",
"=",
"2.0"
] | https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/logging/handlers.py#L415-L434 |
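
A minimal stdlib usage sketch; the handler pickles each record and sends it to a socket listener such as the one in the logging cookbook:

    import logging
    import logging.handlers

    handler = logging.handlers.SocketHandler(
        "localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logging.getLogger().addHandler(handler)
    logging.error("delivered to the listener, if one is running")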
||
miyosuda/TensorFlowAndroidDemo | 35903e0221aa5f109ea2dbef27f20b52e317f42d | jni-build/jni/include/tensorflow/contrib/layers/python/layers/regularizers.py | python | l1_l2_regularizer | (scale_l1=1.0, scale_l2=1.0, scope=None) | return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope) | Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional op_scope name.
Returns:
A function with signature `l1_l2(weights)` that applies a weighted sum of
L1 L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float. | Returns a function that can be used to apply L1 L2 regularizations. | [
"Returns",
"a",
"function",
"that",
"can",
"be",
"used",
"to",
"apply",
"L1",
"L2",
"regularizations",
"."
] | def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional op_scope name.
Returns:
A function with signature `l1_l2(weights)` that applies a weighted sum of
L1 L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
scope = scope or 'l1_l2_regularizer'
return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope) | [
"def",
"l1_l2_regularizer",
"(",
"scale_l1",
"=",
"1.0",
",",
"scale_l2",
"=",
"1.0",
",",
"scope",
"=",
"None",
")",
":",
"scope",
"=",
"scope",
"or",
"'l1_l2_regularizer'",
"return",
"sum_regularizer",
"(",
"[",
"l1_regularizer",
"(",
"scale_l1",
")",
",",
"l2_regularizer",
"(",
"scale_l2",
")",
"]",
",",
"scope",
"=",
"scope",
")"
] | https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/layers/python/layers/regularizers.py#L111-L129 |
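
A hedged sketch against the TF 1.x contrib API this record belongs to (tf.contrib was removed in TF 2.x):

    import tensorflow as tf   # assumes TensorFlow 1.x

    w = tf.get_variable("w", shape=[10, 10])
    reg = tf.contrib.layers.l1_l2_regularizer(scale_l1=0.5, scale_l2=0.01)
    penalty = reg(w)          # scalar tensor combining the weighted L1 and L2 penalties on w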
|
tensorflow/minigo | 6d89c202cdceaf449aefc3149ab2110d44f1a6a4 | strategies.py | python | MCTSPlayer.suggest_move | (self, position) | return self.pick_move() | Used for playing a single game.
For parallel play, use initialize_move, select_leaf,
incorporate_results, and pick_move | Used for playing a single game. | [
"Used",
"for",
"playing",
"a",
"single",
"game",
"."
] | def suggest_move(self, position):
"""Used for playing a single game.
For parallel play, use initialize_move, select_leaf,
incorporate_results, and pick_move
"""
start = time.time()
if self.timed_match:
while time.time() - start < self.seconds_per_move:
self.tree_search()
else:
current_readouts = self.root.N
while self.root.N < current_readouts + self.num_readouts:
self.tree_search()
if self.verbosity > 0:
dbg("%d: Searched %d times in %.2f seconds\n\n" % (
position.n, self.num_readouts, time.time() - start))
# print some stats on moves considered.
if self.verbosity > 2:
dbg(self.root.describe())
dbg('\n\n')
if self.verbosity > 3:
dbg(self.root.position)
return self.pick_move() | [
"def",
"suggest_move",
"(",
"self",
",",
"position",
")",
":",
"start",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"timed_match",
":",
"while",
"time",
".",
"time",
"(",
")",
"-",
"start",
"<",
"self",
".",
"seconds_per_move",
":",
"self",
".",
"tree_search",
"(",
")",
"else",
":",
"current_readouts",
"=",
"self",
".",
"root",
".",
"N",
"while",
"self",
".",
"root",
".",
"N",
"<",
"current_readouts",
"+",
"self",
".",
"num_readouts",
":",
"self",
".",
"tree_search",
"(",
")",
"if",
"self",
".",
"verbosity",
">",
"0",
":",
"dbg",
"(",
"\"%d: Searched %d times in %.2f seconds\\n\\n\"",
"%",
"(",
"position",
".",
"n",
",",
"self",
".",
"num_readouts",
",",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"# print some stats on moves considered.",
"if",
"self",
".",
"verbosity",
">",
"2",
":",
"dbg",
"(",
"self",
".",
"root",
".",
"describe",
"(",
")",
")",
"dbg",
"(",
"'\\n\\n'",
")",
"if",
"self",
".",
"verbosity",
">",
"3",
":",
"dbg",
"(",
"self",
".",
"root",
".",
"position",
")",
"return",
"self",
".",
"pick_move",
"(",
")"
] | https://github.com/tensorflow/minigo/blob/6d89c202cdceaf449aefc3149ab2110d44f1a6a4/strategies.py#L123-L149 |
|
eclipse/sumo | 7132a9b8b6eea734bdec38479026b4d8c4336d03 | tools/sumolib/net/__init__.py | python | Net.getBBoxXY | (self) | return [(self._ranges[0][0], self._ranges[1][0]),
(self._ranges[0][1], self._ranges[1][1])] | Get the bounding box (bottom left and top right coordinates) for a net;
Coordinates are in X and Y (not Lat and Lon)
:return [(bottom_left_X, bottom_left_Y), (top_right_X, top_right_Y)] | Get the bounding box (bottom left and top right coordinates) for a net;
Coordinates are in X and Y (not Lat and Lon) | [
"Get",
"the",
"bounding",
"box",
"(",
"bottom",
"left",
"and",
"top",
"right",
"coordinates",
")",
"for",
"a",
"net",
";",
"Coordinates",
"are",
"in",
"X",
"and",
"Y",
"(",
"not",
"Lat",
"and",
"Lon",
")"
] | def getBBoxXY(self):
"""
Get the bounding box (bottom left and top right coordinates) for a net;
Coordinates are in X and Y (not Lat and Lon)
:return [(bottom_left_X, bottom_left_Y), (top_right_X, top_right_Y)]
"""
return [(self._ranges[0][0], self._ranges[1][0]),
(self._ranges[0][1], self._ranges[1][1])] | [
"def",
"getBBoxXY",
"(",
"self",
")",
":",
"return",
"[",
"(",
"self",
".",
"_ranges",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"self",
".",
"_ranges",
"[",
"1",
"]",
"[",
"0",
"]",
")",
",",
"(",
"self",
".",
"_ranges",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"self",
".",
"_ranges",
"[",
"1",
"]",
"[",
"1",
"]",
")",
"]"
] | https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/sumolib/net/__init__.py#L432-L440 |
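
A small usage sketch, assuming SUMO's tools directory is on PYTHONPATH and a network file exists (the file name is a placeholder):

    import sumolib

    net = sumolib.net.readNet("mynet.net.xml")
    (bl_x, bl_y), (tr_x, tr_y) = net.getBBoxXY()
    print("width:", tr_x - bl_x, "height:", tr_y - bl_y)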
|
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/command/config.py | python | config.search_cpp | (self, pattern, body=None, headers=None, include_dirs=None,
lang="c") | return match | Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default. | Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default. | [
"Construct",
"a",
"source",
"file",
"(",
"just",
"like",
"try_cpp",
"()",
")",
"run",
"it",
"through",
"the",
"preprocessor",
"and",
"return",
"true",
"if",
"any",
"line",
"of",
"the",
"output",
"matches",
"pattern",
".",
"pattern",
"should",
"either",
"be",
"a",
"compiled",
"regex",
"object",
"or",
"a",
"string",
"containing",
"a",
"regex",
".",
"If",
"both",
"body",
"and",
"headers",
"are",
"None",
"preprocesses",
"an",
"empty",
"file",
"--",
"which",
"can",
"be",
"useful",
"to",
"determine",
"the",
"symbols",
"the",
"preprocessor",
"and",
"compiler",
"set",
"by",
"default",
"."
] | def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
file = open(out)
match = 0
while 1:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = 1
break
file.close()
self._clean()
return match | [
"def",
"search_cpp",
"(",
"self",
",",
"pattern",
",",
"body",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"include_dirs",
"=",
"None",
",",
"lang",
"=",
"\"c\"",
")",
":",
"self",
".",
"_check_compiler",
"(",
")",
"src",
",",
"out",
"=",
"self",
".",
"_preprocess",
"(",
"body",
",",
"headers",
",",
"include_dirs",
",",
"lang",
")",
"if",
"isinstance",
"(",
"pattern",
",",
"str",
")",
":",
"pattern",
"=",
"re",
".",
"compile",
"(",
"pattern",
")",
"file",
"=",
"open",
"(",
"out",
")",
"match",
"=",
"0",
"while",
"1",
":",
"line",
"=",
"file",
".",
"readline",
"(",
")",
"if",
"line",
"==",
"''",
":",
"break",
"if",
"pattern",
".",
"search",
"(",
"line",
")",
":",
"match",
"=",
"1",
"break",
"file",
".",
"close",
"(",
")",
"self",
".",
"_clean",
"(",
")",
"return",
"match"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/distutils/command/config.py#L196-L223 |
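
A hedged sketch of driving this Python 2-era command object directly; it only runs where a working C compiler is configured:

```python
from distutils.command.config import config
from distutils.dist import Distribution

cmd = config(Distribution())
cmd.ensure_finalized()
# True if any line of the preprocessed output of '#include <stdio.h>'
# matches the regex.
print(cmd.search_cpp(pattern=r'\bprintf\b', headers=['stdio.h']))
```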
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/customtreectrl.py | python | CustomTreeCtrl.FillArray | (self, item, array=[]) | return array | Internal function. Used to populate an array of selected items when
the style ``TR_MULTIPLE`` is used.
:param `item`: an instance of :class:`GenericTreeItem`;
:param list `array`: a Python list containing the selected items.
:return: A Python list containing the selected items. | Internal function. Used to populate an array of selected items when
the style ``TR_MULTIPLE`` is used. | [
"Internal",
"function",
".",
"Used",
"to",
"populate",
"an",
"array",
"of",
"selected",
"items",
"when",
"the",
"style",
"TR_MULTIPLE",
"is",
"used",
"."
] | def FillArray(self, item, array=[]):
"""
Internal function. Used to populate an array of selected items when
the style ``TR_MULTIPLE`` is used.
:param `item`: an instance of :class:`GenericTreeItem`;
:param list `array`: a Python list containing the selected items.
:return: A Python list containing the selected items.
"""
if not array:
array = []
if item.IsSelected():
array.append(item)
if item.HasChildren() and item.IsExpanded():
for child in item.GetChildren():
array = self.FillArray(child, array)
return array | [
"def",
"FillArray",
"(",
"self",
",",
"item",
",",
"array",
"=",
"[",
"]",
")",
":",
"if",
"not",
"array",
":",
"array",
"=",
"[",
"]",
"if",
"item",
".",
"IsSelected",
"(",
")",
":",
"array",
".",
"append",
"(",
"item",
")",
"if",
"item",
".",
"HasChildren",
"(",
")",
"and",
"item",
".",
"IsExpanded",
"(",
")",
":",
"for",
"child",
"in",
"item",
".",
"GetChildren",
"(",
")",
":",
"array",
"=",
"self",
".",
"FillArray",
"(",
"child",
",",
"array",
")",
"return",
"array"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L5768-L5789 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_core.py | python | MouseState.SetMiddleDown | (*args, **kwargs) | return _core_.MouseState_SetMiddleDown(*args, **kwargs) | SetMiddleDown(self, bool down) | SetMiddleDown(self, bool down) | [
"SetMiddleDown",
"(",
"self",
"bool",
"down",
")"
] | def SetMiddleDown(*args, **kwargs):
"""SetMiddleDown(self, bool down)"""
return _core_.MouseState_SetMiddleDown(*args, **kwargs) | [
"def",
"SetMiddleDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"MouseState_SetMiddleDown",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L4498-L4500 |
|
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Environment.py | python | SubstitutionEnvironment.subst | (self, string, raw=0, target=None, source=None, conv=None, executor=None) | return SCons.Subst.scons_subst(string, self, raw, target, source, gvars, lvars, conv) | Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters. | Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters. | [
"Recursively",
"interpolates",
"construction",
"variables",
"from",
"the",
"Environment",
"into",
"the",
"specified",
"string",
"returning",
"the",
"expanded",
"result",
".",
"Construction",
"variables",
"are",
"specified",
"by",
"a",
"$",
"prefix",
"in",
"the",
"string",
"and",
"begin",
"with",
"an",
"initial",
"underscore",
"or",
"alphabetic",
"character",
"followed",
"by",
"any",
"number",
"of",
"underscores",
"or",
"alphanumeric",
"characters",
".",
"The",
"construction",
"variable",
"names",
"may",
"be",
"surrounded",
"by",
"curly",
"braces",
"to",
"separate",
"the",
"name",
"from",
"trailing",
"characters",
"."
] | def subst(self, string, raw=0, target=None, source=None, conv=None, executor=None):
"""Recursively interpolates construction variables from the
Environment into the specified string, returning the expanded
result. Construction variables are specified by a $ prefix
in the string and begin with an initial underscore or
alphabetic character followed by any number of underscores
or alphanumeric characters. The construction variable names
may be surrounded by curly braces to separate the name from
trailing characters.
"""
gvars = self.gvars()
lvars = self.lvars()
lvars['__env__'] = self
if executor:
lvars.update(executor.get_lvars())
return SCons.Subst.scons_subst(string, self, raw, target, source, gvars, lvars, conv) | [
"def",
"subst",
"(",
"self",
",",
"string",
",",
"raw",
"=",
"0",
",",
"target",
"=",
"None",
",",
"source",
"=",
"None",
",",
"conv",
"=",
"None",
",",
"executor",
"=",
"None",
")",
":",
"gvars",
"=",
"self",
".",
"gvars",
"(",
")",
"lvars",
"=",
"self",
".",
"lvars",
"(",
")",
"lvars",
"[",
"'__env__'",
"]",
"=",
"self",
"if",
"executor",
":",
"lvars",
".",
"update",
"(",
"executor",
".",
"get_lvars",
"(",
")",
")",
"return",
"SCons",
".",
"Subst",
".",
"scons_subst",
"(",
"string",
",",
"self",
",",
"raw",
",",
"target",
",",
"source",
",",
"gvars",
",",
"lvars",
",",
"conv",
")"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Environment.py#L499-L514 |
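
Inside an SConstruct, the interpolation behaves as in this sketch (variable values are illustrative):

```python
# SConstruct (sketch)
env = Environment(CC='gcc', CCFLAGS='-O2 -Wall')
print(env.subst('$CC $CCFLAGS'))   # -> "gcc -O2 -Wall"
print(env.subst('${CC}-ar'))       # braces separate the name -> "gcc-ar"
```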
|
google/fhir | d77f57706c1a168529b0b87ca7ccb1c0113e83c2 | py/google/fhir/utils/fhir_types.py | python | is_type_or_profile_of_patient | (
message_or_descriptor: annotation_utils.MessageOrDescriptorBase) | return is_type_or_profile_of(_PATIENT_STRUCTURE_DEFINITION_URL,
message_or_descriptor) | Returns True if message_or_descriptor is type or a profile of Patient. | Returns True if message_or_descriptor is type or a profile of Patient. | [
"Returns",
"True",
"if",
"message_or_descriptor",
"is",
"type",
"or",
"a",
"profile",
"of",
"Patient",
"."
] | def is_type_or_profile_of_patient(
message_or_descriptor: annotation_utils.MessageOrDescriptorBase) -> bool:
"""Returns True if message_or_descriptor is type or a profile of Patient."""
return is_type_or_profile_of(_PATIENT_STRUCTURE_DEFINITION_URL,
message_or_descriptor) | [
"def",
"is_type_or_profile_of_patient",
"(",
"message_or_descriptor",
":",
"annotation_utils",
".",
"MessageOrDescriptorBase",
")",
"->",
"bool",
":",
"return",
"is_type_or_profile_of",
"(",
"_PATIENT_STRUCTURE_DEFINITION_URL",
",",
"message_or_descriptor",
")"
] | https://github.com/google/fhir/blob/d77f57706c1a168529b0b87ca7ccb1c0113e83c2/py/google/fhir/utils/fhir_types.py#L151-L155 |
|
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/python/distribute/sharded_variable.py | python | ShardedVariableMixin.scatter_mul | (self, sparse_delta, use_locking=False, name=None) | return self | Implements tf.Variable.scatter_mul. | Implements tf.Variable.scatter_mul. | [
"Implements",
"tf",
".",
"Variable",
".",
"scatter_mul",
"."
] | def scatter_mul(self, sparse_delta, use_locking=False, name=None):
"""Implements tf.Variable.scatter_mul."""
per_var_sparse_delta = self._decompose_indexed_slices(sparse_delta)
for i, v in enumerate(self._variables):
new_name = None
if name is not None:
new_name = '{}/part_{}'.format(name, i)
v.scatter_mul(per_var_sparse_delta[i], name=new_name)
return self | [
"def",
"scatter_mul",
"(",
"self",
",",
"sparse_delta",
",",
"use_locking",
"=",
"False",
",",
"name",
"=",
"None",
")",
":",
"per_var_sparse_delta",
"=",
"self",
".",
"_decompose_indexed_slices",
"(",
"sparse_delta",
")",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"self",
".",
"_variables",
")",
":",
"new_name",
"=",
"None",
"if",
"name",
"is",
"not",
"None",
":",
"new_name",
"=",
"'{}/part_{}'",
".",
"format",
"(",
"name",
",",
"i",
")",
"v",
".",
"scatter_mul",
"(",
"per_var_sparse_delta",
"[",
"i",
"]",
",",
"name",
"=",
"new_name",
")",
"return",
"self"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/distribute/sharded_variable.py#L647-L655 |
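
The per-shard call it forwards to is the ordinary `tf.Variable.scatter_mul`; this hedged single-variable sketch shows the multiply-at-indices semantics that the sharded version applies per shard after decomposing `sparse_delta`:

```python
import tensorflow as tf

v = tf.Variable([1., 2., 3., 4.])
delta = tf.IndexedSlices(values=tf.constant([10., 10.]),
                         indices=tf.constant([0, 2]))
v.scatter_mul(delta)  # multiply in place at the given indices
print(v.numpy())      # -> [10.  2. 30.  4.]
```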
|
herbstluftwm/herbstluftwm | 23ef0274bd4d317208eae5fea72b21478a71431b | doc/gendoc.py | python | TokTreeInfoExtrator.inside_class_definition | (self, toktreelist, classname) | go on parsing inside the body of a class | go on parsing inside the body of a class | [
"go",
"on",
"parsing",
"inside",
"the",
"body",
"of",
"a",
"class"
] | def inside_class_definition(self, toktreelist, classname):
"""
go on parsing inside the body of a class
"""
pub_priv_prot_re = re.compile('public|private|protected')
stream = TokenStream(toktreelist)
attr_cls_re = re.compile('^(Dyn|)Attribute(Proxy|)_$')
attribute_ = TokenStream.PatternArg(re=attr_cls_re)
link_ = TokenStream.PatternArg(re=re.compile('^(Link_|Child_|DynChild_|ChildMember_)$'))
parameters = TokenStream.PatternArg(callback=lambda t: TokenGroup.IsTokenGroup(t, opening_token='('))
def semicolon_or_block_callback(t):
return t == ';' or TokenGroup.IsTokenGroup(t, opening_token='{')
semicolon_or_block = TokenStream.PatternArg(callback=semicolon_or_block_callback)
arg = TokenStream.PatternArg()
while not stream.empty():
if stream.try_match(pub_priv_prot_re, ':'):
continue
if stream.try_match(re.compile(r'^(//|/\*)')):
# skip comments
continue
# whenever we reach this point, this is a new
# member variable or member function definition
elif stream.try_match('using'):
semicolon_found = stream.discard_until(';')
assert semicolon_found, "expected ; after 'using'"
elif stream.try_match(attribute_, '<'):
attr_type = self.stream_pop_class_name(stream)
assert stream.try_match('>'), \
"every 'Attribute_<' has to be closed by '>'"
attr_name = stream.pop("expect an attribute name")
attr = self.objInfo.attribute_info(classname, attr_name)
attr.type = attr_type
attr.attribute_class = attribute_.value
if stream.try_match('='):
# static initialization:
t = stream.pop()
assert TokenGroup.IsTokenGroup(t)
attr.add_constructor_args(t.enclosed_tokens)
if stream.try_match(';'):
# end of attribute definition
pass
else:
# some other definition (e.g. a function)
stream.discard_until(semicolon_or_block)
elif stream.try_match(link_, '<'):
link_type = self.stream_pop_class_name(stream)
stream.assert_match('>', msg="every 'Link_<' has to be enclosed by '>'")
cpp_name = stream.pop("expect an attribute name")
link = self.objInfo.child_info(classname, cpp_name)
link.child_class = link_.value
link.type = link_type
stream.discard_until(semicolon_or_block)
elif stream.try_match('ByName', arg):
link = self.objInfo.child_info(classname, arg.value)
link.child_class = 'ByName'
link.user_name = 'by-name'
link.type = ClassName('ByName')
stream.discard_until(semicolon_or_block)
elif stream.try_match(classname, parameters, ':'):
self.stream_consume_member_initializers(classname, stream)
else:
stream.discard_until(semicolon_or_block) | [
"def",
"inside_class_definition",
"(",
"self",
",",
"toktreelist",
",",
"classname",
")",
":",
"pub_priv_prot_re",
"=",
"re",
".",
"compile",
"(",
"'public|private|protected'",
")",
"stream",
"=",
"TokenStream",
"(",
"toktreelist",
")",
"attr_cls_re",
"=",
"re",
".",
"compile",
"(",
"'^(Dyn|)Attribute(Proxy|)_$'",
")",
"attribute_",
"=",
"TokenStream",
".",
"PatternArg",
"(",
"re",
"=",
"attr_cls_re",
")",
"link_",
"=",
"TokenStream",
".",
"PatternArg",
"(",
"re",
"=",
"re",
".",
"compile",
"(",
"'^(Link_|Child_|DynChild_|ChildMember_)$'",
")",
")",
"parameters",
"=",
"TokenStream",
".",
"PatternArg",
"(",
"callback",
"=",
"lambda",
"t",
":",
"TokenGroup",
".",
"IsTokenGroup",
"(",
"t",
",",
"opening_token",
"=",
"'('",
")",
")",
"def",
"semicolon_or_block_callback",
"(",
"t",
")",
":",
"return",
"t",
"==",
"';'",
"or",
"TokenGroup",
".",
"IsTokenGroup",
"(",
"t",
",",
"opening_token",
"=",
"'{'",
")",
"semicolon_or_block",
"=",
"TokenStream",
".",
"PatternArg",
"(",
"callback",
"=",
"semicolon_or_block_callback",
")",
"arg",
"=",
"TokenStream",
".",
"PatternArg",
"(",
")",
"while",
"not",
"stream",
".",
"empty",
"(",
")",
":",
"if",
"stream",
".",
"try_match",
"(",
"pub_priv_prot_re",
",",
"':'",
")",
":",
"continue",
"if",
"stream",
".",
"try_match",
"(",
"re",
".",
"compile",
"(",
"r'^(//|/\\*)'",
")",
")",
":",
"# skip comments",
"continue",
"# whenever we reach this point, this is a new",
"# member variable or member function definition",
"elif",
"stream",
".",
"try_match",
"(",
"'using'",
")",
":",
"semicolon_found",
"=",
"stream",
".",
"discard_until",
"(",
"';'",
")",
"assert",
"semicolon_found",
",",
"\"expected ; after 'using'\"",
"elif",
"stream",
".",
"try_match",
"(",
"attribute_",
",",
"'<'",
")",
":",
"attr_type",
"=",
"self",
".",
"stream_pop_class_name",
"(",
"stream",
")",
"assert",
"stream",
".",
"try_match",
"(",
"'>'",
")",
",",
"\"every 'Attribute_<' has to be closed by '>'\"",
"attr_name",
"=",
"stream",
".",
"pop",
"(",
"\"expect an attribute name\"",
")",
"attr",
"=",
"self",
".",
"objInfo",
".",
"attribute_info",
"(",
"classname",
",",
"attr_name",
")",
"attr",
".",
"type",
"=",
"attr_type",
"attr",
".",
"attribute_class",
"=",
"attribute_",
".",
"value",
"if",
"stream",
".",
"try_match",
"(",
"'='",
")",
":",
"# static initialization:",
"t",
"=",
"stream",
".",
"pop",
"(",
")",
"assert",
"TokenGroup",
".",
"IsTokenGroup",
"(",
"t",
")",
"attr",
".",
"add_constructor_args",
"(",
"t",
".",
"enclosed_tokens",
")",
"if",
"stream",
".",
"try_match",
"(",
"';'",
")",
":",
"# end of attribute definition",
"pass",
"else",
":",
"# some other definition (e.g. a function)",
"stream",
".",
"discard_until",
"(",
"semicolon_or_block",
")",
"elif",
"stream",
".",
"try_match",
"(",
"link_",
",",
"'<'",
")",
":",
"link_type",
"=",
"self",
".",
"stream_pop_class_name",
"(",
"stream",
")",
"stream",
".",
"assert_match",
"(",
"'>'",
",",
"msg",
"=",
"\"every 'Link_<' has to be enclosed by '>'\"",
")",
"cpp_name",
"=",
"stream",
".",
"pop",
"(",
"\"expect an attribute name\"",
")",
"link",
"=",
"self",
".",
"objInfo",
".",
"child_info",
"(",
"classname",
",",
"cpp_name",
")",
"link",
".",
"child_class",
"=",
"link_",
".",
"value",
"link",
".",
"type",
"=",
"link_type",
"stream",
".",
"discard_until",
"(",
"semicolon_or_block",
")",
"elif",
"stream",
".",
"try_match",
"(",
"'ByName'",
",",
"arg",
")",
":",
"link",
"=",
"self",
".",
"objInfo",
".",
"child_info",
"(",
"classname",
",",
"arg",
".",
"value",
")",
"link",
".",
"child_class",
"=",
"'ByName'",
"link",
".",
"user_name",
"=",
"'by-name'",
"link",
".",
"type",
"=",
"ClassName",
"(",
"'ByName'",
")",
"stream",
".",
"discard_until",
"(",
"semicolon_or_block",
")",
"elif",
"stream",
".",
"try_match",
"(",
"classname",
",",
"parameters",
",",
"':'",
")",
":",
"self",
".",
"stream_consume_member_initializers",
"(",
"classname",
",",
"stream",
")",
"else",
":",
"stream",
".",
"discard_until",
"(",
"semicolon_or_block",
")"
] | https://github.com/herbstluftwm/herbstluftwm/blob/23ef0274bd4d317208eae5fea72b21478a71431b/doc/gendoc.py#L668-L731 |
||
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/_gdi.py | python | GraphicsRenderer.CreatePen | (*args, **kwargs) | return _gdi_.GraphicsRenderer_CreatePen(*args, **kwargs) | CreatePen(self, Pen pen) -> GraphicsPen | CreatePen(self, Pen pen) -> GraphicsPen | [
"CreatePen",
"(",
"self",
"Pen",
"pen",
")",
"-",
">",
"GraphicsPen"
] | def CreatePen(*args, **kwargs):
"""CreatePen(self, Pen pen) -> GraphicsPen"""
return _gdi_.GraphicsRenderer_CreatePen(*args, **kwargs) | [
"def",
"CreatePen",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"GraphicsRenderer_CreatePen",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_gdi.py#L6600-L6602 |
|
KhronosGroup/SPIRV-LLVM | 1eb85593f3fe2c39379b9a9b088d51eda4f42b8b | utils/llvm-build/llvmbuild/main.py | python | cmake_quote_path | (value) | return value | cmake_quote_path(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files. | cmake_quote_path(value) -> str | [
"cmake_quote_path",
"(",
"value",
")",
"-",
">",
"str"
] | def cmake_quote_path(value):
"""
cmake_quote_path(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# CMake has a bug in its Makefile generator that doesn't properly quote
# strings it generates. So instead of using proper quoting, we just use "/"
# style paths. Currently, we only handle escaping backslashes.
value = value.replace("\\", "/")
return value | [
"def",
"cmake_quote_path",
"(",
"value",
")",
":",
"# CMake has a bug in it's Makefile generator that doesn't properly quote",
"# strings it generates. So instead of using proper quoting, we just use \"/\"",
"# style paths. Currently, we only handle escaping backslashes.",
"value",
"=",
"value",
".",
"replace",
"(",
"\"\\\\\"",
",",
"\"/\"",
")",
"return",
"value"
] | https://github.com/KhronosGroup/SPIRV-LLVM/blob/1eb85593f3fe2c39379b9a9b088d51eda4f42b8b/utils/llvm-build/llvmbuild/main.py#L26-L39 |
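
Despite the docstring's general wording, the body only flips backslashes to forward slashes; assuming `cmake_quote_path` is in scope:

```python
print(cmake_quote_path('C:\\llvm\\build'))  # -> C:/llvm/build
print(cmake_quote_path('/usr/lib'))         # unchanged -> /usr/lib
```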
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/numpy/math_ops.py | python | _get_strides | (dims, order='C') | return P.Concat(0)(tup) | Generates strides (1-D tensor) according to `dims` (1-D tensor). | Generates strides (1-D tensor) according to `dims` (1-D tensor). | [
"Generates",
"strides",
"(",
"1",
"-",
"D",
"tensor",
")",
"according",
"to",
"dims",
"(",
"1",
"-",
"D",
"tensor",
")",
"."
] | def _get_strides(dims, order='C'):
"""Generates strides (1-D tensor) according to `dims` (1-D tensor)."""
if order not in ['C', 'F']:
_raise_value_error("invalid order. Expected 'C' or 'F'")
tup = (_to_tensor([1]),)
dims = dims[1:][::-1] if order == 'C' else dims[:-1]
for d in dims:
tensor = tup[-1] * d
if tensor.ndim < 1:
tensor = F.expand_dims(tensor, 0)
tup += (tensor,)
tup = tup[::-1] if order == 'C' else tup
return P.Concat(0)(tup) | [
"def",
"_get_strides",
"(",
"dims",
",",
"order",
"=",
"'C'",
")",
":",
"if",
"order",
"not",
"in",
"[",
"'C'",
",",
"'F'",
"]",
":",
"_raise_value_error",
"(",
"\"invalid order. Expected 'C' or 'F'\"",
")",
"tup",
"=",
"(",
"_to_tensor",
"(",
"[",
"1",
"]",
")",
",",
")",
"dims",
"=",
"dims",
"[",
"1",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
"if",
"order",
"==",
"'C'",
"else",
"dims",
"[",
":",
"-",
"1",
"]",
"for",
"d",
"in",
"dims",
":",
"tensor",
"=",
"tup",
"[",
"-",
"1",
"]",
"*",
"d",
"if",
"tensor",
".",
"ndim",
"<",
"1",
":",
"tensor",
"=",
"F",
".",
"expand_dims",
"(",
"tensor",
",",
"0",
")",
"tup",
"+=",
"(",
"tensor",
",",
")",
"tup",
"=",
"tup",
"[",
":",
":",
"-",
"1",
"]",
"if",
"order",
"==",
"'C'",
"else",
"tup",
"return",
"P",
".",
"Concat",
"(",
"0",
")",
"(",
"tup",
")"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/numpy/math_ops.py#L5440-L5452 |
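
A plain-Python rendering of the same recurrence, in element (not byte) units, to make the C- versus F-order results concrete:

```python
def strides(dims, order='C'):
    out = [1]
    # C order accumulates from the trailing dims, F order from the leading dims.
    seq = dims[1:][::-1] if order == 'C' else dims[:-1]
    for d in seq:
        out.append(out[-1] * d)
    return out[::-1] if order == 'C' else out

print(strides([2, 3, 4], 'C'))  # -> [12, 4, 1]
print(strides([2, 3, 4], 'F'))  # -> [1, 2, 6]
```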
|
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | chrome/tools/build/version.py | python | write_if_changed | (file_name, contents) | Writes the specified contents to the specified file_name
iff the contents are different than the current contents. | Writes the specified contents to the specified file_name
iff the contents are different than the current contents. | [
"Writes",
"the",
"specified",
"contents",
"to",
"the",
"specified",
"file_name",
"iff",
"the",
"contents",
"are",
"different",
"than",
"the",
"current",
"contents",
"."
] | def write_if_changed(file_name, contents):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
"""
try:
old_contents = open(file_name, 'r').read()
except EnvironmentError:
pass
else:
if contents == old_contents:
return
os.unlink(file_name)
open(file_name, 'w').write(contents) | [
"def",
"write_if_changed",
"(",
"file_name",
",",
"contents",
")",
":",
"try",
":",
"old_contents",
"=",
"open",
"(",
"file_name",
",",
"'r'",
")",
".",
"read",
"(",
")",
"except",
"EnvironmentError",
":",
"pass",
"else",
":",
"if",
"contents",
"==",
"old_contents",
":",
"return",
"os",
".",
"unlink",
"(",
"file_name",
")",
"open",
"(",
"file_name",
",",
"'w'",
")",
".",
"write",
"(",
"contents",
")"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/chrome/tools/build/version.py#L91-L104 |
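
Typical use in a build script (filename and contents illustrative): the second call is a no-op, so the file's mtime is untouched and dependents are not rebuilt:

```python
write_if_changed('VERSION.h', '#define PRODUCT_VERSION "17.0.963.46"\n')
write_if_changed('VERSION.h', '#define PRODUCT_VERSION "17.0.963.46"\n')  # no rewrite
```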
||
google/iree | 1224bbdbe65b0d1fdf40e7324f60f68beeaf7c76 | build_tools/benchmarks/post_benchmarks_as_pr_comment.py | python | get_previous_comment_on_pr | (pr_number: str,
verbose: bool = False) | return None | Gets the previous comment's ID from GitHub. | Gets the previous comment's ID from GitHub. | [
"Gets",
"the",
"previous",
"comment",
"s",
"ID",
"from",
"GitHub",
"."
] | def get_previous_comment_on_pr(pr_number: str,
verbose: bool = False) -> Optional[int]:
"""Gets the previous comment's ID from GitHub."""
# Increasing per_page limit requires user authentication.
api_token = get_required_env_var('GITHUB_TOKEN')
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {api_token}",
}
payload = json.dumps({"per_page": 100})
api_endpoint = f"{GITHUB_IREE_API_PREFIX}/issues/{pr_number}/comments"
response = requests.get(api_endpoint, data=payload, headers=headers)
if response.status_code != 200:
raise requests.RequestException(
f"Failed to get PR comments from GitHub; error code: {response.status_code}"
)
response = response.json()
if verbose:
print(f"Previous comment query response: {response}")
# Find the last comment from GITHUB_USER that has the ABBR_PR_COMMENT_TITLE
# keyword.
for comment in reversed(response):
if (comment["user"]["login"] == GITHUB_USER) and (ABBR_PR_COMMENT_TITLE
in comment["body"]):
return comment["id"]
return None | [
"def",
"get_previous_comment_on_pr",
"(",
"pr_number",
":",
"str",
",",
"verbose",
":",
"bool",
"=",
"False",
")",
"->",
"Optional",
"[",
"int",
"]",
":",
"# Increasing per_page limit requires user authentication.",
"api_token",
"=",
"get_required_env_var",
"(",
"'GITHUB_TOKEN'",
")",
"headers",
"=",
"{",
"\"Accept\"",
":",
"\"application/vnd.github.v3+json\"",
",",
"\"Authorization\"",
":",
"f\"token {api_token}\"",
",",
"}",
"payload",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"per_page\"",
":",
"100",
"}",
")",
"api_endpoint",
"=",
"f\"{GITHUB_IREE_API_PREFIX}/issues/{pr_number}/comments\"",
"response",
"=",
"requests",
".",
"get",
"(",
"api_endpoint",
",",
"data",
"=",
"payload",
",",
"headers",
"=",
"headers",
")",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"raise",
"requests",
".",
"RequestException",
"(",
"f\"Failed to get PR comments from GitHub; error code: {response.status_code}\"",
")",
"response",
"=",
"response",
".",
"json",
"(",
")",
"if",
"verbose",
":",
"print",
"(",
"f\"Previous comment query response: {response}\"",
")",
"# Find the last comment from GITHUB_USER and has the ABBR_PR_COMMENT_TITILE",
"# keyword.",
"for",
"comment",
"in",
"reversed",
"(",
"response",
")",
":",
"if",
"(",
"comment",
"[",
"\"user\"",
"]",
"[",
"\"login\"",
"]",
"==",
"GITHUB_USER",
")",
"and",
"(",
"ABBR_PR_COMMENT_TITLE",
"in",
"comment",
"[",
"\"body\"",
"]",
")",
":",
"return",
"comment",
"[",
"\"id\"",
"]",
"return",
"None"
] | https://github.com/google/iree/blob/1224bbdbe65b0d1fdf40e7324f60f68beeaf7c76/build_tools/benchmarks/post_benchmarks_as_pr_comment.py#L222-L250 |
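
A hedged calling sketch, assuming the module is imported and `GITHUB_TOKEN` is set (`get_required_env_var` raises otherwise); the PR number is hypothetical:

```python
comment_id = get_previous_comment_on_pr('1234', verbose=True)  # hypothetical PR
if comment_id is None:
    print('no earlier benchmark comment found; post a fresh one')
else:
    print('update existing comment', comment_id)
```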
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/aui/auibar.py | python | AuiDefaultToolBarArt.GetOrientation | (self) | return self._orientation | Returns the toolbar orientation. | Returns the toolbar orientation. | [
"Returns",
"the",
"toolbar",
"orientation",
"."
] | def GetOrientation(self):
""" Returns the toolbar orientation. """
return self._orientation | [
"def",
"GetOrientation",
"(",
"self",
")",
":",
"return",
"self",
".",
"_orientation"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/auibar.py#L883-L886 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py | python | Context.divide | (self, a, b) | Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1') | Decimal division in a specified context. | [
"Decimal",
"division",
"in",
"a",
"specified",
"context",
"."
] | def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r | [
"def",
"divide",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"a",
"=",
"_convert_other",
"(",
"a",
",",
"raiseit",
"=",
"True",
")",
"r",
"=",
"a",
".",
"__truediv__",
"(",
"b",
",",
"context",
"=",
"self",
")",
"if",
"r",
"is",
"NotImplemented",
":",
"raise",
"TypeError",
"(",
"\"Unable to convert %s to Decimal\"",
"%",
"b",
")",
"else",
":",
"return",
"r"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/_pydecimal.py#L4358-L4393 |
||
apache/openoffice | 97289b2620590d8b431bcc408f87252db6203818 | main/pyuno/source/module/uno.py | python | absolutize | ( path, relativeUrl ) | return pyuno.absolutize( path, relativeUrl ) | returns an absolute file url from the given urls | returns an absolute file url from the given urls | [
"returns",
"an",
"absolute",
"file",
"url",
"from",
"the",
"given",
"urls"
] | def absolutize( path, relativeUrl ):
"returns an absolute file url from the given urls"
return pyuno.absolutize( path, relativeUrl ) | [
"def",
"absolutize",
"(",
"path",
",",
"relativeUrl",
")",
":",
"return",
"pyuno",
".",
"absolutize",
"(",
"path",
",",
"relativeUrl",
")"
] | https://github.com/apache/openoffice/blob/97289b2620590d8b431bcc408f87252db6203818/main/pyuno/source/module/uno.py#L94-L96 |
|
y123456yz/reading-and-annotate-mongodb-3.6 | 93280293672ca7586dc24af18132aa61e4ed7fcf | mongo/buildscripts/idl/idl/parser.py | python | _parse_chained_type | (ctxt, name, node) | return chain | Parse a chained type in a struct in the IDL file. | Parse a chained type in a struct in the IDL file. | [
"Parse",
"a",
"chained",
"type",
"in",
"a",
"struct",
"in",
"the",
"IDL",
"file",
"."
] | def _parse_chained_type(ctxt, name, node):
# type: (errors.ParserContext, str, yaml.nodes.MappingNode) -> syntax.ChainedType
"""Parse a chained type in a struct in the IDL file."""
chain = syntax.ChainedType(ctxt.file_name, node.start_mark.line, node.start_mark.column)
chain.name = name
_generic_parser(ctxt, node, "chain", chain, {
"cpp_name": _RuleDesc('scalar'),
})
return chain | [
"def",
"_parse_chained_type",
"(",
"ctxt",
",",
"name",
",",
"node",
")",
":",
"# type: (errors.ParserContext, str, yaml.nodes.MappingNode) -> syntax.ChainedType",
"chain",
"=",
"syntax",
".",
"ChainedType",
"(",
"ctxt",
".",
"file_name",
",",
"node",
".",
"start_mark",
".",
"line",
",",
"node",
".",
"start_mark",
".",
"column",
")",
"chain",
".",
"name",
"=",
"name",
"_generic_parser",
"(",
"ctxt",
",",
"node",
",",
"\"chain\"",
",",
"chain",
",",
"{",
"\"cpp_name\"",
":",
"_RuleDesc",
"(",
"'scalar'",
")",
",",
"}",
")",
"return",
"chain"
] | https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/idl/idl/parser.py#L236-L246 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/lib2to3/refactor.py | python | RefactoringTool.traverse_by | (self, fixers, traversal) | Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None | Traverse an AST, applying a set of fixers to each node. | [
"Traverse",
"an",
"AST",
"applying",
"a",
"set",
"of",
"fixers",
"to",
"each",
"node",
"."
] | def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new | [
"def",
"traverse_by",
"(",
"self",
",",
"fixers",
",",
"traversal",
")",
":",
"if",
"not",
"fixers",
":",
"return",
"for",
"node",
"in",
"traversal",
":",
"for",
"fixer",
"in",
"fixers",
"[",
"node",
".",
"type",
"]",
":",
"results",
"=",
"fixer",
".",
"match",
"(",
"node",
")",
"if",
"results",
":",
"new",
"=",
"fixer",
".",
"transform",
"(",
"node",
",",
"results",
")",
"if",
"new",
"is",
"not",
"None",
":",
"node",
".",
"replace",
"(",
"new",
")",
"node",
"=",
"new"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib2to3/refactor.py#L484-L505 |
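
A self-contained toy of the match/transform/replace protocol `traverse_by` expects; real lib2to3 fixers and tree nodes are richer than this sketch:

```python
class Node:
    def __init__(self, type_, value):
        self.type, self.value = type_, value
    def replace(self, new):            # real lib2to3 splices into the parse tree
        self.value = new.value

class UpperFixer:
    def match(self, node):
        return node.value.islower()    # a falsy result means "no match"
    def transform(self, node, results):
        return Node(node.type, node.value.upper())

fixers = {0: [UpperFixer()]}           # keyed by node.type, as in traverse_by
nodes = [Node(0, 'a'), Node(0, 'B')]   # any node iterator works as "traversal"
for node in nodes:
    for fixer in fixers[node.type]:
        results = fixer.match(node)
        if results:
            new = fixer.transform(node, results)
            if new is not None:
                node.replace(new)
print([n.value for n in nodes])        # -> ['A', 'B']
```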
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | catboost/python-package/catboost/core.py | python | CatBoost.fit | (self, X, y=None, cat_features=None, text_features=None, embedding_features=None, pairs=None, sample_weight=None, group_id=None,
group_weight=None, subgroup_id=None, pairs_weight=None, baseline=None, use_best_model=None,
eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,
verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,
save_snapshot=None, snapshot_file=None, snapshot_interval=None, init_model=None, callbacks=None,
log_cout=sys.stdout, log_cerr=sys.stderr) | return self._fit(X, y, cat_features, text_features, embedding_features, pairs, sample_weight, group_id, group_weight, subgroup_id,
pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot,
column_description, verbose_eval, metric_period, silent, early_stopping_rounds,
save_snapshot, snapshot_file, snapshot_interval, init_model, callbacks, log_cout, log_cerr) | Fit the CatBoost model.
Parameters
----------
X : catboost.Pool or list or numpy.ndarray or pandas.DataFrame or pandas.Series
or string.
If not catboost.Pool or catboost.FeaturesData it must be 2 dimensional Feature matrix
or string - file with dataset.
Must be non-empty (contain > 0 objects)
y : list or numpy.ndarray or pandas.DataFrame or pandas.Series, optional (default=None)
Labels, 1 dimensional array like.
Use only if X is not catboost.Pool.
cat_features : list or numpy.ndarray, optional (default=None)
If not None, giving the list of Categ columns indices.
Use only if X is not catboost.Pool and not catboost.FeaturesData
text_features: list or numpy.ndarray, optional (default=None)
If not None, giving the list of Text columns indices.
Use only if X is not catboost.Pool and not catboost.FeaturesData
embedding_features: list or numpy.ndarray, optional (default=None)
If not None, giving the list of Embedding columns indices.
Use only if X is not catboost.Pool and not catboost.FeaturesData
pairs : list or numpy.ndarray or pandas.DataFrame
The pairs description.
If list or numpy.ndarrays or pandas.DataFrame, giving 2 dimensional.
The shape should be Nx2, where N is the pairs' count. The first element of the pair is
the index of the winner object in the training set. The second element of the pair is
the index of the loser object in the training set.
sample_weight : list or numpy.ndarray or pandas.DataFrame or pandas.Series, optional (default=None)
Instance weights, 1 dimensional array like.
group_id : list or numpy.ndarray, optional (default=None)
group id for each instance.
If not None, giving 1 dimensional array like data.
Use only if X is not catboost.Pool.
group_weight : list or numpy.ndarray, optional (default=None)
Group weight for each instance.
If not None, giving 1 dimensional array like data.
subgroup_id : list or numpy.ndarray, optional (default=None)
subgroup id for each instance.
If not None, giving 1 dimensional array like data.
Use only if X is not catboost.Pool.
pairs_weight : list or numpy.ndarray, optional (default=None)
Weight for each pair.
If not None, giving 1 dimensional array like pairs.
baseline : list or numpy.ndarray, optional (default=None)
If not None, giving 2 dimensional array like data.
Use only if X is not catboost.Pool.
use_best_model : bool, optional (default=None)
Flag to use best model
eval_set : catboost.Pool, or list of catboost.Pool, or list of (X, y) tuples, optional (default=None)
Used as a validation set for early-stopping.
logging_level : string, optional (default=None)
Possible values:
- 'Silent'
- 'Verbose'
- 'Info'
- 'Debug'
metric_period : int
Frequency of evaluating metrics.
verbose : bool or int
If verbose is bool, then if set to True, logging_level is set to Verbose,
if set to False, logging_level is set to Silent.
If verbose is int, it determines the frequency of writing metrics to output and
logging_level is set to Verbose.
silent : bool
If silent is True, logging_level is set to Silent.
If silent is False, logging_level is set to Verbose.
verbose_eval : bool or int
Synonym for verbose. Only one of these parameters should be set.
plot : bool, optional (default=False)
If True, draw train and eval error in Jupyter notebook
early_stopping_rounds : int
Activates Iter overfitting detector with od_wait parameter set to early_stopping_rounds.
save_snapshot : bool, [default=None]
Enable progress snapshotting for restoring progress after crashes or interruptions
snapshot_file : string or pathlib.Path, [default=None]
Learn progress snapshot file path, if None will use default filename
snapshot_interval: int, [default=600]
Interval between saving snapshots (seconds)
init_model : CatBoost class or string or pathlib.Path, [default=None]
Continue training starting from the existing model.
If this parameter is a string or pathlib.Path, load initial model from the path specified by this string.
callbacks : list, optional (default=None)
List of callback objects that are applied at end of each iteration.
log_cout: output stream or callback for logging
log_cerr: error stream or callback for logging
Returns
-------
model : CatBoost | Fit the CatBoost model. | [
"Fit",
"the",
"CatBoost",
"model",
"."
] | def fit(self, X, y=None, cat_features=None, text_features=None, embedding_features=None, pairs=None, sample_weight=None, group_id=None,
group_weight=None, subgroup_id=None, pairs_weight=None, baseline=None, use_best_model=None,
eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None,
verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None,
save_snapshot=None, snapshot_file=None, snapshot_interval=None, init_model=None, callbacks=None,
log_cout=sys.stdout, log_cerr=sys.stderr):
"""
Fit the CatBoost model.
Parameters
----------
X : catboost.Pool or list or numpy.ndarray or pandas.DataFrame or pandas.Series
or string.
If not catboost.Pool or catboost.FeaturesData it must be 2 dimensional Feature matrix
or string - file with dataset.
Must be non-empty (contain > 0 objects)
y : list or numpy.ndarray or pandas.DataFrame or pandas.Series, optional (default=None)
Labels, 1 dimensional array like.
Use only if X is not catboost.Pool.
cat_features : list or numpy.ndarray, optional (default=None)
If not None, giving the list of Categ columns indices.
Use only if X is not catboost.Pool and not catboost.FeaturesData
text_features: list or numpy.ndarray, optional (default=None)
If not None, giving the list of Text columns indices.
Use only if X is not catboost.Pool and not catboost.FeaturesData
embedding_features: list or numpy.ndarray, optional (default=None)
If not None, giving the list of Embedding columns indices.
Use only if X is not catboost.Pool and not catboost.FeaturesData
pairs : list or numpy.ndarray or pandas.DataFrame
The pairs description.
If list or numpy.ndarrays or pandas.DataFrame, giving 2 dimensional.
The shape should be Nx2, where N is the pairs' count. The first element of the pair is
the index of the winner object in the training set. The second element of the pair is
the index of the loser object in the training set.
sample_weight : list or numpy.ndarray or pandas.DataFrame or pandas.Series, optional (default=None)
Instance weights, 1 dimensional array like.
group_id : list or numpy.ndarray, optional (default=None)
group id for each instance.
If not None, giving 1 dimensional array like data.
Use only if X is not catboost.Pool.
group_weight : list or numpy.ndarray, optional (default=None)
Group weight for each instance.
If not None, giving 1 dimensional array like data.
subgroup_id : list or numpy.ndarray, optional (default=None)
subgroup id for each instance.
If not None, giving 1 dimensional array like data.
Use only if X is not catboost.Pool.
pairs_weight : list or numpy.ndarray, optional (default=None)
Weight for each pair.
If not None, giving 1 dimensional array like pairs.
baseline : list or numpy.ndarray, optional (default=None)
If not None, giving 2 dimensional array like data.
Use only if X is not catboost.Pool.
use_best_model : bool, optional (default=None)
Flag to use best model
eval_set : catboost.Pool, or list of catboost.Pool, or list of (X, y) tuples, optional (default=None)
Used as a validation set for early-stopping.
logging_level : string, optional (default=None)
Possible values:
- 'Silent'
- 'Verbose'
- 'Info'
- 'Debug'
metric_period : int
Frequency of evaluating metrics.
verbose : bool or int
If verbose is bool, then if set to True, logging_level is set to Verbose,
if set to False, logging_level is set to Silent.
If verbose is int, it determines the frequency of writing metrics to output and
logging_level is set to Verbose.
silent : bool
If silent is True, logging_level is set to Silent.
If silent is False, logging_level is set to Verbose.
verbose_eval : bool or int
Synonym for verbose. Only one of these parameters should be set.
plot : bool, optional (default=False)
If True, draw train and eval error in Jupyter notebook
early_stopping_rounds : int
Activates Iter overfitting detector with od_wait parameter set to early_stopping_rounds.
save_snapshot : bool, [default=None]
Enable progress snapshotting for restoring progress after crashes or interruptions
snapshot_file : string or pathlib.Path, [default=None]
Learn progress snapshot file path, if None will use default filename
snapshot_interval: int, [default=600]
Interval between saving snapshots (seconds)
init_model : CatBoost class or string or pathlib.Path, [default=None]
Continue training starting from the existing model.
If this parameter is a string or pathlib.Path, load initial model from the path specified by this string.
callbacks : list, optional (default=None)
List of callback objects that are applied at end of each iteration.
log_cout: output stream or callback for logging
log_cerr: error stream or callback for logging
Returns
-------
model : CatBoost
"""
return self._fit(X, y, cat_features, text_features, embedding_features, pairs, sample_weight, group_id, group_weight, subgroup_id,
pairs_weight, baseline, use_best_model, eval_set, verbose, logging_level, plot,
column_description, verbose_eval, metric_period, silent, early_stopping_rounds,
save_snapshot, snapshot_file, snapshot_interval, init_model, callbacks, log_cout, log_cerr) | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"cat_features",
"=",
"None",
",",
"text_features",
"=",
"None",
",",
"embedding_features",
"=",
"None",
",",
"pairs",
"=",
"None",
",",
"sample_weight",
"=",
"None",
",",
"group_id",
"=",
"None",
",",
"group_weight",
"=",
"None",
",",
"subgroup_id",
"=",
"None",
",",
"pairs_weight",
"=",
"None",
",",
"baseline",
"=",
"None",
",",
"use_best_model",
"=",
"None",
",",
"eval_set",
"=",
"None",
",",
"verbose",
"=",
"None",
",",
"logging_level",
"=",
"None",
",",
"plot",
"=",
"False",
",",
"column_description",
"=",
"None",
",",
"verbose_eval",
"=",
"None",
",",
"metric_period",
"=",
"None",
",",
"silent",
"=",
"None",
",",
"early_stopping_rounds",
"=",
"None",
",",
"save_snapshot",
"=",
"None",
",",
"snapshot_file",
"=",
"None",
",",
"snapshot_interval",
"=",
"None",
",",
"init_model",
"=",
"None",
",",
"callbacks",
"=",
"None",
",",
"log_cout",
"=",
"sys",
".",
"stdout",
",",
"log_cerr",
"=",
"sys",
".",
"stderr",
")",
":",
"return",
"self",
".",
"_fit",
"(",
"X",
",",
"y",
",",
"cat_features",
",",
"text_features",
",",
"embedding_features",
",",
"pairs",
",",
"sample_weight",
",",
"group_id",
",",
"group_weight",
",",
"subgroup_id",
",",
"pairs_weight",
",",
"baseline",
",",
"use_best_model",
",",
"eval_set",
",",
"verbose",
",",
"logging_level",
",",
"plot",
",",
"column_description",
",",
"verbose_eval",
",",
"metric_period",
",",
"silent",
",",
"early_stopping_rounds",
",",
"save_snapshot",
",",
"snapshot_file",
",",
"snapshot_interval",
",",
"init_model",
",",
"callbacks",
",",
"log_cout",
",",
"log_cerr",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/catboost/python-package/catboost/core.py#L2216-L2344 |
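
A minimal end-to-end sketch of the `fit` call documented above; the toy data and `iterations=10` are illustrative only:

```python
from catboost import CatBoostClassifier, Pool

train = Pool(data=[[1, 4], [2, 5], [3, 6], [4, 7]], label=[0, 0, 1, 1])
model = CatBoostClassifier(iterations=10, verbose=False)
model.fit(train)                 # same signature as CatBoost.fit above
print(model.predict([[2, 5]]))
```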
|
google/llvm-propeller | 45c226984fe8377ebfb2ad7713c680d652ba678d | libcxx/utils/libcxx/sym_check/extract.py | python | extract_symbols | (lib_file, static_lib=None) | return extractor.extract(lib_file) | Extract and return a list of symbols extracted from a static or dynamic
library. The symbols are extracted using NM or readelf. They are then
filtered and formatted. Finally the symbols are made unique. | Extract and return a list of symbols extracted from a static or dynamic
library. The symbols are extracted using NM or readelf. They are then
filtered and formatted. Finally the symbols are made unique. | [
"Extract",
"and",
"return",
"a",
"list",
"of",
"symbols",
"extracted",
"from",
"a",
"static",
"or",
"dynamic",
"library",
".",
"The",
"symbols",
"are",
"extracted",
"using",
"NM",
"or",
"readelf",
".",
"They",
"are",
"then",
"filtered",
"and",
"formated",
".",
"Finally",
"they",
"symbols",
"are",
"made",
"unique",
"."
] | def extract_symbols(lib_file, static_lib=None):
"""
Extract and return a list of symbols extracted from a static or dynamic
library. The symbols are extracted using NM or readelf. They are then
filtered and formatted. Finally the symbols are made unique.
"""
if static_lib is None:
_, ext = os.path.splitext(lib_file)
static_lib = True if ext in ['.a'] else False
if ReadElfExtractor.find_tool() and not static_lib:
extractor = ReadElfExtractor(static_lib=static_lib)
else:
extractor = NMExtractor(static_lib=static_lib)
return extractor.extract(lib_file) | [
"def",
"extract_symbols",
"(",
"lib_file",
",",
"static_lib",
"=",
"None",
")",
":",
"if",
"static_lib",
"is",
"None",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"lib_file",
")",
"static_lib",
"=",
"True",
"if",
"ext",
"in",
"[",
"'.a'",
"]",
"else",
"False",
"if",
"ReadElfExtractor",
".",
"find_tool",
"(",
")",
"and",
"not",
"static_lib",
":",
"extractor",
"=",
"ReadElfExtractor",
"(",
"static_lib",
"=",
"static_lib",
")",
"else",
":",
"extractor",
"=",
"NMExtractor",
"(",
"static_lib",
"=",
"static_lib",
")",
"return",
"extractor",
".",
"extract",
"(",
"lib_file",
")"
] | https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/libcxx/utils/libcxx/sym_check/extract.py#L188-L201 |
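
A hedged usage sketch with placeholder paths; the static/dynamic split is auto-detected from the `.a` extension when `static_lib` is not given:

```python
dyn_syms = extract_symbols('libc++.so.1')   # dynamic: readelf if available
static_syms = extract_symbols('libc++.a')   # '.a' implies static_lib=True
print(len(dyn_syms), len(static_syms))
```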
|
wenwei202/caffe | f54a74abaf6951d8485cbdcfa1d74a4c37839466 | scripts/cpp_lint.py | python | CheckSpacing | (filename, clean_lines, linenum, nesting_state, error) | Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Checks for the correctness of various spacing issues in the code. | [
"Checks",
"for",
"the",
"correctness",
"of",
"various",
"spacing",
"issues",
"in",
"the",
"code",
"."
] | def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in a 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into a 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
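  # Illustrative (hypothetical input): "set<set<int>>x" is flagged because
  # ">>" is immediately followed by an identifier; "set<set<int>> x" passes.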
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
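  # Illustrative (hypothetical input): "  if(x) {" is flagged; "  if (x) {"
  # passes because of the space before the parenthesis.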
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
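  # Illustrative (hypothetical input): "f(a,b)" is flagged; "f(a, b)" passes,
  # and a comma followed directly by another comma (an empty macro argument)
  # is exempt.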
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
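  # Illustrative (hypothetical input): "a = 1;b = 2;" is flagged; "for (;;)"
  # passes because ';' and ')' are in the exempt character class.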
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
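  # Illustrative (hypothetical input): "}else {" is flagged; "} else {" passes.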
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
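  # Illustrative (hypothetical input): "return 1 ;" draws the warning above;
  # any line containing "for" is exempt, since the style guide allows a
  # space before the semicolon there.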
  # In range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search('for *\(.*[^:]:[^: ]', line) or
Search('for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop') | [
"def",
"CheckSpacing",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"nesting_state",
",",
"error",
")",
":",
"# Don't use \"elided\" lines here, otherwise we can't check commented lines.",
"# Don't want to use \"raw\" either, because we don't want to check inside C++11",
"# raw strings,",
"raw",
"=",
"clean_lines",
".",
"lines_without_raw_strings",
"line",
"=",
"raw",
"[",
"linenum",
"]",
"# Before nixing comments, check if the line is blank for no good",
"# reason. This includes the first line after a block is opened, and",
"# blank lines at the end of a function (ie, right before a line like '}'",
"#",
"# Skip all the blank line checks if we are immediately inside a",
"# namespace body. In other words, don't issue blank line warnings",
"# for this block:",
"# namespace {",
"#",
"# }",
"#",
"# A warning about missing end of namespace comments will be issued instead.",
"if",
"IsBlankLine",
"(",
"line",
")",
"and",
"not",
"nesting_state",
".",
"InNamespaceBody",
"(",
")",
":",
"elided",
"=",
"clean_lines",
".",
"elided",
"prev_line",
"=",
"elided",
"[",
"linenum",
"-",
"1",
"]",
"prevbrace",
"=",
"prev_line",
".",
"rfind",
"(",
"'{'",
")",
"# TODO(unknown): Don't complain if line before blank line, and line after,",
"# both start with alnums and are indented the same amount.",
"# This ignores whitespace at the start of a namespace block",
"# because those are not usually indented.",
"if",
"prevbrace",
"!=",
"-",
"1",
"and",
"prev_line",
"[",
"prevbrace",
":",
"]",
".",
"find",
"(",
"'}'",
")",
"==",
"-",
"1",
":",
"# OK, we have a blank line at the start of a code block. Before we",
"# complain, we check if it is an exception to the rule: The previous",
"# non-empty line has the parameters of a function header that are indented",
"# 4 spaces (because they did not fit in a 80 column line when placed on",
"# the same line as the function name). We also check for the case where",
"# the previous line is indented 6 spaces, which may happen when the",
"# initializers of a constructor do not fit into a 80 column line.",
"exception",
"=",
"False",
"if",
"Match",
"(",
"r' {6}\\w'",
",",
"prev_line",
")",
":",
"# Initializer list?",
"# We are looking for the opening column of initializer list, which",
"# should be indented 4 spaces to cause 6 space indentation afterwards.",
"search_position",
"=",
"linenum",
"-",
"2",
"while",
"(",
"search_position",
">=",
"0",
"and",
"Match",
"(",
"r' {6}\\w'",
",",
"elided",
"[",
"search_position",
"]",
")",
")",
":",
"search_position",
"-=",
"1",
"exception",
"=",
"(",
"search_position",
">=",
"0",
"and",
"elided",
"[",
"search_position",
"]",
"[",
":",
"5",
"]",
"==",
"' :'",
")",
"else",
":",
"# Search for the function arguments or an initializer list. We use a",
"# simple heuristic here: If the line is indented 4 spaces; and we have a",
"# closing paren, without the opening paren, followed by an opening brace",
"# or colon (for initializer lists) we assume that it is the last line of",
"# a function header. If we have a colon indented 4 spaces, it is an",
"# initializer list.",
"exception",
"=",
"(",
"Match",
"(",
"r' {4}\\w[^\\(]*\\)\\s*(const\\s*)?(\\{\\s*$|:)'",
",",
"prev_line",
")",
"or",
"Match",
"(",
"r' {4}:'",
",",
"prev_line",
")",
")",
"if",
"not",
"exception",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/blank_line'",
",",
"2",
",",
"'Redundant blank line at the start of a code block '",
"'should be deleted.'",
")",
"# Ignore blank lines at the end of a block in a long if-else",
"# chain, like this:",
"# if (condition1) {",
"# // Something followed by a blank line",
"#",
"# } else if (condition2) {",
"# // Something else",
"# }",
"if",
"linenum",
"+",
"1",
"<",
"clean_lines",
".",
"NumLines",
"(",
")",
":",
"next_line",
"=",
"raw",
"[",
"linenum",
"+",
"1",
"]",
"if",
"(",
"next_line",
"and",
"Match",
"(",
"r'\\s*}'",
",",
"next_line",
")",
"and",
"next_line",
".",
"find",
"(",
"'} else '",
")",
"==",
"-",
"1",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/blank_line'",
",",
"3",
",",
"'Redundant blank line at the end of a code block '",
"'should be deleted.'",
")",
"matched",
"=",
"Match",
"(",
"r'\\s*(public|protected|private):'",
",",
"prev_line",
")",
"if",
"matched",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/blank_line'",
",",
"3",
",",
"'Do not leave a blank line after \"%s:\"'",
"%",
"matched",
".",
"group",
"(",
"1",
")",
")",
"# Next, we complain if there's a comment too near the text",
"commentpos",
"=",
"line",
".",
"find",
"(",
"'//'",
")",
"if",
"commentpos",
"!=",
"-",
"1",
":",
"# Check if the // may be in quotes. If so, ignore it",
"# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison",
"if",
"(",
"line",
".",
"count",
"(",
"'\"'",
",",
"0",
",",
"commentpos",
")",
"-",
"line",
".",
"count",
"(",
"'\\\\\"'",
",",
"0",
",",
"commentpos",
")",
")",
"%",
"2",
"==",
"0",
":",
"# not in quotes",
"# Allow one space for new scopes, two spaces otherwise:",
"if",
"(",
"not",
"Match",
"(",
"r'^\\s*{ //'",
",",
"line",
")",
"and",
"(",
"(",
"commentpos",
">=",
"1",
"and",
"line",
"[",
"commentpos",
"-",
"1",
"]",
"not",
"in",
"string",
".",
"whitespace",
")",
"or",
"(",
"commentpos",
">=",
"2",
"and",
"line",
"[",
"commentpos",
"-",
"2",
"]",
"not",
"in",
"string",
".",
"whitespace",
")",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/comments'",
",",
"2",
",",
"'At least two spaces is best between code and comments'",
")",
"# There should always be a space between the // and the comment",
"commentend",
"=",
"commentpos",
"+",
"2",
"if",
"commentend",
"<",
"len",
"(",
"line",
")",
"and",
"not",
"line",
"[",
"commentend",
"]",
"==",
"' '",
":",
"# but some lines are exceptions -- e.g. if they're big",
"# comment delimiters like:",
"# //----------------------------------------------------------",
"# or are an empty C++ style Doxygen comment, like:",
"# ///",
"# or C++ style Doxygen comments placed after the variable:",
"# ///< Header comment",
"# //!< Header comment",
"# or they begin with multiple slashes followed by a space:",
"# //////// Header comment",
"match",
"=",
"(",
"Search",
"(",
"r'[=/-]{4,}\\s*$'",
",",
"line",
"[",
"commentend",
":",
"]",
")",
"or",
"Search",
"(",
"r'^/$'",
",",
"line",
"[",
"commentend",
":",
"]",
")",
"or",
"Search",
"(",
"r'^!< '",
",",
"line",
"[",
"commentend",
":",
"]",
")",
"or",
"Search",
"(",
"r'^/< '",
",",
"line",
"[",
"commentend",
":",
"]",
")",
"or",
"Search",
"(",
"r'^/+ '",
",",
"line",
"[",
"commentend",
":",
"]",
")",
")",
"if",
"not",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/comments'",
",",
"4",
",",
"'Should have a space between // and comment'",
")",
"CheckComment",
"(",
"line",
"[",
"commentpos",
":",
"]",
",",
"filename",
",",
"linenum",
",",
"error",
")",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"# get rid of comments and strings",
"# Don't try to do spacing checks for operator methods",
"line",
"=",
"re",
".",
"sub",
"(",
"r'operator(==|!=|<|<<|<=|>=|>>|>)\\('",
",",
"'operator\\('",
",",
"line",
")",
"# We allow no-spaces around = within an if: \"if ( (a=Foo()) == 0 )\".",
"# Otherwise not. Note we only check for non-spaces on *both* sides;",
"# sometimes people put non-spaces on one side when aligning ='s among",
"# many lines (not that this is behavior that I approve of...)",
"if",
"Search",
"(",
"r'[\\w.]=[\\w.]'",
",",
"line",
")",
"and",
"not",
"Search",
"(",
"r'\\b(if|while) '",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"4",
",",
"'Missing spaces around ='",
")",
"# It's ok not to have spaces around binary operators like + - * /, but if",
"# there's too little whitespace, we get concerned. It's hard to tell,",
"# though, so we punt on this one for now. TODO.",
"# You should always have whitespace around binary operators.",
"#",
"# Check <= and >= first to avoid false positives with < and >, then",
"# check non-include lines for spacing around < and >.",
"match",
"=",
"Search",
"(",
"r'[^<>=!\\s](==|!=|<=|>=)[^<>=!\\s]'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"# We allow no-spaces around << when used like this: 10<<20, but",
"# not otherwise (particularly, not when used as streams)",
"# Also ignore using ns::operator<<;",
"match",
"=",
"Search",
"(",
"r'(operator|\\S)(?:L|UL|ULL|l|ul|ull)?<<(\\S)'",
",",
"line",
")",
"if",
"(",
"match",
"and",
"not",
"(",
"match",
".",
"group",
"(",
"1",
")",
".",
"isdigit",
"(",
")",
"and",
"match",
".",
"group",
"(",
"2",
")",
".",
"isdigit",
"(",
")",
")",
"and",
"not",
"(",
"match",
".",
"group",
"(",
"1",
")",
"==",
"'operator'",
"and",
"match",
".",
"group",
"(",
"2",
")",
"==",
"';'",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around <<'",
")",
"elif",
"not",
"Match",
"(",
"r'#.*include'",
",",
"line",
")",
":",
"# Avoid false positives on ->",
"reduced_line",
"=",
"line",
".",
"replace",
"(",
"'->'",
",",
"''",
")",
"# Look for < that is not surrounded by spaces. This is only",
"# triggered if both sides are missing spaces, even though",
"# technically should should flag if at least one side is missing a",
"# space. This is done to avoid some false positives with shifts.",
"match",
"=",
"Search",
"(",
"r'[^\\s<]<([^\\s=<].*)'",
",",
"reduced_line",
")",
"if",
"(",
"match",
"and",
"not",
"FindNextMatchingAngleBracket",
"(",
"clean_lines",
",",
"linenum",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around <'",
")",
"# Look for > that is not surrounded by spaces. Similar to the",
"# above, we only trigger if both sides are missing spaces to avoid",
"# false positives with shifts.",
"match",
"=",
"Search",
"(",
"r'^(.*[^\\s>])>[^\\s=>]'",
",",
"reduced_line",
")",
"if",
"(",
"match",
"and",
"not",
"FindPreviousMatchingAngleBracket",
"(",
"clean_lines",
",",
"linenum",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around >'",
")",
"# We allow no-spaces around >> for almost anything. This is because",
"# C++11 allows \">>\" to close nested templates, which accounts for",
"# most cases when \">>\" is not followed by a space.",
"#",
"# We still warn on \">>\" followed by alpha character, because that is",
"# likely due to \">>\" being used for right shifts, e.g.:",
"# value >> alpha",
"#",
"# When \">>\" is used to close templates, the alphanumeric letter that",
"# follows would be part of an identifier, and there should still be",
"# a space separating the template type and the identifier.",
"# type<type<type>> alpha",
"match",
"=",
"Search",
"(",
"r'>>[a-zA-Z_]'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"3",
",",
"'Missing spaces around >>'",
")",
"# There shouldn't be space around unary operators",
"match",
"=",
"Search",
"(",
"r'(!\\s|~\\s|[\\s]--[\\s;]|[\\s]\\+\\+[\\s;])'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/operators'",
",",
"4",
",",
"'Extra space for operator %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"# A pet peeve of mine: no spaces after an if, while, switch, or for",
"match",
"=",
"Search",
"(",
"r' (if\\(|for\\(|while\\(|switch\\()'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"5",
",",
"'Missing space before ( in %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"# For if/for/while/switch, the left and right parens should be",
"# consistent about how many spaces are inside the parens, and",
"# there should either be zero or one spaces inside the parens.",
"# We don't want: \"if ( foo)\" or \"if ( foo )\".",
"# Exception: \"for ( ; foo; bar)\" and \"for (foo; bar; )\" are allowed.",
"match",
"=",
"Search",
"(",
"r'\\b(if|for|while|switch)\\s*'",
"r'\\(([ ]*)(.).*[^ ]+([ ]*)\\)\\s*{\\s*$'",
",",
"line",
")",
"if",
"match",
":",
"if",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"!=",
"len",
"(",
"match",
".",
"group",
"(",
"4",
")",
")",
":",
"if",
"not",
"(",
"match",
".",
"group",
"(",
"3",
")",
"==",
"';'",
"and",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"==",
"1",
"+",
"len",
"(",
"match",
".",
"group",
"(",
"4",
")",
")",
"or",
"not",
"match",
".",
"group",
"(",
"2",
")",
"and",
"Search",
"(",
"r'\\bfor\\s*\\(.*; \\)'",
",",
"line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"5",
",",
"'Mismatching spaces inside () in %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"if",
"len",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"not",
"in",
"[",
"0",
",",
"1",
"]",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/parens'",
",",
"5",
",",
"'Should have zero or one spaces inside ( and ) in %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"# You should always have a space after a comma (either as fn arg or operator)",
"#",
"# This does not apply when the non-space character following the",
"# comma is another comma, since the only time when that happens is",
"# for empty macro arguments.",
"#",
"# We run this check in two passes: first pass on elided lines to",
"# verify that lines contain missing whitespaces, second pass on raw",
"# lines to confirm that those missing whitespaces are not due to",
"# elided comments.",
"if",
"Search",
"(",
"r',[^,\\s]'",
",",
"line",
")",
"and",
"Search",
"(",
"r',[^,\\s]'",
",",
"raw",
"[",
"linenum",
"]",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/comma'",
",",
"3",
",",
"'Missing space after ,'",
")",
"# You should always have a space after a semicolon",
"# except for few corner cases",
"# TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more",
"# space after ;",
"if",
"Search",
"(",
"r';[^\\s};\\\\)/]'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/semicolon'",
",",
"3",
",",
"'Missing space after ;'",
")",
"# Next we will look for issues with function calls.",
"CheckSpacingForFunctionCall",
"(",
"filename",
",",
"line",
",",
"linenum",
",",
"error",
")",
"# Except after an opening paren, or after another opening brace (in case of",
"# an initializer list, for instance), you should have spaces before your",
"# braces. And since you should never have braces at the beginning of a line,",
"# this is an easy test.",
"match",
"=",
"Match",
"(",
"r'^(.*[^ ({]){'",
",",
"line",
")",
"if",
"match",
":",
"# Try a bit harder to check for brace initialization. This",
"# happens in one of the following forms:",
"# Constructor() : initializer_list_{} { ... }",
"# Constructor{}.MemberFunction()",
"# Type variable{};",
"# FunctionCall(type{}, ...);",
"# LastArgument(..., type{});",
"# LOG(INFO) << type{} << \" ...\";",
"# map_of_type[{...}] = ...;",
"#",
"# We check for the character following the closing brace, and",
"# silence the warning if it's one of those listed above, i.e.",
"# \"{.;,)<]\".",
"#",
"# To account for nested initializer list, we allow any number of",
"# closing braces up to \"{;,)<\". We can't simply silence the",
"# warning on first sight of closing brace, because that would",
"# cause false negatives for things that are not initializer lists.",
"# Silence this: But not this:",
"# Outer{ if (...) {",
"# Inner{...} if (...){ // Missing space before {",
"# }; }",
"#",
"# There is a false negative with this approach if people inserted",
"# spurious semicolons, e.g. \"if (cond){};\", but we will catch the",
"# spurious semicolon with a separate check.",
"(",
"endline",
",",
"endlinenum",
",",
"endpos",
")",
"=",
"CloseExpression",
"(",
"clean_lines",
",",
"linenum",
",",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"trailing_text",
"=",
"''",
"if",
"endpos",
">",
"-",
"1",
":",
"trailing_text",
"=",
"endline",
"[",
"endpos",
":",
"]",
"for",
"offset",
"in",
"xrange",
"(",
"endlinenum",
"+",
"1",
",",
"min",
"(",
"endlinenum",
"+",
"3",
",",
"clean_lines",
".",
"NumLines",
"(",
")",
"-",
"1",
")",
")",
":",
"trailing_text",
"+=",
"clean_lines",
".",
"elided",
"[",
"offset",
"]",
"if",
"not",
"Match",
"(",
"r'^[\\s}]*[{.;,)<\\]]'",
",",
"trailing_text",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/braces'",
",",
"5",
",",
"'Missing space before {'",
")",
"# Make sure '} else {' has spaces.",
"if",
"Search",
"(",
"r'}else'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/braces'",
",",
"5",
",",
"'Missing space before else'",
")",
"# You shouldn't have spaces before your brackets, except maybe after",
"# 'delete []' or 'new char * []'.",
"if",
"Search",
"(",
"r'\\w\\s+\\['",
",",
"line",
")",
"and",
"not",
"Search",
"(",
"r'delete\\s+\\['",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/braces'",
",",
"5",
",",
"'Extra space before ['",
")",
"# You shouldn't have a space before a semicolon at the end of the line.",
"# There's a special case for \"for\" since the style guide allows space before",
"# the semicolon there.",
"if",
"Search",
"(",
"r':\\s*;\\s*$'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/semicolon'",
",",
"5",
",",
"'Semicolon defining empty statement. Use {} instead.'",
")",
"elif",
"Search",
"(",
"r'^\\s*;\\s*$'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/semicolon'",
",",
"5",
",",
"'Line contains only semicolon. If this should be an empty statement, '",
"'use {} instead.'",
")",
"elif",
"(",
"Search",
"(",
"r'\\s+;\\s*$'",
",",
"line",
")",
"and",
"not",
"Search",
"(",
"r'\\bfor\\b'",
",",
"line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/semicolon'",
",",
"5",
",",
"'Extra space before last semicolon. If this should be an empty '",
"'statement, use {} instead.'",
")",
"# In range-based for, we wanted spaces before and after the colon, but",
"# not around \"::\" tokens that might appear.",
"if",
"(",
"Search",
"(",
"'for *\\(.*[^:]:[^: ]'",
",",
"line",
")",
"or",
"Search",
"(",
"'for *\\(.*[^: ]:[^:]'",
",",
"line",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'whitespace/forcolon'",
",",
"2",
",",
"'Missing space around colon in range-based for loop'",
")"
] | https://github.com/wenwei202/caffe/blob/f54a74abaf6951d8485cbdcfa1d74a4c37839466/scripts/cpp_lint.py#L2643-L2988 |
wlanjie/AndroidFFmpeg | 7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf | tools/fdk-aac-build/x86/toolchain/lib/python2.7/fractions.py | python | Fraction.__gt__ | (a, b) | return a._richcmp(b, operator.gt) | a > b | a > b | [
"a",
">",
"b"
] | def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt) | [
"def",
"__gt__",
"(",
"a",
",",
"b",
")",
":",
"return",
"a",
".",
"_richcmp",
"(",
"b",
",",
"operator",
".",
"gt",
")"
] | https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/fractions.py#L576-L578 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.