Dataset columns (name, type, observed length range):

  nwo                 string    (length 5 to 86)
  sha                 string    (length 40 to 40)
  path                string    (length 4 to 189)
  language            string    (1 distinct value)
  identifier          string    (length 1 to 94)
  parameters          string    (length 2 to 4.03k)
  argument_list       string    (1 distinct value)
  return_statement    string    (length 0 to 11.5k)
  docstring           string    (length 1 to 33.2k)
  docstring_summary   string    (length 0 to 5.15k)
  docstring_tokens    sequence
  function            string    (length 34 to 151k)
  function_tokens     sequence
  url                 string    (length 90 to 278)

Repository: yuxng/PoseCNN (sha 9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04)
Path: lib/datasets/yumi.py
Language: python
Function: yumi.gt_roidb(self)
Summary: Return the database of ground-truth regions of interest.

def gt_roidb(self):
    """
    Return the database of ground-truth regions of interest.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            roidb = cPickle.load(fid)
        print '{} gt roidb loaded from {}'.format(self.name, cache_file)
        print 'class weights: ', roidb[0]['class_weights']
        return roidb

    # self.compute_class_weights()

    gt_roidb = [self._load_yumi_annotation(index)
                for index in self.image_index]

    if not cfg.TRAIN.SEGMENTATION:
        # print out recall
        for i in xrange(1, self.num_classes):
            print '{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i])
            print '{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i])
            if self._num_boxes_all[i] > 0:
                print '{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i]))

    with open(cache_file, 'wb') as fid:
        cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
    print 'wrote gt roidb to {}'.format(cache_file)

    return gt_roidb

[ "def", "gt_roidb", "(", "self", ")", ":", "cache_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cache_path", ",", "self", ".", "name", "+", "'_gt_roidb.pkl'", ")", "if", "os", ".", "path", ".", "exists", "(", "cache_file", ")", ":", "with", "open", "(", "cache_file", ",", "'rb'", ")", "as", "fid", ":", "roidb", "=", "cPickle", ".", "load", "(", "fid", ")", "print", "'{} gt roidb loaded from {}'", ".", "format", "(", "self", ".", "name", ",", "cache_file", ")", "print", "'class weights: '", ",", "roidb", "[", "0", "]", "[", "'class_weights'", "]", "return", "roidb", "# self.compute_class_weights()", "gt_roidb", "=", "[", "self", ".", "_load_yumi_annotation", "(", "index", ")", "for", "index", "in", "self", ".", "image_index", "]", "if", "not", "cfg", ".", "TRAIN", ".", "SEGMENTATION", ":", "# print out recall", "for", "i", "in", "xrange", "(", "1", ",", "self", ".", "num_classes", ")", ":", "print", "'{}: Total number of boxes {:d}'", ".", "format", "(", "self", ".", "classes", "[", "i", "]", ",", "self", ".", "_num_boxes_all", "[", "i", "]", ")", "print", "'{}: Number of boxes covered {:d}'", ".", "format", "(", "self", ".", "classes", "[", "i", "]", ",", "self", ".", "_num_boxes_covered", "[", "i", "]", ")", "if", "self", ".", "_num_boxes_all", "[", "i", "]", ">", "0", ":", "print", "'{}: Recall {:f}'", ".", "format", "(", "self", ".", "classes", "[", "i", "]", ",", "float", "(", "self", ".", "_num_boxes_covered", "[", "i", "]", ")", "/", "float", "(", "self", ".", "_num_boxes_all", "[", "i", "]", ")", ")", "with", "open", "(", "cache_file", ",", "'wb'", ")", "as", "fid", ":", "cPickle", ".", "dump", "(", "gt_roidb", ",", "fid", ",", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "print", "'wrote gt roidb to {}'", ".", "format", "(", "cache_file", ")", "return", "gt_roidb" ]
https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/yumi.py#L194-L226
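
The record above shows a common compute-once pickle-cache pattern: if the cache file exists, load and return it; otherwise build the result, write it, and return. A minimal Python 3 sketch of the same idea (the original uses Python 2's cPickle and print statements; the names below are illustrative, not PoseCNN's API):

import os
import pickle

def cached(cache_file, compute):
    """Load `cache_file` if it exists; otherwise call `compute`, save, and return."""
    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            return pickle.load(fid)
    result = compute()
    with open(cache_file, 'wb') as fid:
        pickle.dump(result, fid, pickle.HIGHEST_PROTOCOL)
    return result

# First call computes and writes the cache; later calls just load it.
data = cached('/tmp/demo_cache.pkl', lambda: [i * i for i in range(10)])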

Repository: wxWidgets/wxPython-Classic (sha 19571e1ae65f1ac445f5491474121998c97a1bf0)
Path: src/gtk/webkit.py
Language: python
Function: WebKitCtrl.CanIncreaseTextSize(*args, **kwargs)
Summary: CanIncreaseTextSize(self) -> bool

def CanIncreaseTextSize(*args, **kwargs):
    """CanIncreaseTextSize(self) -> bool"""
    return _webkit.WebKitCtrl_CanIncreaseTextSize(*args, **kwargs)

[ "def", "CanIncreaseTextSize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_webkit", ".", "WebKitCtrl_CanIncreaseTextSize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/webkit.py#L136-L138

Repository: Pay20Y/FOTS_TF (sha c42ea59a20c28d506fee35cfb4c553b0cb20eee8)
Path: nets/resnet_utils.py
Language: python
Function: resnet_arg_scope(weight_decay=0.0001, batch_norm_decay=0.997, batch_norm_epsilon=1e-5, batch_norm_scale=True)
Summary: Defines the default ResNet arg scope.

def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': tf.GraphKeys.UPDATE_OPS,
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=tf.nn.relu,
      normalizer_fn=slim.batch_norm,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # slim.arg_scope([slim.max_pool2d], padding='VALID').
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc

[ "def", "resnet_arg_scope", "(", "weight_decay", "=", "0.0001", ",", "batch_norm_decay", "=", "0.997", ",", "batch_norm_epsilon", "=", "1e-5", ",", "batch_norm_scale", "=", "True", ")", ":", "batch_norm_params", "=", "{", "'decay'", ":", "batch_norm_decay", ",", "'epsilon'", ":", "batch_norm_epsilon", ",", "'scale'", ":", "batch_norm_scale", ",", "'updates_collections'", ":", "tf", ".", "GraphKeys", ".", "UPDATE_OPS", ",", "}", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "conv2d", "]", ",", "weights_regularizer", "=", "slim", ".", "l2_regularizer", "(", "weight_decay", ")", ",", "weights_initializer", "=", "slim", ".", "variance_scaling_initializer", "(", ")", ",", "activation_fn", "=", "tf", ".", "nn", ".", "relu", ",", "normalizer_fn", "=", "slim", ".", "batch_norm", ",", "normalizer_params", "=", "batch_norm_params", ")", ":", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "batch_norm", "]", ",", "*", "*", "batch_norm_params", ")", ":", "# The following implies padding='SAME' for pool1, which makes feature", "# alignment easier for dense prediction tasks. This is also used in", "# https://github.com/facebook/fb.resnet.torch. However the accompanying", "# code of 'Deep Residual Learning for Image Recognition' uses", "# padding='VALID' for pool1. You can switch to that choice by setting", "# slim.arg_scope([slim.max_pool2d], padding='VALID').", "with", "slim", ".", "arg_scope", "(", "[", "slim", ".", "max_pool2d", "]", ",", "padding", "=", "'SAME'", ")", "as", "arg_sc", ":", "return", "arg_sc" ]
https://github.com/Pay20Y/FOTS_TF/blob/c42ea59a20c28d506fee35cfb4c553b0cb20eee8/nets/resnet_utils.py#L209-L254
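
A hedged sketch of how the returned arg_scope is typically consumed, assuming TensorFlow 1.x with tf.contrib.slim available (the layer names and input shape below are illustrative, not from FOTS_TF):

import tensorflow as tf
slim = tf.contrib.slim

# Every slim.conv2d built under the scope picks up the L2 regularizer,
# variance-scaling initializer, ReLU activation, and batch-norm defaults.
inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_arg_scope(weight_decay=1e-4)):
    net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope='conv1')
    net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')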

Repository: hughperkins/tf-coriander (sha 970d3df6c11400ad68405f22b0c42a52374e94ca)
Path: tensorflow/models/image/cifar10/cifar10_eval.py
Language: python
Function: evaluate()
Summary: Eval CIFAR-10 for a number of steps.

def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    images, labels = cifar10.inputs(eval_data=eval_data)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        cifar10.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)

    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)

[ "def", "evaluate", "(", ")", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", "as", "g", ":", "# Get images and labels for CIFAR-10.", "eval_data", "=", "FLAGS", ".", "eval_data", "==", "'test'", "images", ",", "labels", "=", "cifar10", ".", "inputs", "(", "eval_data", "=", "eval_data", ")", "# Build a Graph that computes the logits predictions from the", "# inference model.", "logits", "=", "cifar10", ".", "inference", "(", "images", ")", "# Calculate predictions.", "top_k_op", "=", "tf", ".", "nn", ".", "in_top_k", "(", "logits", ",", "labels", ",", "1", ")", "# Restore the moving average version of the learned variables for eval.", "variable_averages", "=", "tf", ".", "train", ".", "ExponentialMovingAverage", "(", "cifar10", ".", "MOVING_AVERAGE_DECAY", ")", "variables_to_restore", "=", "variable_averages", ".", "variables_to_restore", "(", ")", "saver", "=", "tf", ".", "train", ".", "Saver", "(", "variables_to_restore", ")", "# Build the summary operation based on the TF collection of Summaries.", "summary_op", "=", "tf", ".", "merge_all_summaries", "(", ")", "summary_writer", "=", "tf", ".", "train", ".", "SummaryWriter", "(", "FLAGS", ".", "eval_dir", ",", "g", ")", "while", "True", ":", "eval_once", "(", "saver", ",", "summary_writer", ",", "top_k_op", ",", "summary_op", ")", "if", "FLAGS", ".", "run_once", ":", "break", "time", ".", "sleep", "(", "FLAGS", ".", "eval_interval_secs", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/models/image/cifar10/cifar10_eval.py#L116-L145

Repository: wxWidgets/wxPython-Classic (sha 19571e1ae65f1ac445f5491474121998c97a1bf0)
Path: src/gtk/stc.py
Language: python
Function: StyledTextCtrl.StyleGetSizeFractional(*args, **kwargs)
Summary: StyleGetSizeFractional(self, int style) -> int

def StyleGetSizeFractional(*args, **kwargs):
    """StyleGetSizeFractional(self, int style) -> int"""
    return _stc.StyledTextCtrl_StyleGetSizeFractional(*args, **kwargs)

[ "def", "StyleGetSizeFractional", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_StyleGetSizeFractional", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/stc.py#L2703-L2705

Repository: miyosuda/TensorFlowAndroidMNIST (sha 7b5a4603d2780a8a2834575706e9001977524007)
Path: jni-build/jni/include/tensorflow/python/ops/nn_grad.py
Language: python
Function: _Conv2DBackpropInputGrad(op, grad)
Summary: The derivatives for deconvolution.

def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  return [None,
          nn_ops.conv2d_backprop_filter(grad,
                                        array_ops.shape(op.inputs[1]),
                                        op.inputs[2],
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad,
                        op.inputs[1],
                        op.get_attr("strides"),
                        op.get_attr("padding"),
                        op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]

[ "def", "_Conv2DBackpropInputGrad", "(", "op", ",", "grad", ")", ":", "return", "[", "None", ",", "nn_ops", ".", "conv2d_backprop_filter", "(", "grad", ",", "array_ops", ".", "shape", "(", "op", ".", "inputs", "[", "1", "]", ")", ",", "op", ".", "inputs", "[", "2", "]", ",", "op", ".", "get_attr", "(", "\"strides\"", ")", ",", "op", ".", "get_attr", "(", "\"padding\"", ")", ",", "op", ".", "get_attr", "(", "\"use_cudnn_on_gpu\"", ")", ",", "op", ".", "get_attr", "(", "\"data_format\"", ")", ")", ",", "nn_ops", ".", "conv2d", "(", "grad", ",", "op", ".", "inputs", "[", "1", "]", ",", "op", ".", "get_attr", "(", "\"strides\"", ")", ",", "op", ".", "get_attr", "(", "\"padding\"", ")", ",", "op", ".", "get_attr", "(", "\"use_cudnn_on_gpu\"", ")", ",", "op", ".", "get_attr", "(", "\"data_format\"", ")", ")", "]" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/ops/nn_grad.py#L31-L49

Repository: microsoft/EdgeML (sha ef9f8a77f096acbdeb941014791f8eda1c1bc35b)
Path: tf/edgeml_tf/trainer/fastTrainer.py
Language: python
Function: FastTrainer.runHardThrsd(self, sess)
Summary: Function to run the IHT routine on FastObj

def runHardThrsd(self, sess):
    '''
    Function to run the IHT routine on FastObj
    '''
    self.thrsdParams = []
    for i in range(0, self.numMatrices[0]):
        self.thrsdParams.append(
            utils.hardThreshold(self.FastParams[i].eval(), self.sW))
    for i in range(self.numMatrices[0], self.totalMatrices):
        self.thrsdParams.append(
            utils.hardThreshold(self.FastParams[i].eval(), self.sU))
    fd_thrsd = {}
    for i in range(0, self.totalMatrices):
        fd_thrsd[self.paramPlaceholders[i]] = self.thrsdParams[i]
    sess.run(self.hardThresholdGroup, feed_dict=fd_thrsd)

[ "def", "runHardThrsd", "(", "self", ",", "sess", ")", ":", "self", ".", "thrsdParams", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "numMatrices", "[", "0", "]", ")", ":", "self", ".", "thrsdParams", ".", "append", "(", "utils", ".", "hardThreshold", "(", "self", ".", "FastParams", "[", "i", "]", ".", "eval", "(", ")", ",", "self", ".", "sW", ")", ")", "for", "i", "in", "range", "(", "self", ".", "numMatrices", "[", "0", "]", ",", "self", ".", "totalMatrices", ")", ":", "self", ".", "thrsdParams", ".", "append", "(", "utils", ".", "hardThreshold", "(", "self", ".", "FastParams", "[", "i", "]", ".", "eval", "(", ")", ",", "self", ".", "sU", ")", ")", "fd_thrsd", "=", "{", "}", "for", "i", "in", "range", "(", "0", ",", "self", ".", "totalMatrices", ")", ":", "fd_thrsd", "[", "self", ".", "paramPlaceholders", "[", "i", "]", "]", "=", "self", ".", "thrsdParams", "[", "i", "]", "sess", ".", "run", "(", "self", ".", "hardThresholdGroup", ",", "feed_dict", "=", "fd_thrsd", ")" ]
https://github.com/microsoft/EdgeML/blob/ef9f8a77f096acbdeb941014791f8eda1c1bc35b/tf/edgeml_tf/trainer/fastTrainer.py#L165-L180

Repository: mantidproject/mantid (sha 03deeb89254ec4289edb8771e0188c2090a02f32)
Path: Framework/PythonInterface/mantid/fitfunctions.py
Language: python
Function: FunctionWrapper.free(self, name)
Summary: Free a parameter from tie or constraint

def free(self, name):
    """
    Free a parameter from tie or constraint

    :param name: name of parameter to be freed
    """
    self.fun.removeTie(name)
    self.fun.removeConstraint(name)

[ "def", "free", "(", "self", ",", "name", ")", ":", "self", ".", "fun", ".", "removeTie", "(", "name", ")", "self", ".", "fun", ".", "removeConstraint", "(", "name", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/mantid/fitfunctions.py#L336-L343

Repository: giuspen/cherrytree (sha 84712f206478fcf9acf30174009ad28c648c6344)
Path: pygtk2/modules/imports.py
Language: python
Function: HTMLCheck.handle_endtag(self, tag)
Summary: Encountered the end of a tag

def handle_endtag(self, tag):
    """Encountered the end of a tag"""
    if tag == "head" and self.steps == 4: self.steps = 5
    elif tag == "body" and self.steps == 6: self.steps = 7
    if tag == "html" and self.steps == 7: self.steps = 8

[ "def", "handle_endtag", "(", "self", ",", "tag", ")", ":", "if", "tag", "==", "\"head\"", "and", "self", ".", "steps", "==", "4", ":", "self", ".", "steps", "=", "5", "elif", "tag", "==", "\"body\"", "and", "self", ".", "steps", "==", "6", ":", "self", ".", "steps", "=", "7", "if", "tag", "==", "\"html\"", "and", "self", ".", "steps", "==", "7", ":", "self", ".", "steps", "=", "8" ]
https://github.com/giuspen/cherrytree/blob/84712f206478fcf9acf30174009ad28c648c6344/pygtk2/modules/imports.py#L2931-L2935

Repository: aws/lumberyard (sha f85344403c1c2e77ec8c75deb2c116e97b713217)
Path: dev/Tools/Python/3.7.10/windows/Lib/asyncio/base_events.py
Language: python
Function: BaseEventLoop._process_events(self, event_list)
Summary: Process selector events.

def _process_events(self, event_list):
    """Process selector events."""
    raise NotImplementedError

[ "def", "_process_events", "(", "self", ",", "event_list", ")", ":", "raise", "NotImplementedError" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/asyncio/base_events.py#L473-L475

Repository: commaai/openpilot (sha 4416c21b1e738ab7d04147c5ae52b5135e0cdb40)
Path: pyextra/acados_template/acados_ocp.py
Language: python
Function: AcadosOcpConstraints.Jbu(self)
Summary: :math:`J_{bu}` - matrix coefficient for bounds on u at shooting nodes (0 to N-1). Translated internally to :py:attr:`idxbu`.

def Jbu(self):
    """:math:`J_{bu}` - matrix coefficient for bounds on u at shooting nodes (0 to N-1).
    Translated internally to :py:attr:`idxbu`."""
    print_J_to_idx_note()
    return self.__idxbu

[ "def", "Jbu", "(", "self", ")", ":", "print_J_to_idx_note", "(", ")", "return", "self", ".", "__idxbu" ]
https://github.com/commaai/openpilot/blob/4416c21b1e738ab7d04147c5ae52b5135e0cdb40/pyextra/acados_template/acados_ocp.py#L1175-L1180

Repository: yuxng/PoseCNN (sha 9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04)
Path: lib/datasets/rgbd_scene.py
Language: python
Function: rgbd_scene.image_path_from_index(self, index)
Summary: Construct an image path from the image's "index" identifier.

def image_path_from_index(self, index):
    """
    Construct an image path from the image's "index" identifier.
    """
    image_path = os.path.join(self._data_path, index + '-color' + self._image_ext)
    assert os.path.exists(image_path), \
        'Path does not exist: {}'.format(image_path)
    return image_path

[ "def", "image_path_from_index", "(", "self", ",", "index", ")", ":", "image_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_data_path", ",", "index", "+", "'-color'", "+", "self", ".", "_image_ext", ")", "assert", "os", ".", "path", ".", "exists", "(", "image_path", ")", ",", "'Path does not exist: {}'", ".", "format", "(", "image_path", ")", "return", "image_path" ]
https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/datasets/rgbd_scene.py#L39-L47

Repository: aws/lumberyard (sha f85344403c1c2e77ec8c75deb2c116e97b713217)
Path: dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/util.py
Language: python
Function: change_root(new_root, pathname)
Summary: Return 'pathname' with 'new_root' prepended. If 'pathname' is relative, this is equivalent to "os.path.join(new_root,pathname)". Otherwise, it requires making 'pathname' relative and then joining the two, which is tricky on DOS/Windows and Mac OS.

def change_root(new_root, pathname):
    """Return 'pathname' with 'new_root' prepended.  If 'pathname' is
    relative, this is equivalent to "os.path.join(new_root,pathname)".
    Otherwise, it requires making 'pathname' relative and then joining the
    two, which is tricky on DOS/Windows and Mac OS.
    """
    if os.name == 'posix':
        if not os.path.isabs(pathname):
            return os.path.join(new_root, pathname)
        else:
            return os.path.join(new_root, pathname[1:])

    elif os.name == 'nt':
        (drive, path) = os.path.splitdrive(pathname)
        if path[0] == '\\':
            path = path[1:]
        return os.path.join(new_root, path)

    else:
        raise DistutilsPlatformError("nothing known about platform '%s'" % os.name)

[ "def", "change_root", "(", "new_root", ",", "pathname", ")", ":", "if", "os", ".", "name", "==", "'posix'", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "pathname", ")", ":", "return", "os", ".", "path", ".", "join", "(", "new_root", ",", "pathname", ")", "else", ":", "return", "os", ".", "path", ".", "join", "(", "new_root", ",", "pathname", "[", "1", ":", "]", ")", "elif", "os", ".", "name", "==", "'nt'", ":", "(", "drive", ",", "path", ")", "=", "os", ".", "path", ".", "splitdrive", "(", "pathname", ")", "if", "path", "[", "0", "]", "==", "'\\\\'", ":", "path", "=", "path", "[", "1", ":", "]", "return", "os", ".", "path", ".", "join", "(", "new_root", ",", "path", ")", "else", ":", "raise", "DistutilsPlatformError", "(", "\"nothing known about platform '%s'\"", "%", "os", ".", "name", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/util.py#L124-L143
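
On a POSIX system the function simply joins relative paths and re-roots absolute ones by stripping the leading slash; a quick check (stdlib distutils, outputs shown for os.name == 'posix'):

from distutils.util import change_root

print(change_root('/new/root', '/usr/lib/python3'))  # /new/root/usr/lib/python3
print(change_root('/new/root', 'site-packages'))     # /new/root/site-packages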

Repository: ZintrulCre/LeetCode_Archiver (sha de23e16ead29336b5ee7aa1898a392a5d6463d27)
Path: LeetCode/python3/209.py
Language: python
Function: Solution.minSubArrayLen(self, s, nums)
Summary: :type s: int / :type nums: List[int] / :rtype: int

def minSubArrayLen(self, s, nums):
    """
    :type s: int
    :type nums: List[int]
    :rtype: int
    """
    i, j, size, sum, min_len = 0, 0, len(nums), 0, len(nums)
    if size == 0:
        return 0
    while j < size:
        sum += nums[j]
        while sum >= s:
            sum -= nums[i]
            i += 1
            min_len = min(min_len, j - i + 2)
        j += 1
    return min_len if i != 0 else 0

[ "def", "minSubArrayLen", "(", "self", ",", "s", ",", "nums", ")", ":", "i", ",", "j", ",", "size", ",", "sum", ",", "min_len", "=", "0", ",", "0", ",", "len", "(", "nums", ")", ",", "0", ",", "len", "(", "nums", ")", "if", "size", "==", "0", ":", "return", "0", "while", "j", "<", "size", ":", "sum", "+=", "nums", "[", "j", "]", "while", "sum", ">=", "s", ":", "sum", "-=", "nums", "[", "i", "]", "i", "+=", "1", "min_len", "=", "min", "(", "min_len", ",", "j", "-", "i", "+", "2", ")", "j", "+=", "1", "return", "min_len", "if", "i", "!=", "0", "else", "0" ]
https://github.com/ZintrulCre/LeetCode_Archiver/blob/de23e16ead29336b5ee7aa1898a392a5d6463d27/LeetCode/python3/209.py#L2-L18
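
The sliding-window solution above grows the window on the right and shrinks it from the left while the running sum still meets the target. Assuming the method is defined on a Solution class as in the source file, the classic LeetCode 209 example gives the expected answer of 2:

sol = Solution()
# The smallest subarray with sum >= 7 is [4, 3], so the length is 2.
print(sol.minSubArrayLen(7, [2, 3, 1, 2, 4, 3]))  # 2
print(sol.minSubArrayLen(100, [1, 2, 3]))         # 0 (no qualifying subarray)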

Repository: DGA-MI-SSI/YaCo (sha 9b85e6ca1809114c4df1382c11255f7e38408912)
Path: deps/libxml2-2.7.8/python/libxml.py
Language: python
Function: xmlTextReaderCore.SetErrorHandler(self, f, arg)
Summary: Register an error handler that will be called back as f(arg,msg,severity,locator).

def SetErrorHandler(self, f, arg):
    """Register an error handler that will be called back as
       f(arg,msg,severity,locator)."""
    if f is None:
        libxml2mod.xmlTextReaderSetErrorHandler(\
            self._o, None, None)
    else:
        libxml2mod.xmlTextReaderSetErrorHandler(\
            self._o, _xmlTextReaderErrorFunc, (f, arg))

[ "def", "SetErrorHandler", "(", "self", ",", "f", ",", "arg", ")", ":", "if", "f", "is", "None", ":", "libxml2mod", ".", "xmlTextReaderSetErrorHandler", "(", "self", ".", "_o", ",", "None", ",", "None", ")", "else", ":", "libxml2mod", ".", "xmlTextReaderSetErrorHandler", "(", "self", ".", "_o", ",", "_xmlTextReaderErrorFunc", ",", "(", "f", ",", "arg", ")", ")" ]
https://github.com/DGA-MI-SSI/YaCo/blob/9b85e6ca1809114c4df1382c11255f7e38408912/deps/libxml2-2.7.8/python/libxml.py#L701-L709

Repository: freeorion/freeorion (sha c266a40eccd3a99a17de8fe57c36ef6ba3771665)
Path: default/python/AI/ResearchAI.py
Language: python
Function: get_completed_techs()
Summary: Get completed and available for use techs.

def get_completed_techs():
    """Get completed and available for use techs."""
    return [tech for tech in fo.techs() if tech_is_complete(tech)]

[ "def", "get_completed_techs", "(", ")", ":", "return", "[", "tech", "for", "tech", "in", "fo", ".", "techs", "(", ")", "if", "tech_is_complete", "(", "tech", ")", "]" ]
https://github.com/freeorion/freeorion/blob/c266a40eccd3a99a17de8fe57c36ef6ba3771665/default/python/AI/ResearchAI.py#L687-L689

Repository: aws/lumberyard (sha f85344403c1c2e77ec8c75deb2c116e97b713217)
Path: dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/index/collector.py
Language: python
Function: _get_encoding_from_headers(headers)
Summary: Determine if we have any encoding information in our headers.

def _get_encoding_from_headers(headers):
    # type: (ResponseHeaders) -> Optional[str]
    """Determine if we have any encoding information in our headers.
    """
    if headers and "Content-Type" in headers:
        content_type, params = cgi.parse_header(headers["Content-Type"])
        if "charset" in params:
            return params['charset']
    return None

[ "def", "_get_encoding_from_headers", "(", "headers", ")", ":", "# type: (ResponseHeaders) -> Optional[str]", "if", "headers", "and", "\"Content-Type\"", "in", "headers", ":", "content_type", ",", "params", "=", "cgi", ".", "parse_header", "(", "headers", "[", "\"Content-Type\"", "]", ")", "if", "\"charset\"", "in", "params", ":", "return", "params", "[", "'charset'", "]", "return", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_internal/index/collector.py#L158-L166
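
The helper just pulls the charset parameter out of a Content-Type header. The same parsing step in isolation, using the stdlib cgi module the function relies on (present in the vendored Python 3.7 shown; deprecated in newer Pythons):

import cgi

headers = {"Content-Type": "text/html; charset=ISO-8859-1"}
content_type, params = cgi.parse_header(headers["Content-Type"])
print(content_type)           # text/html
print(params.get("charset"))  # ISO-8859-1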

Repository: mindspore-ai/mindspore (sha fb8fd3338605bb34fa5cea054e535a8b1d753fab)
Path: mindspore/python/mindspore/profiler/parser/container.py
Language: python
Function: HWTSContainer.duration(self)
Summary: Get the duration of the operator execution.

def duration(self):
    """Get the duration of the operator execution."""
    return self._duration

[ "def", "duration", "(", "self", ")", ":", "return", "self", ".", "_duration" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/profiler/parser/container.py#L66-L68

Repository: peterljq/OpenMMD (sha 795d4dd660cf7e537ceb599fdb038c5388b33390)
Path: VMD 3D Pose Baseline Multi-Objects/packages/lifting/utils/prob_model.py
Language: python
Function: Prob3dPose.create_rec(self, w2, weights, res_weight=1)
Summary: Reconstruct 3D pose given a 2D pose

def create_rec(self, w2, weights, res_weight=1):
    """Reconstruct 3D pose given a 2D pose"""
    _SIGMA_SCALING = 5.2

    res, e, a, r, scale = self.affine_estimate(
        w2, scale=_SIGMA_SCALING, weights=weights,
        depth_reg=0, cap_scale=-0.001, scale_mean=-0.003
    )

    remaining_dims = 3 * w2.shape[2] - e.shape[1]
    assert (remaining_dims >= 0)
    llambda = -np.log(self.sigma)
    lgdet = np.sum(llambda[:, :-1], 1) + llambda[:, -1] * remaining_dims
    score = (res * res_weight + lgdet[:, np.newaxis] * (scale ** 2))
    best = np.argmin(score, 0)
    index = np.arange(best.shape[0])
    a2 = a[best, index]
    r2 = r[best, :, index].T
    rec = Prob3dPose.build_and_rot_model(a2, e[best], self.mu[best], r2)
    rec *= -np.abs(scale[best, index])[:, np.newaxis, np.newaxis]

    rec = self.better_rec(w2, rec, 1, 1.55 * weights, 1) * -1
    rec = Prob3dPose.renorm_gt(rec)
    rec *= 0.97
    return rec

[ "def", "create_rec", "(", "self", ",", "w2", ",", "weights", ",", "res_weight", "=", "1", ")", ":", "_SIGMA_SCALING", "=", "5.2", "res", ",", "e", ",", "a", ",", "r", ",", "scale", "=", "self", ".", "affine_estimate", "(", "w2", ",", "scale", "=", "_SIGMA_SCALING", ",", "weights", "=", "weights", ",", "depth_reg", "=", "0", ",", "cap_scale", "=", "-", "0.001", ",", "scale_mean", "=", "-", "0.003", ")", "remaining_dims", "=", "3", "*", "w2", ".", "shape", "[", "2", "]", "-", "e", ".", "shape", "[", "1", "]", "assert", "(", "remaining_dims", ">=", "0", ")", "llambda", "=", "-", "np", ".", "log", "(", "self", ".", "sigma", ")", "lgdet", "=", "np", ".", "sum", "(", "llambda", "[", ":", ",", ":", "-", "1", "]", ",", "1", ")", "+", "llambda", "[", ":", ",", "-", "1", "]", "*", "remaining_dims", "score", "=", "(", "res", "*", "res_weight", "+", "lgdet", "[", ":", ",", "np", ".", "newaxis", "]", "*", "(", "scale", "**", "2", ")", ")", "best", "=", "np", ".", "argmin", "(", "score", ",", "0", ")", "index", "=", "np", ".", "arange", "(", "best", ".", "shape", "[", "0", "]", ")", "a2", "=", "a", "[", "best", ",", "index", "]", "r2", "=", "r", "[", "best", ",", ":", ",", "index", "]", ".", "T", "rec", "=", "Prob3dPose", ".", "build_and_rot_model", "(", "a2", ",", "e", "[", "best", "]", ",", "self", ".", "mu", "[", "best", "]", ",", "r2", ")", "rec", "*=", "-", "np", ".", "abs", "(", "scale", "[", "best", ",", "index", "]", ")", "[", ":", ",", "np", ".", "newaxis", ",", "np", ".", "newaxis", "]", "rec", "=", "self", ".", "better_rec", "(", "w2", ",", "rec", ",", "1", ",", "1.55", "*", "weights", ",", "1", ")", "*", "-", "1", "rec", "=", "Prob3dPose", ".", "renorm_gt", "(", "rec", ")", "rec", "*=", "0.97", "return", "rec" ]
https://github.com/peterljq/OpenMMD/blob/795d4dd660cf7e537ceb599fdb038c5388b33390/VMD 3D Pose Baseline Multi-Objects/packages/lifting/utils/prob_model.py#L217-L241

Repository: apache/incubator-mxnet (sha f03fb23f1d103fec9541b5ae59ee06b1734a51d9)
Path: python/mxnet/numpy/random.py
Language: python
Function: randn(*size, **kwargs)
Summary: Return a sample (or samples) from the "standard normal" distribution.

def randn(*size, **kwargs):
    r"""Return a sample (or samples) from the "standard normal" distribution.

    If positive, int_like or int-convertible arguments are provided,
    `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
    with random floats sampled from a univariate "normal" (Gaussian)
    distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
    floats, they are first converted to integers by truncation). A single
    float randomly sampled from the distribution is returned if no
    argument is provided.

    This is a convenience function. If you want an interface that takes a
    tuple as the first argument, use `numpy.random.standard_normal` instead.

    Parameters
    ----------
    d0, d1, ..., dn : int, optional
        The dimensions of the returned array, should be all positive.
        If no argument is given a single Python float is returned.

    Returns
    -------
    Z : ndarray
        A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
        the standard normal distribution, or a single such float if
        no parameters were supplied.

    Notes
    -----
    For random samples from :math:`N(\mu, \sigma^2)`, use:

    ``sigma * np.random.randn(...) + mu``

    Examples
    --------
    >>> np.random.randn()
    2.1923875335537315  # random

    Two-by-four array of samples from N(3, 6.25):

    >>> 2.5 * np.random.randn(2, 4) + 3
    array([[-4.49401501,  4.00950034, -1.81814867,  7.29718677],   # random
           [ 0.39924804,  4.68456316,  4.99394529,  4.84057254]])  # random
    """
    output_shape = ()
    for s in size:
        output_shape += (s,)
    return _mx_nd_np.random.normal(0, 1, size=output_shape, **kwargs)

[ "def", "randn", "(", "*", "size", ",", "*", "*", "kwargs", ")", ":", "output_shape", "=", "(", ")", "for", "s", "in", "size", ":", "output_shape", "+=", "(", "s", ",", ")", "return", "_mx_nd_np", ".", "random", ".", "normal", "(", "0", ",", "1", ",", "size", "=", "output_shape", ",", "*", "*", "kwargs", ")" ]
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/numpy/random.py#L1016-L1054

Repository: wxWidgets/wxPython-Classic (sha 19571e1ae65f1ac445f5491474121998c97a1bf0)
Path: src/msw/_core.py
Language: python
Function: MoveEvent.SetPosition(*args, **kwargs)
Summary: SetPosition(self, Point pos)

def SetPosition(*args, **kwargs):
    """SetPosition(self, Point pos)"""
    return _core_.MoveEvent_SetPosition(*args, **kwargs)

[ "def", "SetPosition", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "MoveEvent_SetPosition", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L6205-L6207

Repository: tfwu/FaceDetection-ConvNet-3D (sha f9251c48eb40c5aec8fba7455115c355466555be)
Path: python/mxnet/misc.py
Language: python
Function: FactorScheduler.__call__(self, iteration)
Summary: Call to schedule current learning rate

def __call__(self, iteration):
    """
    Call to schedule current learning rate

    Parameters
    ----------
    iteration: int
        Current iteration count
    """
    if self.init == False:
        self.init = True
        self.old_lr = self.base_lr
    lr = self.base_lr * math.pow(self.factor, int(iteration / self.step))
    if lr != self.old_lr:
        self.old_lr = lr
        logging.info("At Iteration [%d]: Swith to new learning rate %.5f",
                     iteration, lr)
    return lr

[ "def", "__call__", "(", "self", ",", "iteration", ")", ":", "if", "self", ".", "init", "==", "False", ":", "self", ".", "init", "=", "True", "self", ".", "old_lr", "=", "self", ".", "base_lr", "lr", "=", "self", ".", "base_lr", "*", "math", ".", "pow", "(", "self", ".", "factor", ",", "int", "(", "iteration", "/", "self", ".", "step", ")", ")", "if", "lr", "!=", "self", ".", "old_lr", ":", "self", ".", "old_lr", "=", "lr", "logging", ".", "info", "(", "\"At Iteration [%d]: Swith to new learning rate %.5f\"", ",", "iteration", ",", "lr", ")", "return", "lr" ]
https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/mxnet/misc.py#L45-L63
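
The schedule multiplies base_lr by factor once per `step` iterations: with base_lr = 0.1, factor = 0.5, and step = 10, the rate is 0.1 for iterations 0-9, 0.05 for 10-19, and 0.025 from 20 on. A standalone sketch of just the decay formula (a hypothetical helper, without the original's logging and state):

import math

def scheduled_lr(base_lr, factor, step, iteration):
    """Stepwise-decayed learning rate: base_lr * factor ** (iteration // step)."""
    return base_lr * math.pow(factor, iteration // step)

print(scheduled_lr(0.1, 0.5, 10, 0))   # 0.1
print(scheduled_lr(0.1, 0.5, 10, 25))  # 0.025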

Repository: hanpfei/chromium-net (sha 392cc1fa3a8f92f42e4071ab6e674d8e0482f83f)
Path: tools/bisect-builds.py
Language: python
Function: PathContext.GetRevList(self)
Summary: Gets the list of revision numbers between self.good_revision and self.bad_revision.

def GetRevList(self):
  """Gets the list of revision numbers between self.good_revision and
  self.bad_revision."""

  cache = {}
  # The cache is stored in the same directory as bisect-builds.py
  cache_filename = os.path.join(
      os.path.abspath(os.path.dirname(__file__)),
      '.bisect-builds-cache.json')
  cache_dict_key = self.GetListingURL()

  def _LoadBucketFromCache():
    if self.use_local_cache:
      try:
        with open(cache_filename) as cache_file:
          for (key, value) in json.load(cache_file).items():
            cache[key] = value
          revisions = cache.get(cache_dict_key, [])
          githash_svn_dict = cache.get('githash_svn_dict', {})
          if revisions:
            print 'Loaded revisions %d-%d from %s' % (revisions[0],
                revisions[-1], cache_filename)
          return (revisions, githash_svn_dict)
      except (EnvironmentError, ValueError):
        pass
    return ([], {})

  def _SaveBucketToCache():
    """Save the list of revisions and the git-svn mappings to a file.
    The list of revisions is assumed to be sorted."""
    if self.use_local_cache:
      cache[cache_dict_key] = revlist_all
      cache['githash_svn_dict'] = self.githash_svn_dict
      try:
        with open(cache_filename, 'w') as cache_file:
          json.dump(cache, cache_file)
        print 'Saved revisions %d-%d to %s' % (
            revlist_all[0], revlist_all[-1], cache_filename)
      except EnvironmentError:
        pass

  # Download the revlist and filter for just the range between good and bad.
  minrev = min(self.good_revision, self.bad_revision)
  maxrev = max(self.good_revision, self.bad_revision)

  (revlist_all, self.githash_svn_dict) = _LoadBucketFromCache()
  last_known_rev = revlist_all[-1] if revlist_all else 0
  if last_known_rev < maxrev:
    revlist_all.extend(map(int, self.ParseDirectoryIndex(last_known_rev)))
    revlist_all = list(set(revlist_all))
    revlist_all.sort()
    _SaveBucketToCache()

  revlist = [x for x in revlist_all if x >= int(minrev) and x <= int(maxrev)]

  # Set good and bad revisions to be legit revisions.
  if revlist:
    if self.good_revision < self.bad_revision:
      self.good_revision = revlist[0]
      self.bad_revision = revlist[-1]
    else:
      self.bad_revision = revlist[0]
      self.good_revision = revlist[-1]

  # Fix chromium rev so that the deps blink revision matches REVISIONS file.
  if self.base_url == WEBKIT_BASE_URL:
    revlist_all.sort()
    self.good_revision = FixChromiumRevForBlink(revlist, revlist_all, self,
                                                self.good_revision)
    self.bad_revision = FixChromiumRevForBlink(revlist, revlist_all, self,
                                               self.bad_revision)

  return revlist

https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/bisect-builds.py#L376-L451

Repository: catboost/catboost (sha 167f64f237114a4d10b2b4ee42adb4569137debe)
Path: contrib/tools/python3/src/Lib/lib2to3/fixer_util.py
Language: python
Function: is_list(node)
Summary: Does the node represent a list literal?

def is_list(node):
    """Does the node represent a list literal?"""
    return (isinstance(node, Node)
            and len(node.children) > 1
            and isinstance(node.children[0], Leaf)
            and isinstance(node.children[-1], Leaf)
            and node.children[0].value == "["
            and node.children[-1].value == "]")

[ "def", "is_list", "(", "node", ")", ":", "return", "(", "isinstance", "(", "node", ",", "Node", ")", "and", "len", "(", "node", ".", "children", ")", ">", "1", "and", "isinstance", "(", "node", ".", "children", "[", "0", "]", ",", "Leaf", ")", "and", "isinstance", "(", "node", ".", "children", "[", "-", "1", "]", ",", "Leaf", ")", "and", "node", ".", "children", "[", "0", "]", ".", "value", "==", "\"[\"", "and", "node", ".", "children", "[", "-", "1", "]", ".", "value", "==", "\"]\"", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/lib2to3/fixer_util.py#L170-L177

Repository: xhzdeng/crpn (sha a5aef0f80dbe486103123f740c634fb01e6cc9a1)
Path: lib/datasets/pascal_voc.py
Language: python
Function: pascal_voc.selective_search_roidb(self)
Summary: Return the database of selective search regions of interest. Ground-truth ROIs are also included.

def selective_search_roidb(self):
    """
    Return the database of selective search regions of interest.
    Ground-truth ROIs are also included.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(self.cache_path,
                              self.name + '_selective_search_roidb.pkl')

    if os.path.exists(cache_file):
        with open(cache_file, 'rb') as fid:
            roidb = cPickle.load(fid)
        print '{} ss roidb loaded from {}'.format(self.name, cache_file)
        return roidb

    if int(self._year) == 2007 or self._image_set != 'test':
        gt_roidb = self.gt_roidb()
        ss_roidb = self._load_selective_search_roidb(gt_roidb)
        roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
    else:
        roidb = self._load_selective_search_roidb(None)
    with open(cache_file, 'wb') as fid:
        cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
    print 'wrote ss roidb to {}'.format(cache_file)

    return roidb

[ "def", "selective_search_roidb", "(", "self", ")", ":", "cache_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "cache_path", ",", "self", ".", "name", "+", "'_selective_search_roidb.pkl'", ")", "if", "os", ".", "path", ".", "exists", "(", "cache_file", ")", ":", "with", "open", "(", "cache_file", ",", "'rb'", ")", "as", "fid", ":", "roidb", "=", "cPickle", ".", "load", "(", "fid", ")", "print", "'{} ss roidb loaded from {}'", ".", "format", "(", "self", ".", "name", ",", "cache_file", ")", "return", "roidb", "if", "int", "(", "self", ".", "_year", ")", "==", "2007", "or", "self", ".", "_image_set", "!=", "'test'", ":", "gt_roidb", "=", "self", ".", "gt_roidb", "(", ")", "ss_roidb", "=", "self", ".", "_load_selective_search_roidb", "(", "gt_roidb", ")", "roidb", "=", "imdb", ".", "merge_roidbs", "(", "gt_roidb", ",", "ss_roidb", ")", "else", ":", "roidb", "=", "self", ".", "_load_selective_search_roidb", "(", "None", ")", "with", "open", "(", "cache_file", ",", "'wb'", ")", "as", "fid", ":", "cPickle", ".", "dump", "(", "roidb", ",", "fid", ",", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "print", "'wrote ss roidb to {}'", ".", "format", "(", "cache_file", ")", "return", "roidb" ]
https://github.com/xhzdeng/crpn/blob/a5aef0f80dbe486103123f740c634fb01e6cc9a1/lib/datasets/pascal_voc.py#L110-L136

Repository: pmq20/node-packer (sha 12c46c6e44fbc14d9ee645ebd17d5296b324f7e0)
Path: lts/tools/gyp/pylib/gyp/generator/msvs.py
Language: python
Function: _EscapeCppDefineForMSBuild(s)
Summary: Escapes a CPP define so that it will reach the compiler unaltered.

def _EscapeCppDefineForMSBuild(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  s = _EscapeEnvironmentVariableExpansion(s)
  s = _EscapeCommandLineArgumentForMSBuild(s)
  s = _EscapeMSBuildSpecialCharacters(s)
  # cl.exe replaces literal # characters with = in preprocesor definitions for
  # some reason. Octal-encode to work around that.
  s = s.replace('#', '\\%03o' % ord('#'))
  return s

[ "def", "_EscapeCppDefineForMSBuild", "(", "s", ")", ":", "s", "=", "_EscapeEnvironmentVariableExpansion", "(", "s", ")", "s", "=", "_EscapeCommandLineArgumentForMSBuild", "(", "s", ")", "s", "=", "_EscapeMSBuildSpecialCharacters", "(", "s", ")", "# cl.exe replaces literal # characters with = in preprocesor definitions for", "# some reason. Octal-encode to work around that.", "s", "=", "s", ".", "replace", "(", "'#'", ",", "'\\\\%03o'", "%", "ord", "(", "'#'", ")", ")", "return", "s" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/gyp/pylib/gyp/generator/msvs.py#L831-L839
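
The octal work-around on the last line encodes each '#' as its three-digit octal escape so cl.exe never sees the literal character. The substitution in isolation:

s = 'FOO#BAR'
escaped = s.replace('#', '\\%03o' % ord('#'))
print(escaped)  # FOO\043BAR  (ord('#') == 35, which is 043 in octal)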

Repository: jackaudio/jack2 (sha 21b293dbc37d42446141a08922cdec0d2550c6a0)
Path: waflib/TaskGen.py
Language: python
Function: task_gen.get_hook(self, node)
Summary: Returns the ``@extension`` method to call for a Node of a particular extension.

def get_hook(self, node):
    """
    Returns the ``@extension`` method to call for a Node of a particular extension.

    :param node: Input file to process
    :type node: :py:class:`waflib.Tools.Node.Node`
    :return: A method able to process the input node by looking at the extension
    :rtype: function
    """
    name = node.name
    for k in self.mappings:
        try:
            if name.endswith(k):
                return self.mappings[k]
        except TypeError:
            # regexps objects
            if k.match(name):
                return self.mappings[k]
    keys = list(self.mappings.keys())
    raise Errors.WafError("File %r has no mapping in %r (load a waf tool?)" % (node, keys))

[ "def", "get_hook", "(", "self", ",", "node", ")", ":", "name", "=", "node", ".", "name", "for", "k", "in", "self", ".", "mappings", ":", "try", ":", "if", "name", ".", "endswith", "(", "k", ")", ":", "return", "self", ".", "mappings", "[", "k", "]", "except", "TypeError", ":", "# regexps objects", "if", "k", ".", "match", "(", "name", ")", ":", "return", "self", ".", "mappings", "[", "k", "]", "keys", "=", "list", "(", "self", ".", "mappings", ".", "keys", "(", ")", ")", "raise", "Errors", ".", "WafError", "(", "\"File %r has no mapping in %r (load a waf tool?)\"", "%", "(", "node", ",", "keys", ")", ")" ]
https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/waflib/TaskGen.py#L244-L263
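
The lookup tries each mapping key as a filename suffix and, when str.endswith raises TypeError because the key is a compiled regex, falls back to pattern matching. A self-contained sketch of the same dispatch pattern (illustrative handlers, not waf's API):

import re

mappings = {
    '.c': lambda name: 'compile C: %s' % name,
    re.compile(r'.*\.(cc|cpp)$'): lambda name: 'compile C++: %s' % name,
}

def get_hook(name):
    for k, fn in mappings.items():
        try:
            if name.endswith(k):  # str keys: suffix match
                return fn
        except TypeError:         # compiled regex keys: pattern match
            if k.match(name):
                return fn
    raise KeyError('no mapping for %r' % name)

print(get_hook('main.c')('main.c'))      # compile C: main.c
print(get_hook('util.cpp')('util.cpp'))  # compile C++: util.cpp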

Repository: aws/lumberyard (sha f85344403c1c2e77ec8c75deb2c116e97b713217)
Path: dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/arrays/categorical.py
Language: python
Function: Categorical.set_ordered(self, value, inplace=False)
Summary: Set the ordered attribute to the boolean value.

def set_ordered(self, value, inplace=False): """ Set the ordered attribute to the boolean value. Parameters ---------- value : bool Set whether this categorical is ordered (True) or not (False). inplace : bool, default False Whether or not to set the ordered attribute in-place or return a copy of this categorical with ordered set to the value. """ inplace = validate_bool_kwarg(inplace, "inplace") new_dtype = CategoricalDtype(self.categories, ordered=value) cat = self if inplace else self.copy() cat._dtype = new_dtype if not inplace: return cat
[ "def", "set_ordered", "(", "self", ",", "value", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "new_dtype", "=", "CategoricalDtype", "(", "self", ".", "categories", ",", "ordered", "=", "value", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "cat", ".", "_dtype", "=", "new_dtype", "if", "not", "inplace", ":", "return", "cat" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/arrays/categorical.py#L737-L754
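A hedged usage sketch against this vendored pandas (note that recent pandas releases deprecate the `inplace` flag on this method):

import pandas as pd

cat = pd.Categorical(['b', 'a', 'b'], categories=['a', 'b'])
ordered = cat.set_ordered(True)  # returns an ordered copy
print(ordered.ordered)           # True
print(cat.ordered)               # False -- the original is untouched

cat.set_ordered(True, inplace=True)  # mutates in place, returns None
print(cat.ordered)                   # True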
RobotLocomotion/drake
0e18a34604c45ed65bc9018a54f7610f91cdad5b
examples/acrobot/acrobot_io.py
python
save_scenario
(*, scenario)
return yaml_dump(scrubbed)
Given a scenario, returns a yaml-formatted str for it.
Given a scenario, returns a yaml-formatted str for it.
[ "Given", "a", "scenario", "returns", "a", "yaml", "-", "formatted", "str", "for", "it", "." ]
def save_scenario(*, scenario): """Given a scenario, returns a yaml-formatted str for it. """ # For a known list of scenario-specific items, convert numpy arrays into # lists for serialization purposes. scrubbed = dict(scenario) for key in ["controller_params", "initial_state"]: if isinstance(scenario[key], dict): for subkey in ["min", "max"]: scrubbed[key][subkey] = [ float(x) for x in scenario[key][subkey] ] else: scrubbed[key] = [float(x) for x in scenario[key]] return yaml_dump(scrubbed)
[ "def", "save_scenario", "(", "*", ",", "scenario", ")", ":", "# For a known list of scenario-specific items, convert numpy arrays into", "# lists for serialization purposes.", "scrubbed", "=", "dict", "(", "scenario", ")", "for", "key", "in", "[", "\"controller_params\"", ",", "\"initial_state\"", "]", ":", "if", "isinstance", "(", "scenario", "[", "key", "]", ",", "dict", ")", ":", "for", "subkey", "in", "[", "\"min\"", ",", "\"max\"", "]", ":", "scrubbed", "[", "key", "]", "[", "subkey", "]", "=", "[", "float", "(", "x", ")", "for", "x", "in", "scenario", "[", "key", "]", "[", "subkey", "]", "]", "else", ":", "scrubbed", "[", "key", "]", "=", "[", "float", "(", "x", ")", "for", "x", "in", "scenario", "[", "key", "]", "]", "return", "yaml_dump", "(", "scrubbed", ")" ]
https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/examples/acrobot/acrobot_io.py#L13-L27
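The scrubbing loop above is self-contained enough to trace standalone; here is the same transformation on an illustrative scenario (values hypothetical), showing the plain-list dict that would be handed to yaml_dump:

import numpy as np

scenario = {
    "controller_params": np.array([1.0, 2.0]),
    "initial_state": {"min": np.array([-0.1]), "max": np.array([0.1])},
}
scrubbed = dict(scenario)
for key in ["controller_params", "initial_state"]:
    if isinstance(scenario[key], dict):
        for subkey in ["min", "max"]:
            scrubbed[key][subkey] = [float(x) for x in scenario[key][subkey]]
    else:
        scrubbed[key] = [float(x) for x in scenario[key]]
print(scrubbed)
# {'controller_params': [1.0, 2.0], 'initial_state': {'min': [-0.1], 'max': [0.1]}}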
SpaceNetChallenge/BuildingDetectors
3def3c44b5847c744cd2f3356182892d92496579
qinhaifang/src/caffe-mnc/scripts/cpp_lint.py
python
ReverseCloseExpression
(clean_lines, linenum, pos)
return (line, 0, -1)
If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum.
If input points to ) or } or ] or >, finds the position that opens it.
[ "If", "input", "points", "to", ")", "or", "}", "or", "]", "or", ">", "finds", "the", "position", "that", "opens", "it", "." ]
def ReverseCloseExpression(clean_lines, linenum, pos): """If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] endchar = line[pos] if endchar not in ')}]>': return (line, 0, -1) if endchar == ')': startchar = '(' if endchar == ']': startchar = '[' if endchar == '}': startchar = '{' if endchar == '>': startchar = '<' # Check last line (start_pos, num_open) = FindStartOfExpressionInLine( line, pos, 0, startchar, endchar) if start_pos > -1: return (line, linenum, start_pos) # Continue scanning backward while linenum > 0: linenum -= 1 line = clean_lines.elided[linenum] (start_pos, num_open) = FindStartOfExpressionInLine( line, len(line) - 1, num_open, startchar, endchar) if start_pos > -1: return (line, linenum, start_pos) # Did not find startchar before beginning of file, give up return (line, 0, -1)
[ "def", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "pos", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "endchar", "=", "line", "[", "pos", "]", "if", "endchar", "not", "in", "')}]>'", ":", "return", "(", "line", ",", "0", ",", "-", "1", ")", "if", "endchar", "==", "')'", ":", "startchar", "=", "'('", "if", "endchar", "==", "']'", ":", "startchar", "=", "'['", "if", "endchar", "==", "'}'", ":", "startchar", "=", "'{'", "if", "endchar", "==", "'>'", ":", "startchar", "=", "'<'", "# Check last line", "(", "start_pos", ",", "num_open", ")", "=", "FindStartOfExpressionInLine", "(", "line", ",", "pos", ",", "0", ",", "startchar", ",", "endchar", ")", "if", "start_pos", ">", "-", "1", ":", "return", "(", "line", ",", "linenum", ",", "start_pos", ")", "# Continue scanning backward", "while", "linenum", ">", "0", ":", "linenum", "-=", "1", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "(", "start_pos", ",", "num_open", ")", "=", "FindStartOfExpressionInLine", "(", "line", ",", "len", "(", "line", ")", "-", "1", ",", "num_open", ",", "startchar", ",", "endchar", ")", "if", "start_pos", ">", "-", "1", ":", "return", "(", "line", ",", "linenum", ",", "start_pos", ")", "# Did not find startchar before beginning of file, give up", "return", "(", "line", ",", "0", ",", "-", "1", ")" ]
https://github.com/SpaceNetChallenge/BuildingDetectors/blob/3def3c44b5847c744cd2f3356182892d92496579/qinhaifang/src/caffe-mnc/scripts/cpp_lint.py#L1327-L1369
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mailbox.py
python
_ProxyFile.seek
(self, offset, whence=0)
Change position.
Change position.
[ "Change", "position", "." ]
def seek(self, offset, whence=0): """Change position.""" if whence == 1: self._file.seek(self._pos) self._file.seek(offset, whence) self._pos = self._file.tell()
[ "def", "seek", "(", "self", ",", "offset", ",", "whence", "=", "0", ")", ":", "if", "whence", "==", "1", ":", "self", ".", "_file", ".", "seek", "(", "self", ".", "_pos", ")", "self", ".", "_file", ".", "seek", "(", "offset", ",", "whence", ")", "self", ".", "_pos", "=", "self", ".", "_file", ".", "tell", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/mailbox.py#L1898-L1903
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/asyncio/coroutines.py
python
iscoroutine
(obj)
Return True if obj is a coroutine object.
Return True if obj is a coroutine object.
[ "Return", "True", "if", "obj", "is", "a", "coroutine", "object", "." ]
def iscoroutine(obj): """Return True if obj is a coroutine object.""" if type(obj) in _iscoroutine_typecache: return True if isinstance(obj, _COROUTINE_TYPES): # Just in case we don't want to cache more than 100 # positive types. That shouldn't ever happen, unless # someone stressing the system on purpose. if len(_iscoroutine_typecache) < 100: _iscoroutine_typecache.add(type(obj)) return True else: return False
[ "def", "iscoroutine", "(", "obj", ")", ":", "if", "type", "(", "obj", ")", "in", "_iscoroutine_typecache", ":", "return", "True", "if", "isinstance", "(", "obj", ",", "_COROUTINE_TYPES", ")", ":", "# Just in case we don't want to cache more than 100", "# positive types. That shouldn't ever happen, unless", "# someone stressing the system on purpose.", "if", "len", "(", "_iscoroutine_typecache", ")", "<", "100", ":", "_iscoroutine_typecache", ".", "add", "(", "type", "(", "obj", ")", ")", "return", "True", "else", ":", "return", "False" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/asyncio/coroutines.py#L173-L186
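A quick usage check through the public asyncio API (the type cache above is an internal fast path, invisible to callers):

import asyncio

async def fetch():
    return 42

coro = fetch()
print(asyncio.iscoroutine(coro))   # True
print(asyncio.iscoroutine(fetch))  # False -- the function itself is not a coroutine object
coro.close()  # avoid the "coroutine was never awaited" warning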
netket/netket
0d534e54ecbf25b677ea72af6b85947979420652
netket/graph/_lattice_edge_logic.py
python
get_custom_edges
( basis_vectors, extent, site_offsets, pbc, atol, custom_edges: Sequence[CustomEdgeT] )
return colored_edges
Generates the edges described in `custom_edges` for all unit cells. See the docstring of `Lattice.__init__` for the syntax of `custom_edges`.
Generates the edges described in `custom_edges` for all unit cells.
[ "Generates", "the", "edges", "described", "in", "custom_edges", "for", "all", "unit", "cells", "." ]
def get_custom_edges( basis_vectors, extent, site_offsets, pbc, atol, custom_edges: Sequence[CustomEdgeT] ): """Generates the edges described in `custom_edges` for all unit cells. See the docstring of `Lattice.__init__` for the syntax of `custom_edges.""" if not all([len(desc) in (3, 4) for desc in custom_edges]): raise ValueError( dedent( """ custom_edges must be a list of tuples of length 3 or 4. Every tuple must contain two sublattice indices (integers), a distance vector and can optionally include an integer to represent the color of that edge. Check the docstring of `nk.graph.Lattice` for more informations. """ ) ) def translated_edges(sl1, sl2, distance, color): # get distance in terms of unit cells d_cell = (distance + site_offsets[sl1] - site_offsets[sl2]) @ np.linalg.inv( basis_vectors ) if not np.all(is_approx_int(d_cell, atol=atol)): # error out msg = f"{distance} is invalid distance vector between sublattices {sl1}->{sl2}" # see if the user flipped the vector accidentally d_cell = (distance + site_offsets[sl2] - site_offsets[sl1]) @ np.linalg.inv( basis_vectors ) if np.all(is_approx_int(d_cell, atol=atol)): msg += f" (but valid {sl2}->{sl1})" raise ValueError(msg) d_cell = np.asarray(np.rint(d_cell), dtype=int) # catches self-referential and other unrealisably long edges if not np.all(d_cell < extent): raise ValueError( f"Distance vector {distance} does not fit into the lattice" ) # Unit cells of starting points start_min = np.where(pbc, 0, np.maximum(0, -d_cell)) start_max = np.where(pbc, extent, extent - np.maximum(0, d_cell)) start_ranges = [slice(lo, hi) for lo, hi in zip(start_min, start_max)] start = np.mgrid[start_ranges].reshape(len(extent), -1).T end = (start + d_cell) % extent # Convert to site indices start = site_to_idx((start, sl1), extent, site_offsets) end = site_to_idx((end, sl2), extent, site_offsets) return [(*edge, color) for edge in zip(start, end)] colored_edges = [] for i, desc in enumerate(custom_edges): edge_data = desc[:3] edge_color = desc[3] if len(desc) == 4 else i colored_edges += translated_edges(*edge_data, edge_color) return colored_edges
[ "def", "get_custom_edges", "(", "basis_vectors", ",", "extent", ",", "site_offsets", ",", "pbc", ",", "atol", ",", "custom_edges", ":", "Sequence", "[", "CustomEdgeT", "]", ")", ":", "if", "not", "all", "(", "[", "len", "(", "desc", ")", "in", "(", "3", ",", "4", ")", "for", "desc", "in", "custom_edges", "]", ")", ":", "raise", "ValueError", "(", "dedent", "(", "\"\"\"\n custom_edges must be a list of tuples of length 3 or 4.\n Every tuple must contain two sublattice indices (integers), a distance vector\n and can optionally include an integer to represent the color of that edge.\n \n Check the docstring of `nk.graph.Lattice` for more informations.\n \"\"\"", ")", ")", "def", "translated_edges", "(", "sl1", ",", "sl2", ",", "distance", ",", "color", ")", ":", "# get distance in terms of unit cells", "d_cell", "=", "(", "distance", "+", "site_offsets", "[", "sl1", "]", "-", "site_offsets", "[", "sl2", "]", ")", "@", "np", ".", "linalg", ".", "inv", "(", "basis_vectors", ")", "if", "not", "np", ".", "all", "(", "is_approx_int", "(", "d_cell", ",", "atol", "=", "atol", ")", ")", ":", "# error out", "msg", "=", "f\"{distance} is invalid distance vector between sublattices {sl1}->{sl2}\"", "# see if the user flipped the vector accidentally", "d_cell", "=", "(", "distance", "+", "site_offsets", "[", "sl2", "]", "-", "site_offsets", "[", "sl1", "]", ")", "@", "np", ".", "linalg", ".", "inv", "(", "basis_vectors", ")", "if", "np", ".", "all", "(", "is_approx_int", "(", "d_cell", ",", "atol", "=", "atol", ")", ")", ":", "msg", "+=", "f\" (but valid {sl2}->{sl1})\"", "raise", "ValueError", "(", "msg", ")", "d_cell", "=", "np", ".", "asarray", "(", "np", ".", "rint", "(", "d_cell", ")", ",", "dtype", "=", "int", ")", "# catches self-referential and other unrealisably long edges", "if", "not", "np", ".", "all", "(", "d_cell", "<", "extent", ")", ":", "raise", "ValueError", "(", "f\"Distance vector {distance} does not fit into the lattice\"", ")", "# Unit cells of starting points", "start_min", "=", "np", ".", "where", "(", "pbc", ",", "0", ",", "np", ".", "maximum", "(", "0", ",", "-", "d_cell", ")", ")", "start_max", "=", "np", ".", "where", "(", "pbc", ",", "extent", ",", "extent", "-", "np", ".", "maximum", "(", "0", ",", "d_cell", ")", ")", "start_ranges", "=", "[", "slice", "(", "lo", ",", "hi", ")", "for", "lo", ",", "hi", "in", "zip", "(", "start_min", ",", "start_max", ")", "]", "start", "=", "np", ".", "mgrid", "[", "start_ranges", "]", ".", "reshape", "(", "len", "(", "extent", ")", ",", "-", "1", ")", ".", "T", "end", "=", "(", "start", "+", "d_cell", ")", "%", "extent", "# Convert to site indices", "start", "=", "site_to_idx", "(", "(", "start", ",", "sl1", ")", ",", "extent", ",", "site_offsets", ")", "end", "=", "site_to_idx", "(", "(", "end", ",", "sl2", ")", ",", "extent", ",", "site_offsets", ")", "return", "[", "(", "*", "edge", ",", "color", ")", "for", "edge", "in", "zip", "(", "start", ",", "end", ")", "]", "colored_edges", "=", "[", "]", "for", "i", ",", "desc", "in", "enumerate", "(", "custom_edges", ")", ":", "edge_data", "=", "desc", "[", ":", "3", "]", "edge_color", "=", "desc", "[", "3", "]", "if", "len", "(", "desc", ")", "==", "4", "else", "i", "colored_edges", "+=", "translated_edges", "(", "*", "edge_data", ",", "edge_color", ")", "return", "colored_edges" ]
https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/graph/_lattice_edge_logic.py#L146-L208
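A hedged usage sketch of how `custom_edges` reaches this function, via the `nk.graph.Lattice` constructor the docstring points to (the lattice geometry and colors below are illustrative, not canonical):

import numpy as np
import netket as nk

graph = nk.graph.Lattice(
    basis_vectors=np.eye(2),  # square lattice, one site per unit cell
    extent=(3, 3),
    custom_edges=[
        (0, 0, np.array([1.0, 0.0]), 0),  # horizontal nearest neighbour, color 0
        (0, 0, np.array([0.0, 1.0]), 0),  # vertical nearest neighbour, color 0
        (0, 0, np.array([1.0, 1.0]), 1),  # diagonal neighbour, color 1
    ],
)
print(graph.n_nodes)  # 9 sites on the periodic 3x3 lattice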
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
build/android/gyp/util/md5_check.py
python
Changes.IterAddedPaths
(self)
Generator for paths that were added.
Generator for paths that were added.
[ "Generator", "for", "paths", "that", "were", "added", "." ]
def IterAddedPaths(self): """Generator for paths that were added.""" for path in self.new_metadata.IterPaths(): if self._GetOldTag(path) is None: yield path
[ "def", "IterAddedPaths", "(", "self", ")", ":", "for", "path", "in", "self", ".", "new_metadata", ".", "IterPaths", "(", ")", ":", "if", "self", ".", "_GetOldTag", "(", "path", ")", "is", "None", ":", "yield", "path" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/build/android/gyp/util/md5_check.py#L136-L140
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
tools/android/appstats.py
python
OutputBeautifier.PrettyGraph
(self, file_path, snapshots)
Creates a pdf graph of |snapshots| (a list of DeviceSnapshots) at |file_path|. This currently only shows memory stats and no network stats.
Creates a pdf graph of |snapshots| (a list of DeviceSnapshots) at |file_path|. This currently only shows memory stats and no network stats.
[ "Creates", "a", "pdf", "graph", "of", "|snapshots|", "(", "a", "list", "of", "DeviceSnapshots", ")", "at", "|file_path|", ".", "This", "currently", "only", "shows", "memory", "stats", "and", "no", "network", "stats", "." ]
def PrettyGraph(self, file_path, snapshots): """Creates a pdf graph of |snapshots| (a list of DeviceSnapshots) at |file_path|. This currently only shows memory stats and no network stats.""" # Import these here so the rest of the functionality doesn't rely on # matplotlib from matplotlib import pyplot from matplotlib.backends.backend_pdf import PdfPages if not file_path or not snapshots: return pids = self.__FindPidsForSnapshotList(snapshots) pp = PdfPages(file_path) for (userid, pid, name) in pids: figure = pyplot.figure() ax = figure.add_subplot(1, 1, 1) ax.set_xlabel('Time (s)') ax.set_ylabel('MB') ax.set_title(name + ' (' + pid + ')') mem_list = [[] for x in range(len(self.__MEMORY_COLUMN_TITLES))] timestamps = [] for snapshot in snapshots: results = snapshot.GetMemoryResults(pid) if not results: continue timestamps.append(round(snapshot.GetTimestamp(), 2)) assert len(results) == len(self.__MEMORY_COLUMN_TITLES) for idx, result in enumerate(results): mem_list[idx].append(result) colors = [] for data in mem_list: colors.append(ax.plot(timestamps, data)[0]) for i in xrange(len(timestamps)): ax.annotate(data[i], xy=(timestamps[i], data[i])) figure.legend(colors, self.__MEMORY_COLUMN_TITLES) pp.savefig() pp.close()
[ "def", "PrettyGraph", "(", "self", ",", "file_path", ",", "snapshots", ")", ":", "# Import these here so the rest of the functionality doesn't rely on", "# matplotlib", "from", "matplotlib", "import", "pyplot", "from", "matplotlib", ".", "backends", ".", "backend_pdf", "import", "PdfPages", "if", "not", "file_path", "or", "not", "snapshots", ":", "return", "pids", "=", "self", ".", "__FindPidsForSnapshotList", "(", "snapshots", ")", "pp", "=", "PdfPages", "(", "file_path", ")", "for", "(", "userid", ",", "pid", ",", "name", ")", "in", "pids", ":", "figure", "=", "pyplot", ".", "figure", "(", ")", "ax", "=", "figure", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "ax", ".", "set_xlabel", "(", "'Time (s)'", ")", "ax", ".", "set_ylabel", "(", "'MB'", ")", "ax", ".", "set_title", "(", "name", "+", "' ('", "+", "pid", "+", "')'", ")", "mem_list", "=", "[", "[", "]", "for", "x", "in", "range", "(", "len", "(", "self", ".", "__MEMORY_COLUMN_TITLES", ")", ")", "]", "timestamps", "=", "[", "]", "for", "snapshot", "in", "snapshots", ":", "results", "=", "snapshot", ".", "GetMemoryResults", "(", "pid", ")", "if", "not", "results", ":", "continue", "timestamps", ".", "append", "(", "round", "(", "snapshot", ".", "GetTimestamp", "(", ")", ",", "2", ")", ")", "assert", "len", "(", "results", ")", "==", "len", "(", "self", ".", "__MEMORY_COLUMN_TITLES", ")", "for", "idx", ",", "result", "in", "enumerate", "(", "results", ")", ":", "mem_list", "[", "idx", "]", ".", "append", "(", "result", ")", "colors", "=", "[", "]", "for", "data", "in", "mem_list", ":", "colors", ".", "append", "(", "ax", ".", "plot", "(", "timestamps", ",", "data", ")", "[", "0", "]", ")", "for", "i", "in", "xrange", "(", "len", "(", "timestamps", ")", ")", ":", "ax", ".", "annotate", "(", "data", "[", "i", "]", ",", "xy", "=", "(", "timestamps", "[", "i", "]", ",", "data", "[", "i", "]", ")", ")", "figure", ".", "legend", "(", "colors", ",", "self", ".", "__MEMORY_COLUMN_TITLES", ")", "pp", ".", "savefig", "(", ")", "pp", ".", "close", "(", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/android/appstats.py#L826-L869
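The method rests on matplotlib's multi-page-PDF pattern; reduced to a runnable sketch with hypothetical data (one savefig call per page):

from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages

timestamps = [0.0, 1.0, 2.0]
mem_mb = [10.5, 11.2, 10.9]

pp = PdfPages('memory.pdf')
figure = pyplot.figure()
ax = figure.add_subplot(1, 1, 1)
ax.set_xlabel('Time (s)')
ax.set_ylabel('MB')
line, = ax.plot(timestamps, mem_mb)
figure.legend([line], ['Private Dirty'])
pp.savefig()  # writes the current figure as one PDF page
pp.close()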
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/fluid/framework.py
python
IrGraph.all_sub_graphs
(self, for_test=False)
return [ IrGraph( self.graph.get_sub_graph(i), for_test=for_test) for i in range(self.graph.sub_graph_size()) ]
Return all sub_graphs included in the main graph as a set.
Return all sub_graphs included in the main graph as a set.
[ "Return", "all", "sub_graphs", "included", "in", "the", "main", "graph", "as", "a", "set", "." ]
def all_sub_graphs(self, for_test=False): """ Return all sub_graphs included in the main graph as a set. """ return [ IrGraph( self.graph.get_sub_graph(i), for_test=for_test) for i in range(self.graph.sub_graph_size()) ]
[ "def", "all_sub_graphs", "(", "self", ",", "for_test", "=", "False", ")", ":", "return", "[", "IrGraph", "(", "self", ".", "graph", ".", "get_sub_graph", "(", "i", ")", ",", "for_test", "=", "for_test", ")", "for", "i", "in", "range", "(", "self", ".", "graph", ".", "sub_graph_size", "(", ")", ")", "]" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/framework.py#L4276-L4285
ZhouWeikuan/DouDiZhu
0d84ff6c0bc54dba6ae37955de9ae9307513dc99
code/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py
python
TypeKind.name
(self)
return self._name_map[self]
Get the enumeration name of this cursor kind.
Get the enumeration name of this cursor kind.
[ "Get", "the", "enumeration", "name", "of", "this", "cursor", "kind", "." ]
def name(self): """Get the enumeration name of this cursor kind.""" if self._name_map is None: self._name_map = {} for key,value in TypeKind.__dict__.items(): if isinstance(value,TypeKind): self._name_map[value] = key return self._name_map[self]
[ "def", "name", "(", "self", ")", ":", "if", "self", ".", "_name_map", "is", "None", ":", "self", ".", "_name_map", "=", "{", "}", "for", "key", ",", "value", "in", "TypeKind", ".", "__dict__", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "TypeKind", ")", ":", "self", ".", "_name_map", "[", "value", "]", "=", "key", "return", "self", ".", "_name_map", "[", "self", "]" ]
https://github.com/ZhouWeikuan/DouDiZhu/blob/0d84ff6c0bc54dba6ae37955de9ae9307513dc99/code/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py#L1386-L1393
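The lazy reverse-lookup idiom above, restated as a generic self-contained sketch (the class and its members are illustrative, not part of cindex):

class Kind(object):
    _name_map = None

    def __init__(self, value):
        self.value = value

    @property
    def name(self):
        # Build the instance -> attribute-name map once, on first access.
        if Kind._name_map is None:
            Kind._name_map = {}
            for key, value in Kind.__dict__.items():
                if isinstance(value, Kind):
                    Kind._name_map[value] = key
        return Kind._name_map[self]

Kind.INT = Kind(1)
Kind.FLOAT = Kind(2)
print(Kind.INT.name)  # 'INT'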
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Path/PathScripts/PathOpGui.py
python
SetupOperation
( name, objFactory, opPageClass, pixmap, menuText, toolTip, setupProperties=None )
return command
SetupOperation(name, objFactory, opPageClass, pixmap, menuText, toolTip, setupProperties=None) Creates an instance of CommandPathOp with the given parameters and registers the command with FreeCAD. When activated it creates a model with proxy (by invoking objFactory), assigns a view provider to it (see ViewProvider in this module) and starts the editor specifically for this operation (driven by opPageClass). This is an internal function that is automatically called by the initialisation code for each operation. It is not expected to be called manually.
SetupOperation(name, objFactory, opPageClass, pixmap, menuText, toolTip, setupProperties=None) Creates an instance of CommandPathOp with the given parameters and registers the command with FreeCAD. When activated it creates a model with proxy (by invoking objFactory), assigns a view provider to it (see ViewProvider in this module) and starts the editor specifically for this operation (driven by opPageClass). This is an internal function that is automatically called by the initialisation code for each operation. It is not expected to be called manually.
[ "SetupOperation", "(", "name", "objFactory", "opPageClass", "pixmap", "menuText", "toolTip", "setupProperties", "=", "None", ")", "Creates", "an", "instance", "of", "CommandPathOp", "with", "the", "given", "parameters", "and", "registers", "the", "command", "with", "FreeCAD", ".", "When", "activated", "it", "creates", "a", "model", "with", "proxy", "(", "by", "invoking", "objFactory", ")", "assigns", "a", "view", "provider", "to", "it", "(", "see", "ViewProvider", "in", "this", "module", ")", "and", "starts", "the", "editor", "specifically", "for", "this", "operation", "(", "driven", "by", "opPageClass", ")", ".", "This", "is", "an", "internal", "function", "that", "is", "automatically", "called", "by", "the", "initialisation", "code", "for", "each", "operation", ".", "It", "is", "not", "expected", "to", "be", "called", "manually", "." ]
def SetupOperation( name, objFactory, opPageClass, pixmap, menuText, toolTip, setupProperties=None ): """SetupOperation(name, objFactory, opPageClass, pixmap, menuText, toolTip, setupProperties=None) Creates an instance of CommandPathOp with the given parameters and registers the command with FreeCAD. When activated it creates a model with proxy (by invoking objFactory), assigns a view provider to it (see ViewProvider in this module) and starts the editor specifically for this operation (driven by opPageClass). This is an internal function that is automatically called by the initialisation code for each operation. It is not expected to be called manually. """ res = CommandResources( name, objFactory, opPageClass, pixmap, menuText, None, toolTip ) command = CommandPathOp(res) FreeCADGui.addCommand("Path_%s" % name.replace(" ", "_"), command) if setupProperties is not None: PathSetupSheet.RegisterOperation(name, objFactory, setupProperties) return command
[ "def", "SetupOperation", "(", "name", ",", "objFactory", ",", "opPageClass", ",", "pixmap", ",", "menuText", ",", "toolTip", ",", "setupProperties", "=", "None", ")", ":", "res", "=", "CommandResources", "(", "name", ",", "objFactory", ",", "opPageClass", ",", "pixmap", ",", "menuText", ",", "None", ",", "toolTip", ")", "command", "=", "CommandPathOp", "(", "res", ")", "FreeCADGui", ".", "addCommand", "(", "\"Path_%s\"", "%", "name", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", ",", "command", ")", "if", "setupProperties", "is", "not", "None", ":", "PathSetupSheet", ".", "RegisterOperation", "(", "name", ",", "objFactory", ",", "setupProperties", ")", "return", "command" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Path/PathScripts/PathOpGui.py#L1459-L1480
googleprojectzero/BrokenType
cf49a52b8e35b7d684fc8bc6b2ea8b923c177c2e
truetype-generator/truetype_generate.py
python
TTXParser._Handler_assembly
(self, path, node)
Generates a new TTF program for the node.
Generates a new TTF program for the node.
[ "Generates", "a", "new", "TTF", "program", "for", "the", "node", "." ]
def _Handler_assembly(self, path, node): """Generates a new TTF program for the node. """ if "fpgm" in path: # We want the "fpgm" (Font Program) section empty, as it should only contain instruction/function definitions. node.text = "" else: program = TTProgram(self._twilight_points, self._contours_in_glyph, self._points_in_glyph) if "prep" in path: # Insert a constant number of initialization instructions into the "prep" table. node.text = program.GenerateProgram(self.PREP_INSTRUCTIONS) else: # Generate a regular TrueType program with length depending on the number of glyphs in font. node.text = program.GenerateProgram(min(self._num_instructions // self._num_glyphs, self.MAX_INSTRUCTIONS_PER_GLYPH))
[ "def", "_Handler_assembly", "(", "self", ",", "path", ",", "node", ")", ":", "if", "\"fpgm\"", "in", "path", ":", "# We want the \"fpgm\" (Font Program) section empty, as it should only contain instruction/function definitions.", "node", ".", "text", "=", "\"\"", "else", ":", "program", "=", "TTProgram", "(", "self", ".", "_twilight_points", ",", "self", ".", "_contours_in_glyph", ",", "self", ".", "_points_in_glyph", ")", "if", "\"prep\"", "in", "path", ":", "# Insert a constant number of initialization instructions into the \"prep\" table.", "node", ".", "text", "=", "program", ".", "GenerateProgram", "(", "self", ".", "PREP_INSTRUCTIONS", ")", "else", ":", "# Generate a regular TrueType program with length depending on the number of glyphs in font.", "node", ".", "text", "=", "program", ".", "GenerateProgram", "(", "min", "(", "self", ".", "_num_instructions", "//", "self", ".", "_num_glyphs", ",", "self", ".", "MAX_INSTRUCTIONS_PER_GLYPH", ")", ")" ]
https://github.com/googleprojectzero/BrokenType/blob/cf49a52b8e35b7d684fc8bc6b2ea8b923c177c2e/truetype-generator/truetype_generate.py#L694-L709
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
contrib/gizmos/osx_cocoa/gizmos.py
python
DynamicSashWindow.GetHScrollBar
(*args, **kwargs)
return _gizmos.DynamicSashWindow_GetHScrollBar(*args, **kwargs)
GetHScrollBar(self, Window child) -> ScrollBar
GetHScrollBar(self, Window child) -> ScrollBar
[ "GetHScrollBar", "(", "self", "Window", "child", ")", "-", ">", "ScrollBar" ]
def GetHScrollBar(*args, **kwargs): """GetHScrollBar(self, Window child) -> ScrollBar""" return _gizmos.DynamicSashWindow_GetHScrollBar(*args, **kwargs)
[ "def", "GetHScrollBar", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gizmos", ".", "DynamicSashWindow_GetHScrollBar", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/osx_cocoa/gizmos.py#L111-L113
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py
python
Maildir.__contains__
(self, key)
return key in self._toc
Return True if the keyed message exists, False otherwise.
Return True if the keyed message exists, False otherwise.
[ "Return", "True", "if", "the", "keyed", "message", "exists", "False", "otherwise", "." ]
def __contains__(self, key): """Return True if the keyed message exists, False otherwise.""" self._refresh() return key in self._toc
[ "def", "__contains__", "(", "self", ",", "key", ")", ":", "self", ".", "_refresh", "(", ")", "return", "key", "in", "self", ".", "_toc" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py#L405-L408
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/cython/Cython/Build/Dependencies.py
python
cythonize
(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, exclude_failures=False, **options)
return module_list
Compile a set of source modules into C/C++ files and return a list of distutils Extension objects for them. :param module_list: As module list, pass either a glob pattern, a list of glob patterns or a list of Extension objects. The latter allows you to configure the extensions separately through the normal distutils options. You can also pass Extension objects that have glob patterns as their sources. Then, cythonize will resolve the pattern and create a copy of the Extension for every matching file. :param exclude: When passing glob patterns as ``module_list``, you can exclude certain module names explicitly by passing them into the ``exclude`` option. :param nthreads: The number of concurrent builds for parallel compilation (requires the ``multiprocessing`` module). :param aliases: If you want to use compiler directives like ``# distutils: ...`` but can only know at compile time (when running the ``setup.py``) which values to use, you can use aliases and pass a dictionary mapping those aliases to Python strings when calling :func:`cythonize`. As an example, say you want to use the compiler directive ``# distutils: include_dirs = ../static_libs/include/`` but this path isn't always fixed and you want to find it when running the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python variable called ``foo`` as a string, and then call ``cythonize(..., aliases={'MY_HEADERS': foo})``. :param quiet: If True, Cython won't print error, warning, or status messages during the compilation. :param force: Forces the recompilation of the Cython modules, even if the timestamps don't indicate that a recompilation is necessary. :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this will be determined at a per-file level based on compiler directives. This affects only modules found based on file names. Extension instances passed into :func:`cythonize` will not be changed. It is recommended to rather use the compiler directive ``# distutils: language = c++`` than this option. :param exclude_failures: For a broad 'try to compile' mode that ignores compilation failures and simply excludes the failed extensions, pass ``exclude_failures=True``. Note that this only really makes sense for compiling ``.py`` files which can also be used without compilation. :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` files compiled. The HTML file gives an indication of how much Python interaction there is in each of the source code lines, compared to plain C code. It also allows you to see the C/C++ code generated for each line of Cython code. This report is invaluable when optimizing a function for speed, and for determining when to :ref:`release the GIL <nogil>`: in general, a ``nogil`` block may contain only "white" code. See examples in :ref:`determining_where_to_add_types` or :ref:`primes`. :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: ``compiler_directives={'embedsignature': True}``. See :ref:`compiler-directives`. :param depfile: produce depfiles for the sources if True.
Compile a set of source modules into C/C++ files and return a list of distutils Extension objects for them.
[ "Compile", "a", "set", "of", "source", "modules", "into", "C", "/", "C", "++", "files", "and", "return", "a", "list", "of", "distutils", "Extension", "objects", "for", "them", "." ]
def cythonize(module_list, exclude=None, nthreads=0, aliases=None, quiet=False, force=False, language=None, exclude_failures=False, **options): """ Compile a set of source modules into C/C++ files and return a list of distutils Extension objects for them. :param module_list: As module list, pass either a glob pattern, a list of glob patterns or a list of Extension objects. The latter allows you to configure the extensions separately through the normal distutils options. You can also pass Extension objects that have glob patterns as their sources. Then, cythonize will resolve the pattern and create a copy of the Extension for every matching file. :param exclude: When passing glob patterns as ``module_list``, you can exclude certain module names explicitly by passing them into the ``exclude`` option. :param nthreads: The number of concurrent builds for parallel compilation (requires the ``multiprocessing`` module). :param aliases: If you want to use compiler directives like ``# distutils: ...`` but can only know at compile time (when running the ``setup.py``) which values to use, you can use aliases and pass a dictionary mapping those aliases to Python strings when calling :func:`cythonize`. As an example, say you want to use the compiler directive ``# distutils: include_dirs = ../static_libs/include/`` but this path isn't always fixed and you want to find it when running the ``setup.py``. You can then do ``# distutils: include_dirs = MY_HEADERS``, find the value of ``MY_HEADERS`` in the ``setup.py``, put it in a python variable called ``foo`` as a string, and then call ``cythonize(..., aliases={'MY_HEADERS': foo})``. :param quiet: If True, Cython won't print error, warning, or status messages during the compilation. :param force: Forces the recompilation of the Cython modules, even if the timestamps don't indicate that a recompilation is necessary. :param language: To globally enable C++ mode, you can pass ``language='c++'``. Otherwise, this will be determined at a per-file level based on compiler directives. This affects only modules found based on file names. Extension instances passed into :func:`cythonize` will not be changed. It is recommended to rather use the compiler directive ``# distutils: language = c++`` than this option. :param exclude_failures: For a broad 'try to compile' mode that ignores compilation failures and simply excludes the failed extensions, pass ``exclude_failures=True``. Note that this only really makes sense for compiling ``.py`` files which can also be used without compilation. :param annotate: If ``True``, will produce a HTML file for each of the ``.pyx`` or ``.py`` files compiled. The HTML file gives an indication of how much Python interaction there is in each of the source code lines, compared to plain C code. It also allows you to see the C/C++ code generated for each line of Cython code. This report is invaluable when optimizing a function for speed, and for determining when to :ref:`release the GIL <nogil>`: in general, a ``nogil`` block may contain only "white" code. See examples in :ref:`determining_where_to_add_types` or :ref:`primes`. :param compiler_directives: Allow to set compiler directives in the ``setup.py`` like this: ``compiler_directives={'embedsignature': True}``. See :ref:`compiler-directives`. :param depfile: produce depfiles for the sources if True. """ if exclude is None: exclude = [] if 'include_path' not in options: options['include_path'] = ['.'] if 'common_utility_include_dir' in options: safe_makedirs(options['common_utility_include_dir']) depfile = options.pop('depfile', None) if pythran is None: pythran_options = None else: pythran_options = CompilationOptions(**options) pythran_options.cplus = True pythran_options.np_pythran = True c_options = CompilationOptions(**options) cpp_options = CompilationOptions(**options); cpp_options.cplus = True ctx = c_options.create_context() options = c_options module_list, module_metadata = create_extension_list( module_list, exclude=exclude, ctx=ctx, quiet=quiet, exclude_failures=exclude_failures, language=language, aliases=aliases) deps = create_dependency_tree(ctx, quiet=quiet) build_dir = getattr(options, 'build_dir', None) def copy_to_build_dir(filepath, root=os.getcwd()): filepath_abs = os.path.abspath(filepath) if os.path.isabs(filepath): filepath = filepath_abs if filepath_abs.startswith(root): # distutil extension depends are relative to cwd mod_dir = join_path(build_dir, os.path.dirname(_relpath(filepath, root))) copy_once_if_newer(filepath_abs, mod_dir) modules_by_cfile = collections.defaultdict(list) to_compile = [] for m in module_list: if build_dir: for dep in m.depends: copy_to_build_dir(dep) cy_sources = [ source for source in m.sources if os.path.splitext(source)[1] in ('.pyx', '.py')] if len(cy_sources) == 1: # normal "special" case: believe the Extension module name to allow user overrides full_module_name = m.name else: # infer FQMN from source files full_module_name = None new_sources = [] for source in m.sources: base, ext = os.path.splitext(source) if ext in ('.pyx', '.py'): if m.np_pythran: c_file = base + '.cpp' options = pythran_options elif m.language == 'c++': c_file = base + '.cpp' options = cpp_options else: c_file = base + '.c' options = c_options # setup for out of place build directory if enabled if build_dir: if os.path.isabs(c_file): warnings.warn("build_dir has no effect for absolute source paths") c_file = os.path.join(build_dir, c_file) dir = os.path.dirname(c_file) safe_makedirs_once(dir) # write out the depfile, if requested if depfile: dependencies = deps.all_dependencies(source) src_base_dir, _ = os.path.split(source) if not src_base_dir.endswith(os.sep): src_base_dir += os.sep # paths below the base_dir are relative, otherwise absolute paths = [] for fname in dependencies: if (fname.startswith(src_base_dir) or fname.startswith('.' + os.path.sep)): paths.append(os.path.relpath(fname, src_base_dir)) else: paths.append(os.path.abspath(fname)) depline = os.path.split(c_file)[1] + ": \\\n " depline += " \\\n ".join(paths) + "\n" with open(c_file+'.dep', 'w') as outfile: outfile.write(depline) if os.path.exists(c_file): c_timestamp = os.path.getmtime(c_file) else: c_timestamp = -1 # Priority goes first to modified files, second to direct # dependents, and finally to indirect dependents. if c_timestamp < deps.timestamp(source): dep_timestamp, dep = deps.timestamp(source), source priority = 0 else: dep_timestamp, dep = deps.newest_dependency(source) priority = 2 - (dep in deps.immediate_dependencies(source)) if force or c_timestamp < dep_timestamp: if not quiet and not force: if source == dep: print("Compiling %s because it changed." % source) else: print("Compiling %s because it depends on %s." % (source, dep)) if not force and options.cache: fingerprint = deps.transitive_fingerprint(source, m, options) else: fingerprint = None to_compile.append(( priority, source, c_file, fingerprint, quiet, options, not exclude_failures, module_metadata.get(m.name), full_module_name)) new_sources.append(c_file) modules_by_cfile[c_file].append(m) else: new_sources.append(source) if build_dir: copy_to_build_dir(source) m.sources = new_sources if options.cache: if not os.path.exists(options.cache): os.makedirs(options.cache) to_compile.sort() # Drop "priority" component of "to_compile" entries and add a # simple progress indicator. N = len(to_compile) progress_fmt = "[{0:%d}/{1}] " % len(str(N)) for i in range(N): progress = progress_fmt.format(i+1, N) to_compile[i] = to_compile[i][1:] + (progress,) if N <= 1: nthreads = 0 if nthreads: # Requires multiprocessing (or Python >= 2.6) try: import multiprocessing pool = multiprocessing.Pool( nthreads, initializer=_init_multiprocessing_helper) except (ImportError, OSError): print("multiprocessing required for parallel cythonization") nthreads = 0 else: # This is a bit more involved than it should be, because KeyboardInterrupts # break the multiprocessing workers when using a normal pool.map(). # See, for example: # http://noswap.com/blog/python-multiprocessing-keyboardinterrupt try: result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1) pool.close() while not result.ready(): try: result.get(99999) # seconds except multiprocessing.TimeoutError: pass except KeyboardInterrupt: pool.terminate() raise pool.join() if not nthreads: for args in to_compile: cythonize_one(*args) if exclude_failures: failed_modules = set() for c_file, modules in modules_by_cfile.items(): if not os.path.exists(c_file): failed_modules.update(modules) elif os.path.getsize(c_file) < 200: f = io_open(c_file, 'r', encoding='iso8859-1') try: if f.read(len('#error ')) == '#error ': # dead compilation result failed_modules.update(modules) finally: f.close() if failed_modules: for module in failed_modules: module_list.remove(module) print("Failed compilations: %s" % ', '.join(sorted([ module.name for module in failed_modules]))) if options.cache: cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100)) # cythonize() is often followed by the (non-Python-buffered) # compiler output, flush now to avoid interleaving output. sys.stdout.flush() return module_list
[ "def", "cythonize", "(", "module_list", ",", "exclude", "=", "None", ",", "nthreads", "=", "0", ",", "aliases", "=", "None", ",", "quiet", "=", "False", ",", "force", "=", "False", ",", "language", "=", "None", ",", "exclude_failures", "=", "False", ",", "*", "*", "options", ")", ":", "if", "exclude", "is", "None", ":", "exclude", "=", "[", "]", "if", "'include_path'", "not", "in", "options", ":", "options", "[", "'include_path'", "]", "=", "[", "'.'", "]", "if", "'common_utility_include_dir'", "in", "options", ":", "safe_makedirs", "(", "options", "[", "'common_utility_include_dir'", "]", ")", "depfile", "=", "options", ".", "pop", "(", "'depfile'", ",", "None", ")", "if", "pythran", "is", "None", ":", "pythran_options", "=", "None", "else", ":", "pythran_options", "=", "CompilationOptions", "(", "*", "*", "options", ")", "pythran_options", ".", "cplus", "=", "True", "pythran_options", ".", "np_pythran", "=", "True", "c_options", "=", "CompilationOptions", "(", "*", "*", "options", ")", "cpp_options", "=", "CompilationOptions", "(", "*", "*", "options", ")", "cpp_options", ".", "cplus", "=", "True", "ctx", "=", "c_options", ".", "create_context", "(", ")", "options", "=", "c_options", "module_list", ",", "module_metadata", "=", "create_extension_list", "(", "module_list", ",", "exclude", "=", "exclude", ",", "ctx", "=", "ctx", ",", "quiet", "=", "quiet", ",", "exclude_failures", "=", "exclude_failures", ",", "language", "=", "language", ",", "aliases", "=", "aliases", ")", "deps", "=", "create_dependency_tree", "(", "ctx", ",", "quiet", "=", "quiet", ")", "build_dir", "=", "getattr", "(", "options", ",", "'build_dir'", ",", "None", ")", "def", "copy_to_build_dir", "(", "filepath", ",", "root", "=", "os", ".", "getcwd", "(", ")", ")", ":", "filepath_abs", "=", "os", ".", "path", ".", "abspath", "(", "filepath", ")", "if", "os", ".", "path", ".", "isabs", "(", "filepath", ")", ":", "filepath", "=", "filepath_abs", "if", "filepath_abs", ".", "startswith", "(", "root", ")", ":", "# distutil extension depends are relative to cwd", "mod_dir", "=", "join_path", "(", "build_dir", ",", "os", ".", "path", ".", "dirname", "(", "_relpath", "(", "filepath", ",", "root", ")", ")", ")", "copy_once_if_newer", "(", "filepath_abs", ",", "mod_dir", ")", "modules_by_cfile", "=", "collections", ".", "defaultdict", "(", "list", ")", "to_compile", "=", "[", "]", "for", "m", "in", "module_list", ":", "if", "build_dir", ":", "for", "dep", "in", "m", ".", "depends", ":", "copy_to_build_dir", "(", "dep", ")", "cy_sources", "=", "[", "source", "for", "source", "in", "m", ".", "sources", "if", "os", ".", "path", ".", "splitext", "(", "source", ")", "[", "1", "]", "in", "(", "'.pyx'", ",", "'.py'", ")", "]", "if", "len", "(", "cy_sources", ")", "==", "1", ":", "# normal \"special\" case: believe the Extension module name to allow user overrides", "full_module_name", "=", "m", ".", "name", "else", ":", "# infer FQMN from source files", "full_module_name", "=", "None", "new_sources", "=", "[", "]", "for", "source", "in", "m", ".", "sources", ":", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "source", ")", "if", "ext", "in", "(", "'.pyx'", ",", "'.py'", ")", ":", "if", "m", ".", "np_pythran", ":", "c_file", "=", "base", "+", "'.cpp'", "options", "=", "pythran_options", "elif", "m", ".", "language", "==", "'c++'", ":", "c_file", "=", "base", "+", "'.cpp'", "options", "=", "cpp_options", "else", ":", "c_file", "=", "base", "+", "'.c'", "options", "=", "c_options", "# setup for out of place build 
directory if enabled", "if", "build_dir", ":", "if", "os", ".", "path", ".", "isabs", "(", "c_file", ")", ":", "warnings", ".", "warn", "(", "\"build_dir has no effect for absolute source paths\"", ")", "c_file", "=", "os", ".", "path", ".", "join", "(", "build_dir", ",", "c_file", ")", "dir", "=", "os", ".", "path", ".", "dirname", "(", "c_file", ")", "safe_makedirs_once", "(", "dir", ")", "# write out the depfile, if requested", "if", "depfile", ":", "dependencies", "=", "deps", ".", "all_dependencies", "(", "source", ")", "src_base_dir", ",", "_", "=", "os", ".", "path", ".", "split", "(", "source", ")", "if", "not", "src_base_dir", ".", "endswith", "(", "os", ".", "sep", ")", ":", "src_base_dir", "+=", "os", ".", "sep", "# paths below the base_dir are relative, otherwise absolute", "paths", "=", "[", "]", "for", "fname", "in", "dependencies", ":", "if", "(", "fname", ".", "startswith", "(", "src_base_dir", ")", "or", "fname", ".", "startswith", "(", "'.'", "+", "os", ".", "path", ".", "sep", ")", ")", ":", "paths", ".", "append", "(", "os", ".", "path", ".", "relpath", "(", "fname", ",", "src_base_dir", ")", ")", "else", ":", "paths", ".", "append", "(", "os", ".", "path", ".", "abspath", "(", "fname", ")", ")", "depline", "=", "os", ".", "path", ".", "split", "(", "c_file", ")", "[", "1", "]", "+", "\": \\\\\\n \"", "depline", "+=", "\" \\\\\\n \"", ".", "join", "(", "paths", ")", "+", "\"\\n\"", "with", "open", "(", "c_file", "+", "'.dep'", ",", "'w'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "depline", ")", "if", "os", ".", "path", ".", "exists", "(", "c_file", ")", ":", "c_timestamp", "=", "os", ".", "path", ".", "getmtime", "(", "c_file", ")", "else", ":", "c_timestamp", "=", "-", "1", "# Priority goes first to modified files, second to direct", "# dependents, and finally to indirect dependents.", "if", "c_timestamp", "<", "deps", ".", "timestamp", "(", "source", ")", ":", "dep_timestamp", ",", "dep", "=", "deps", ".", "timestamp", "(", "source", ")", ",", "source", "priority", "=", "0", "else", ":", "dep_timestamp", ",", "dep", "=", "deps", ".", "newest_dependency", "(", "source", ")", "priority", "=", "2", "-", "(", "dep", "in", "deps", ".", "immediate_dependencies", "(", "source", ")", ")", "if", "force", "or", "c_timestamp", "<", "dep_timestamp", ":", "if", "not", "quiet", "and", "not", "force", ":", "if", "source", "==", "dep", ":", "print", "(", "\"Compiling %s because it changed.\"", "%", "source", ")", "else", ":", "print", "(", "\"Compiling %s because it depends on %s.\"", "%", "(", "source", ",", "dep", ")", ")", "if", "not", "force", "and", "options", ".", "cache", ":", "fingerprint", "=", "deps", ".", "transitive_fingerprint", "(", "source", ",", "m", ",", "options", ")", "else", ":", "fingerprint", "=", "None", "to_compile", ".", "append", "(", "(", "priority", ",", "source", ",", "c_file", ",", "fingerprint", ",", "quiet", ",", "options", ",", "not", "exclude_failures", ",", "module_metadata", ".", "get", "(", "m", ".", "name", ")", ",", "full_module_name", ")", ")", "new_sources", ".", "append", "(", "c_file", ")", "modules_by_cfile", "[", "c_file", "]", ".", "append", "(", "m", ")", "else", ":", "new_sources", ".", "append", "(", "source", ")", "if", "build_dir", ":", "copy_to_build_dir", "(", "source", ")", "m", ".", "sources", "=", "new_sources", "if", "options", ".", "cache", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "options", ".", "cache", ")", ":", "os", ".", "makedirs", "(", "options", ".", "cache", ")", "to_compile", ".", 
"sort", "(", ")", "# Drop \"priority\" component of \"to_compile\" entries and add a", "# simple progress indicator.", "N", "=", "len", "(", "to_compile", ")", "progress_fmt", "=", "\"[{0:%d}/{1}] \"", "%", "len", "(", "str", "(", "N", ")", ")", "for", "i", "in", "range", "(", "N", ")", ":", "progress", "=", "progress_fmt", ".", "format", "(", "i", "+", "1", ",", "N", ")", "to_compile", "[", "i", "]", "=", "to_compile", "[", "i", "]", "[", "1", ":", "]", "+", "(", "progress", ",", ")", "if", "N", "<=", "1", ":", "nthreads", "=", "0", "if", "nthreads", ":", "# Requires multiprocessing (or Python >= 2.6)", "try", ":", "import", "multiprocessing", "pool", "=", "multiprocessing", ".", "Pool", "(", "nthreads", ",", "initializer", "=", "_init_multiprocessing_helper", ")", "except", "(", "ImportError", ",", "OSError", ")", ":", "print", "(", "\"multiprocessing required for parallel cythonization\"", ")", "nthreads", "=", "0", "else", ":", "# This is a bit more involved than it should be, because KeyboardInterrupts", "# break the multiprocessing workers when using a normal pool.map().", "# See, for example:", "# http://noswap.com/blog/python-multiprocessing-keyboardinterrupt", "try", ":", "result", "=", "pool", ".", "map_async", "(", "cythonize_one_helper", ",", "to_compile", ",", "chunksize", "=", "1", ")", "pool", ".", "close", "(", ")", "while", "not", "result", ".", "ready", "(", ")", ":", "try", ":", "result", ".", "get", "(", "99999", ")", "# seconds", "except", "multiprocessing", ".", "TimeoutError", ":", "pass", "except", "KeyboardInterrupt", ":", "pool", ".", "terminate", "(", ")", "raise", "pool", ".", "join", "(", ")", "if", "not", "nthreads", ":", "for", "args", "in", "to_compile", ":", "cythonize_one", "(", "*", "args", ")", "if", "exclude_failures", ":", "failed_modules", "=", "set", "(", ")", "for", "c_file", ",", "modules", "in", "modules_by_cfile", ".", "items", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "c_file", ")", ":", "failed_modules", ".", "update", "(", "modules", ")", "elif", "os", ".", "path", ".", "getsize", "(", "c_file", ")", "<", "200", ":", "f", "=", "io_open", "(", "c_file", ",", "'r'", ",", "encoding", "=", "'iso8859-1'", ")", "try", ":", "if", "f", ".", "read", "(", "len", "(", "'#error '", ")", ")", "==", "'#error '", ":", "# dead compilation result", "failed_modules", ".", "update", "(", "modules", ")", "finally", ":", "f", ".", "close", "(", ")", "if", "failed_modules", ":", "for", "module", "in", "failed_modules", ":", "module_list", ".", "remove", "(", "module", ")", "print", "(", "\"Failed compilations: %s\"", "%", "', '", ".", "join", "(", "sorted", "(", "[", "module", ".", "name", "for", "module", "in", "failed_modules", "]", ")", ")", ")", "if", "options", ".", "cache", ":", "cleanup_cache", "(", "options", ".", "cache", ",", "getattr", "(", "options", ",", "'cache_size'", ",", "1024", "*", "1024", "*", "100", ")", ")", "# cythonize() is often followed by the (non-Python-buffered)", "# compiler output, flush now to avoid interleaving output.", "sys", ".", "stdout", ".", "flush", "(", ")", "return", "module_list" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/cython/Cython/Build/Dependencies.py#L881-L1153
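A typical call site for the parameters documented above, as a standard setup.py sketch (package and path names are placeholders):

from setuptools import setup
from Cython.Build import cythonize

setup(
    name="mypkg",
    ext_modules=cythonize(
        ["mypkg/*.pyx"],  # glob pattern, resolved to Extension objects
        nthreads=4,       # parallel C/C++ generation
        compiler_directives={"embedsignature": True},
        annotate=True,    # emit an HTML annotation file per module
    ),
)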
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/actor/Actor.py
python
Actor.hasLOD
(self)
return self.__hasLOD
Return 1 if the actor has LODs, 0 otherwise
Return 1 if the actor has LODs, 0 otherwise
[ "Return", "1", "if", "the", "actor", "has", "LODs", "0", "otherwise" ]
def hasLOD(self): """ Return 1 if the actor has LODs, 0 otherwise """ return self.__hasLOD
[ "def", "hasLOD", "(", "self", ")", ":", "return", "self", ".", "__hasLOD" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/actor/Actor.py#L727-L731
google/earthenterprise
0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
earth_enterprise/src/fusion/portableglobe/tools/qtnode_to_kml.py
python
GetNextQtnodeBounds
(qtnode, x, y, size)
Calculate next level boundary for qtnode. If the qtnode has further precision, call this routine recursively. Args: qtnode: The remaining string of the qtnode. x: Current left of the qtnode (degrees). y: Current bottom of the qtnode (degrees). size: Current size of sides of qtnode (degrees). Returns: List of lower left and upper right boundary. Raises: Exception: if qtnode is not well formed.
Calculate next level boundary for qtnode.
[ "Calculate", "next", "level", "boundary", "for", "qtnode", "." ]
def GetNextQtnodeBounds(qtnode, x, y, size): """Calculate next level boundary for qtnode. If the qtnode has further precision, call this routine recursively. Args: qtnode: The remaining string of the qtnode. x: Current left of the qtnode (degrees). y: Current bottom of the qtnode (degrees). size: Current size of sides of qtnode (degrees). Returns: List of lower left and upper right boundary. Raises: Exception: if qtnode is not well formed. """ if qtnode: size /= 2 if qtnode[0] == "3": return GetNextQtnodeBounds(qtnode[1:], x, y + size, size) elif qtnode[0] == "2": return GetNextQtnodeBounds(qtnode[1:], x + size, y + size, size) elif qtnode[0] == "1": return GetNextQtnodeBounds(qtnode[1:], x + size, y, size) elif qtnode[0] == "0": return GetNextQtnodeBounds(qtnode[1:], x, y, size) else: raise Exception("Error: unexpected qtnode value %s" % qtnode[0]) else: return [x, y, x + size, y + size]
[ "def", "GetNextQtnodeBounds", "(", "qtnode", ",", "x", ",", "y", ",", "size", ")", ":", "if", "qtnode", ":", "size", "/=", "2", "if", "qtnode", "[", "0", "]", "==", "\"3\"", ":", "return", "GetNextQtnodeBounds", "(", "qtnode", "[", "1", ":", "]", ",", "x", ",", "y", "+", "size", ",", "size", ")", "elif", "qtnode", "[", "0", "]", "==", "\"2\"", ":", "return", "GetNextQtnodeBounds", "(", "qtnode", "[", "1", ":", "]", ",", "x", "+", "size", ",", "y", "+", "size", ",", "size", ")", "elif", "qtnode", "[", "0", "]", "==", "\"1\"", ":", "return", "GetNextQtnodeBounds", "(", "qtnode", "[", "1", ":", "]", ",", "x", "+", "size", ",", "y", ",", "size", ")", "elif", "qtnode", "[", "0", "]", "==", "\"0\"", ":", "return", "GetNextQtnodeBounds", "(", "qtnode", "[", "1", ":", "]", ",", "x", ",", "y", ",", "size", ")", "else", ":", "raise", "Exception", "(", "\"Error: unexpected qtnode value %s\"", "%", "qtnode", "[", "0", "]", ")", "else", ":", "return", "[", "x", ",", "y", ",", "x", "+", "size", ",", "y", "+", "size", "]" ]
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/fusion/portableglobe/tools/qtnode_to_kml.py#L58-L87
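Worked call for the record above, to make the recursion concrete: each quadrant digit halves size and shifts the lower-left corner. The seed frame below (the full -180..180 extent, size 360) is an assumption for illustration; the tool seeds the recursion with whatever extent its quadtree uses.

# Hypothetical seed frame: full extent, 360 degrees on a side.
bounds = GetNextQtnodeBounds("021", x=-180.0, y=-180.0, size=360.0)
# "0" keeps the lower-left quadrant, "2" jumps to the upper-right,
# "1" to the lower-right; after three halvings the cell is 45 degrees:
print(bounds)   # [-45.0, -90.0, 0.0, -45.0]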
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/timeseries/python/timeseries/estimators.py
python
ARRegressor.__init__
( self, periodicities, input_window_size, output_window_size, num_features, num_time_buckets=10, loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS, hidden_layer_sizes=None, anomaly_prior_probability=None, anomaly_distribution=None, optimizer=None, model_dir=None, config=None)
Initialize the Estimator. Args: periodicities: periodicities of the input data, in the same units as the time feature. Note this can be a single value or a list of values for multiple periodicities. input_window_size: Number of past time steps of data to look at when doing the regression. output_window_size: Number of future time steps to predict. Note that setting it to > 1 empirically seems to give a better fit. num_features: The dimensionality of the time series (one for univariate, more than one for multivariate). num_time_buckets: Number of buckets into which to divide (time % periodicity) for generating time based features. loss: Loss function to use for training. Currently supported values are SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For SQUARED_LOSS, the evaluation loss is reported based on un-scaled observations and predictions, while the training loss is computed on normalized data. hidden_layer_sizes: list of sizes of hidden layers. anomaly_prior_probability: If specified, constructs a mixture model under which anomalies (modeled with `anomaly_distribution`) have this prior probability. See `AnomalyMixtureARModel`. anomaly_distribution: May not be specified unless anomaly_prior_probability is specified and is not None. Controls the distribution of anomalies under the mixture model. Currently either `ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY` or `ar_model.AnomalyMixtureARModel.CAUCHY_ANOMALY`. See `AnomalyMixtureARModel`. Defaults to `GAUSSIAN_ANOMALY`. optimizer: The optimization algorithm to use when training, inheriting from tf.train.Optimizer. Defaults to Adagrad with step size 0.1. model_dir: See `Estimator`. config: See `Estimator`. Raises: ValueError: For invalid combinations of arguments.
Initialize the Estimator.
[ "Initialize", "the", "Estimator", "." ]
def __init__( self, periodicities, input_window_size, output_window_size, num_features, num_time_buckets=10, loss=ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS, hidden_layer_sizes=None, anomaly_prior_probability=None, anomaly_distribution=None, optimizer=None, model_dir=None, config=None): """Initialize the Estimator. Args: periodicities: periodicities of the input data, in the same units as the time feature. Note this can be a single value or a list of values for multiple periodicities. input_window_size: Number of past time steps of data to look at when doing the regression. output_window_size: Number of future time steps to predict. Note that setting it to > 1 empirically seems to give a better fit. num_features: The dimensionality of the time series (one for univariate, more than one for multivariate). num_time_buckets: Number of buckets into which to divide (time % periodicity) for generating time based features. loss: Loss function to use for training. Currently supported values are SQUARED_LOSS and NORMAL_LIKELIHOOD_LOSS. Note that for NORMAL_LIKELIHOOD_LOSS, we train the covariance term as well. For SQUARED_LOSS, the evaluation loss is reported based on un-scaled observations and predictions, while the training loss is computed on normalized data. hidden_layer_sizes: list of sizes of hidden layers. anomaly_prior_probability: If specified, constructs a mixture model under which anomalies (modeled with `anomaly_distribution`) have this prior probability. See `AnomalyMixtureARModel`. anomaly_distribution: May not be specified unless anomaly_prior_probability is specified and is not None. Controls the distribution of anomalies under the mixture model. Currently either `ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY` or `ar_model.AnomalyMixtureARModel.CAUCHY_ANOMALY`. See `AnomalyMixtureARModel`. Defaults to `GAUSSIAN_ANOMALY`. optimizer: The optimization algorithm to use when training, inheriting from tf.train.Optimizer. Defaults to Adagrad with step size 0.1. model_dir: See `Estimator`. config: See `Estimator`. Raises: ValueError: For invalid combinations of arguments. """ if optimizer is None: optimizer = train.AdagradOptimizer(0.1) if anomaly_prior_probability is None and anomaly_distribution is not None: raise ValueError("anomaly_prior_probability is required if " "anomaly_distribution is specified.") if anomaly_prior_probability is None: if anomaly_distribution is None: anomaly_distribution = ar_model.AnomalyMixtureARModel.GAUSSIAN_ANOMALY model = ar_model.ARModel( periodicities=periodicities, num_features=num_features, num_time_buckets=num_time_buckets, input_window_size=input_window_size, output_window_size=output_window_size, loss=loss, hidden_layer_sizes=hidden_layer_sizes) else: if loss != ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS: raise ValueError( "AnomalyMixtureARModel only supports " "ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS for its loss argument.") model = ar_model.AnomalyMixtureARModel( periodicities=periodicities, input_window_size=input_window_size, output_window_size=output_window_size, num_features=num_features, num_time_buckets=num_time_buckets, hidden_layer_sizes=hidden_layer_sizes, anomaly_prior_probability=anomaly_prior_probability, anomaly_distribution=anomaly_distribution) state_manager = state_management.FilteringOnlyStateManager() super(ARRegressor, self).__init__( model=model, state_manager=state_manager, optimizer=optimizer, model_dir=model_dir, config=config)
[ "def", "__init__", "(", "self", ",", "periodicities", ",", "input_window_size", ",", "output_window_size", ",", "num_features", ",", "num_time_buckets", "=", "10", ",", "loss", "=", "ar_model", ".", "ARModel", ".", "NORMAL_LIKELIHOOD_LOSS", ",", "hidden_layer_sizes", "=", "None", ",", "anomaly_prior_probability", "=", "None", ",", "anomaly_distribution", "=", "None", ",", "optimizer", "=", "None", ",", "model_dir", "=", "None", ",", "config", "=", "None", ")", ":", "if", "optimizer", "is", "None", ":", "optimizer", "=", "train", ".", "AdagradOptimizer", "(", "0.1", ")", "if", "anomaly_prior_probability", "is", "None", "and", "anomaly_distribution", "is", "not", "None", ":", "raise", "ValueError", "(", "\"anomaly_prior_probability is required if \"", "\"anomaly_distribution is specified.\"", ")", "if", "anomaly_prior_probability", "is", "None", ":", "if", "anomaly_distribution", "is", "None", ":", "anomaly_distribution", "=", "ar_model", ".", "AnomalyMixtureARModel", ".", "GAUSSIAN_ANOMALY", "model", "=", "ar_model", ".", "ARModel", "(", "periodicities", "=", "periodicities", ",", "num_features", "=", "num_features", ",", "num_time_buckets", "=", "num_time_buckets", ",", "input_window_size", "=", "input_window_size", ",", "output_window_size", "=", "output_window_size", ",", "loss", "=", "loss", ",", "hidden_layer_sizes", "=", "hidden_layer_sizes", ")", "else", ":", "if", "loss", "!=", "ar_model", ".", "ARModel", ".", "NORMAL_LIKELIHOOD_LOSS", ":", "raise", "ValueError", "(", "\"AnomalyMixtureARModel only supports \"", "\"ar_model.ARModel.NORMAL_LIKELIHOOD_LOSS for its loss argument.\"", ")", "model", "=", "ar_model", ".", "AnomalyMixtureARModel", "(", "periodicities", "=", "periodicities", ",", "input_window_size", "=", "input_window_size", ",", "output_window_size", "=", "output_window_size", ",", "num_features", "=", "num_features", ",", "num_time_buckets", "=", "num_time_buckets", ",", "hidden_layer_sizes", "=", "hidden_layer_sizes", ",", "anomaly_prior_probability", "=", "anomaly_prior_probability", ",", "anomaly_distribution", "=", "anomaly_distribution", ")", "state_manager", "=", "state_management", ".", "FilteringOnlyStateManager", "(", ")", "super", "(", "ARRegressor", ",", "self", ")", ".", "__init__", "(", "model", "=", "model", ",", "state_manager", "=", "state_manager", ",", "optimizer", "=", "optimizer", ",", "model_dir", "=", "model_dir", ",", "config", "=", "config", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/timeseries/python/timeseries/estimators.py#L160-L237
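A minimal construction sketch for the record above, using only the arguments documented in its docstring; the import paths simply mirror this record's file location in tensorflow.contrib and are assumptions beyond that.

from tensorflow.contrib.timeseries.python.timeseries import ar_model
from tensorflow.contrib.timeseries.python.timeseries.estimators import ARRegressor

estimator = ARRegressor(
    periodicities=100,        # one seasonal period, in time-feature units
    input_window_size=10,     # past steps fed to the regression
    output_window_size=6,     # future steps predicted (>1 tends to fit better)
    num_features=1,           # univariate series
    loss=ar_model.ARModel.SQUARED_LOSS,
    hidden_layer_sizes=[20, 20])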
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/metrics/confusion_matrix.py
python
_decrease_metric
(chart, decrease="mean")
return chart, not_nans
This function is used to reduce the calculated metrics for each class of each example. Args: chart (ndarray): A data table containing the calculated measurement scores for each batch and class. The first two dims should be batch and class. decrease (str): Defines the mode used to reduce the computed result of one batch of data. Decrease will only be employed when 'calculation_method' is True. Default: "mean".
This function is used to reduce the calculated metrics for each class of each example.
[ "This", "function", "is", "used", "to", "reduce", "the", "calculated", "metrics", "for", "each", "class", "of", "each", "example", "." ]
def _decrease_metric(chart, decrease="mean"): """ This function is used to reduce the calculated metrics for each class of each example. Args: chart (ndarray): A data table containing the calculated measurement scores for each batch and class. The first two dims should be batch and class. decrease (str): Defines the mode used to reduce the computed result of one batch of data. Decrease will only be employed when 'calculation_method' is True. Default: "mean". """ nans = np.isnan(chart) not_nans = (~nans).astype(float) chart[nans] = 0 decrease_dict = {"mean": _decrease_mean(not_nans, chart), "sum": _decrease_sum(not_nans, chart), "mean_batch": _decrease_mean_batch, "sum_batch": _decrease_sum_batch(not_nans, chart), "mean_channel": _decrease_mean_channel(not_nans, chart), "sum_channel": _decrease_sum_channel(not_nans, chart), "none": _decrease_none(not_nans, chart)} not_nans, chart = decrease_dict.get(decrease) return chart, not_nans
[ "def", "_decrease_metric", "(", "chart", ",", "decrease", "=", "\"mean\"", ")", ":", "nans", "=", "np", ".", "isnan", "(", "chart", ")", "not_nans", "=", "(", "~", "nans", ")", ".", "astype", "(", "float", ")", "chart", "[", "nans", "]", "=", "0", "decrease_dict", "=", "{", "\"mean\"", ":", "_decrease_mean", "(", "not_nans", ",", "chart", ")", ",", "\"sum\"", ":", "_decrease_sum", "(", "not_nans", ",", "chart", ")", ",", "\"mean_batch\"", ":", "_decrease_mean_batch", ",", "\"sum_batch\"", ":", "_decrease_sum_batch", "(", "not_nans", ",", "chart", ")", ",", "\"mean_channel\"", ":", "_decrease_mean_channel", "(", "not_nans", ",", "chart", ")", ",", "\"sum_channel\"", ":", "_decrease_sum_channel", "(", "not_nans", ",", "chart", ")", ",", "\"none\"", ":", "_decrease_none", "(", "not_nans", ",", "chart", ")", "}", "not_nans", ",", "chart", "=", "decrease_dict", ".", "get", "(", "decrease", ")", "return", "chart", ",", "not_nans" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/metrics/confusion_matrix.py#L437-L461
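The NaN-masking at the top of the helper above is the interesting part; here is an illustrative numpy-only sketch of what its "mean" branch computes (this is not MindSpore code).

import numpy as np

chart = np.array([[0.8, np.nan], [0.6, 0.9]])   # batch x class scores
nans = np.isnan(chart)
not_nans = (~nans).astype(float)
chart = np.where(nans, 0.0, chart)
mean = chart.sum() / not_nans.sum()             # 0.7666..., the NaN is ignored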
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tix.py
python
Grid.entrycget
(self, x, y, option)
return self.tk.call(self, 'entrycget', x, y, option)
Get the option value for cell at (x,y)
Get the option value for cell at (x,y)
[ "Get", "the", "option", "value", "for", "cell", "at", "(", "x", "y", ")" ]
def entrycget(self, x, y, option): "Get the option value for cell at (x,y)" if option and option[0] != '-': option = '-' + option return self.tk.call(self, 'entrycget', x, y, option)
[ "def", "entrycget", "(", "self", ",", "x", ",", "y", ",", "option", ")", ":", "if", "option", "and", "option", "[", "0", "]", "!=", "'-'", ":", "option", "=", "'-'", "+", "option", "return", "self", ".", "tk", ".", "call", "(", "self", ",", "'entrycget'", ",", "x", ",", "y", ",", "option", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/Tix.py#L1856-L1860
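Because of the dash normalization in the record above, callers may pass option names with or without the leading '-'. In the sketch, grid stands for a hypothetical Tix.Grid instance and 'text' is an illustrative option name.

value_a = grid.entrycget(0, 0, 'text')
value_b = grid.entrycget(0, 0, '-text')
assert value_a == value_b   # both reach Tk as '-text'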
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
build/android/gyp/util/md5_check.py
python
_Metadata.IterSubpaths
(self, path)
return (entry['path'] for entry in subentries)
Returns a generator for all subpaths in the given zip. If the given path is not a zip file or doesn't exist, returns an empty iterable.
Returns a generator for all subpaths in the given zip.
[ "Returns", "a", "generator", "for", "all", "subpaths", "in", "the", "given", "zip", "." ]
def IterSubpaths(self, path): """Returns a generator for all subpaths in the given zip. If the given path is not a zip file or doesn't exist, returns an empty iterable. """ outer_entry = self._GetEntry(path) if not outer_entry: return () subentries = outer_entry.get('entries', []) return (entry['path'] for entry in subentries)
[ "def", "IterSubpaths", "(", "self", ",", "path", ")", ":", "outer_entry", "=", "self", ".", "_GetEntry", "(", "path", ")", "if", "not", "outer_entry", ":", "return", "(", ")", "subentries", "=", "outer_entry", ".", "get", "(", "'entries'", ",", "[", "]", ")", "return", "(", "entry", "[", "'path'", "]", "for", "entry", "in", "subentries", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/build/android/gyp/util/md5_check.py#L348-L358
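Usage sketch for the record above (md stands for a populated _Metadata instance): iterating a recorded zip yields its entry paths, and any other path degrades to an empty iterable, so callers need no special-casing.

for subpath in md.IterSubpaths('libs/classes.zip'):
    print(subpath)                               # each path recorded inside the zip
assert list(md.IterSubpaths('no/such/file')) == []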
nasa/fprime
595cf3682d8365943d86c1a6fe7c78f0a116acf0
Autocoders/Python/src/fprime_ac/generators/MdStartPage.py
python
MdStartPage.addVisitor
(self, visitor)
Add a visitor to the list of visitors. @param visitor: the visitor to add, must be derived from AbstractVisitor.
Add a visitor to the list of visitors.
[ "Add", "a", "visitor", "to", "the", "list", "of", "visitors", "." ]
def addVisitor(self, visitor): """ Add a visitor to the list of visitors. @param visitor: the visitor to add, must be derived from AbstractVisitor. """ if issubclass(visitor.__class__, AbstractVisitor.AbstractVisitor): self.__visitor_list.append(visitor) else: DEBUG.error( "MdStartPage.addVisitor(v) - the given visitor is not a subclass of AbstractVisitor!" ) raise Exception( "MdStartPage.addVisitor(v) - the given visitor is not a subclass of AbstractVisitor!" )
[ "def", "addVisitor", "(", "self", ",", "visitor", ")", ":", "if", "issubclass", "(", "visitor", ".", "__class__", ",", "AbstractVisitor", ".", "AbstractVisitor", ")", ":", "self", ".", "__visitor_list", ".", "append", "(", "visitor", ")", "else", ":", "DEBUG", ".", "error", "(", "\"MdStartPage.addVisitor(v) - the given visitor is not a subclass of AbstractVisitor!\"", ")", "raise", "Exception", "(", "\"MdStartPage.addVisitor(v) - the given visitor is not a subclass of AbstractVisitor!\"", ")" ]
https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/generators/MdStartPage.py#L86-L99
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/webapp2/webapp2.py
python
Response.clear
(self)
Clears all data written to the output stream so that it is empty.
Clears all data written to the output stream so that it is empty.
[ "Clears", "all", "data", "written", "to", "the", "output", "stream", "so", "that", "it", "is", "empty", "." ]
def clear(self): """Clears all data written to the output stream so that it is empty.""" self.body = ''
[ "def", "clear", "(", "self", ")", ":", "self", ".", "body", "=", "''" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/webapp2/webapp2.py#L459-L461
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/extras/misc.py
python
action_process_file_func
(tsk)
return tsk.fun(tsk)
Ask the function attached to the task to process it
Ask the function attached to the task to process it
[ "Ask", "the", "function", "attached", "to", "the", "task", "to", "process", "it" ]
def action_process_file_func(tsk): "Ask the function attached to the task to process it" if not tsk.fun: raise Errors.WafError('task must have a function attached to it for copy_func to work!') return tsk.fun(tsk)
[ "def", "action_process_file_func", "(", "tsk", ")", ":", "if", "not", "tsk", ".", "fun", ":", "raise", "Errors", ".", "WafError", "(", "'task must have a function attached to it for copy_func to work!'", ")", "return", "tsk", ".", "fun", "(", "tsk", ")" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/extras/misc.py#L41-L44
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/path.py
python
split
(path)
return normsep(path).split('/')
Return the normalized path as a list of its components. split('foo/bar/baz') returns ['foo', 'bar', 'baz']
Return the normalized path as a list of its components. split('foo/bar/baz') returns ['foo', 'bar', 'baz']
[ "Return", "the", "normalized", "path", "as", "a", "list", "of", "its", "components", ".", "split", "(", "foo", "/", "bar", "/", "baz", ")", "returns", "[", "foo", "bar", "baz", "]" ]
def split(path): ''' Return the normalized path as a list of its components. split('foo/bar/baz') returns ['foo', 'bar', 'baz'] ''' return normsep(path).split('/')
[ "def", "split", "(", "path", ")", ":", "return", "normsep", "(", "path", ")", ".", "split", "(", "'/'", ")" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozpack/path.py#L59-L64
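A quick check of the documented behaviour, assuming mozpack is importable from this tree; the Windows note follows from normsep, which folds os.sep into '/'.

from mozpack.path import split

assert split('foo/bar/baz') == ['foo', 'bar', 'baz']
# On Windows, normsep also folds backslashes, so r'foo\bar\baz'
# splits into the same three components.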
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/base.py
python
BaseContext.make_helper
(self, builder, typ, value=None, ref=None)
return self._make_helper(builder, typ, value, ref, kind='value')
Get a helper object to access the *typ*'s members, for the given value or reference.
Get a helper object to access the *typ*'s members, for the given value or reference.
[ "Get", "a", "helper", "object", "to", "access", "the", "*", "typ", "*", "s", "members", "for", "the", "given", "value", "or", "reference", "." ]
def make_helper(self, builder, typ, value=None, ref=None): """ Get a helper object to access the *typ*'s members, for the given value or reference. """ return self._make_helper(builder, typ, value, ref, kind='value')
[ "def", "make_helper", "(", "self", ",", "builder", ",", "typ", ",", "value", "=", "None", ",", "ref", "=", "None", ")", ":", "return", "self", ".", "_make_helper", "(", "builder", ",", "typ", ",", "value", ",", "ref", ",", "kind", "=", "'value'", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/targets/base.py#L967-L972
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/telemetry/telemetry/core/backends/chrome/inspector_backend.py
python
InspectorBackend.UnregisterDomain
(self, domain_name)
Unregisters a previously registered domain.
Unregisters a previously registered domain.
[ "Unregisters", "a", "previously", "registered", "domain", "." ]
def UnregisterDomain(self, domain_name): """Unregisters a previously registered domain.""" assert domain_name in self._domain_handlers self._domain_handlers.pop(domain_name)
[ "def", "UnregisterDomain", "(", "self", ",", "domain_name", ")", ":", "assert", "domain_name", "in", "self", ".", "_domain_handlers", "self", ".", "_domain_handlers", ".", "pop", "(", "domain_name", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/telemetry/core/backends/chrome/inspector_backend.py#L311-L314
openai/triton
7b48340ffddd7d2624b0330b219eb05b673c086b
python/triton/language/core.py
python
dot
(input, other, allow_tf32=True, _builder=None)
return frontend.dot(input, other, allow_tf32, _builder)
Returns the matrix product of two blocks. The two blocks must be two-dimensional and have compatible inner dimensions. :param input: The first block to be multiplied. :type input: 2D block of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} :param other: The second block to be multiplied. :type other: 2D block of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`}
Returns the matrix product of two blocks.
[ "Returns", "the", "matrix", "product", "of", "two", "blocks", "." ]
def dot(input, other, allow_tf32=True, _builder=None): """ Returns the matrix product of two blocks. The two blocks must be two-dimensional and have compatible inner dimensions. :param input: The first block to be multiplied. :type input: 2D block of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} :param other: The second block to be multiplied. :type other: 2D block of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} """ return frontend.dot(input, other, allow_tf32, _builder)
[ "def", "dot", "(", "input", ",", "other", ",", "allow_tf32", "=", "True", ",", "_builder", "=", "None", ")", ":", "return", "frontend", ".", "dot", "(", "input", ",", "other", ",", "allow_tf32", ",", "_builder", ")" ]
https://github.com/openai/triton/blob/7b48340ffddd7d2624b0330b219eb05b673c086b/python/triton/language/core.py#L539-L550
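A single-tile sketch of how dot is used inside a kernel of this era of the tl API; the pointer arithmetic assumes square, contiguous BLOCK x BLOCK tiles and omits masking and the usual grid plumbing.

import triton
import triton.language as tl

@triton.jit
def matmul_tile(a_ptr, b_ptr, c_ptr, BLOCK: tl.constexpr):
    offs = tl.arange(0, BLOCK)
    idx = offs[:, None] * BLOCK + offs[None, :]   # row-major tile offsets
    a = tl.load(a_ptr + idx)
    b = tl.load(b_ptr + idx)
    c = tl.dot(a, b)          # BLOCK x BLOCK matrix product (tf32 by default)
    tl.store(c_ptr + idx, c)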
facebook/mysql-5.6
65a650660ec7b4d627d1b738f397252ff4706207
arcanist/lint/cpp_linter/cpplint.py
python
CheckForHeaderGuard
(filename, lines, error)
Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found.
Checks that the file contains a header guard.
[ "Checks", "that", "the", "file", "contains", "a", "header", "guard", "." ]
def CheckForHeaderGuard(filename, lines, error): """Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ cppvar = GetHeaderGuardCPPVariable(filename) ifndef = None ifndef_linenum = 0 define = None endif = None endif_linenum = 0 for linenum, line in enumerate(lines): # Already been well guarded, no need for further checking. if line.strip() == "#pragma once": return linesplit = line.split() if len(linesplit) >= 2: # find the first occurrence of #ifndef and #define, save arg if not ifndef and linesplit[0] == '#ifndef': # set ifndef to the header guard presented on the #ifndef line. ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == '#define': define = linesplit[1] # find the last occurrence of #endif, save entire line if line.startswith('#endif'): endif = line endif_linenum = linenum if not ifndef: error(filename, 0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return if not define: error(filename, 0, 'build/header_guard', 5, 'No #define header guard found, suggested CPP variable is: %s' % cppvar) return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ # for backward compatibility. if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, '#ifndef header guard has wrong style, please use: %s' % cppvar) if define != ifndef: error(filename, 0, 'build/header_guard', 5, '#ifndef and #define don\'t match, suggested CPP variable is: %s' % cppvar) return if endif != ('#endif // %s' % cppvar): error_level = 0 if endif != ('#endif // %s' % (cppvar + '_')): error_level = 5 ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, error) error(filename, endif_linenum, 'build/header_guard', error_level, '#endif line should be "#endif // %s"' % cppvar)
[ "def", "CheckForHeaderGuard", "(", "filename", ",", "lines", ",", "error", ")", ":", "cppvar", "=", "GetHeaderGuardCPPVariable", "(", "filename", ")", "ifndef", "=", "None", "ifndef_linenum", "=", "0", "define", "=", "None", "endif", "=", "None", "endif_linenum", "=", "0", "for", "linenum", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "# Already been well guarded, no need for further checking.", "if", "line", ".", "strip", "(", ")", "==", "\"#pragma once\"", ":", "return", "linesplit", "=", "line", ".", "split", "(", ")", "if", "len", "(", "linesplit", ")", ">=", "2", ":", "# find the first occurrence of #ifndef and #define, save arg", "if", "not", "ifndef", "and", "linesplit", "[", "0", "]", "==", "'#ifndef'", ":", "# set ifndef to the header guard presented on the #ifndef line.", "ifndef", "=", "linesplit", "[", "1", "]", "ifndef_linenum", "=", "linenum", "if", "not", "define", "and", "linesplit", "[", "0", "]", "==", "'#define'", ":", "define", "=", "linesplit", "[", "1", "]", "# find the last occurrence of #endif, save entire line", "if", "line", ".", "startswith", "(", "'#endif'", ")", ":", "endif", "=", "line", "endif_linenum", "=", "linenum", "if", "not", "ifndef", ":", "error", "(", "filename", ",", "0", ",", "'build/header_guard'", ",", "5", ",", "'No #ifndef header guard found, suggested CPP variable is: %s'", "%", "cppvar", ")", "return", "if", "not", "define", ":", "error", "(", "filename", ",", "0", ",", "'build/header_guard'", ",", "5", ",", "'No #define header guard found, suggested CPP variable is: %s'", "%", "cppvar", ")", "return", "# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__", "# for backward compatibility.", "if", "ifndef", "!=", "cppvar", ":", "error_level", "=", "0", "if", "ifndef", "!=", "cppvar", "+", "'_'", ":", "error_level", "=", "5", "ParseNolintSuppressions", "(", "filename", ",", "lines", "[", "ifndef_linenum", "]", ",", "ifndef_linenum", ",", "error", ")", "error", "(", "filename", ",", "ifndef_linenum", ",", "'build/header_guard'", ",", "error_level", ",", "'#ifndef header guard has wrong style, please use: %s'", "%", "cppvar", ")", "if", "define", "!=", "ifndef", ":", "error", "(", "filename", ",", "0", ",", "'build/header_guard'", ",", "5", ",", "'#ifndef and #define don\\'t match, suggested CPP variable is: %s'", "%", "cppvar", ")", "return", "if", "endif", "!=", "(", "'#endif // %s'", "%", "cppvar", ")", ":", "error_level", "=", "0", "if", "endif", "!=", "(", "'#endif // %s'", "%", "(", "cppvar", "+", "'_'", ")", ")", ":", "error_level", "=", "5", "ParseNolintSuppressions", "(", "filename", ",", "lines", "[", "endif_linenum", "]", ",", "endif_linenum", ",", "error", ")", "error", "(", "filename", ",", "endif_linenum", ",", "'build/header_guard'", ",", "error_level", ",", "'#endif line should be \"#endif // %s\"'", "%", "cppvar", ")" ]
https://github.com/facebook/mysql-5.6/blob/65a650660ec7b4d627d1b738f397252ff4706207/arcanist/lint/cpp_linter/cpplint.py#L1403-L1478
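Two facts are directly visible in the code above: `#pragma once` short-circuits the whole check, and the classic guard must use the variable returned by GetHeaderGuardCPPVariable (the name below is made up for illustration). Note the #endif comparison expects two spaces before //.

found = []
CheckForHeaderGuard('sql/foo.h', ['#pragma once'],
                    lambda *args: found.append(args))
assert found == []            # pragma once: no errors reported

# Variable-style guard that would pass, assuming the guard variable
# resolves to MYSQL_SQL_FOO_H_:
lines = ['#ifndef MYSQL_SQL_FOO_H_',
         '#define MYSQL_SQL_FOO_H_',
         '#endif  // MYSQL_SQL_FOO_H_']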
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/stc.py
python
StyledTextEvent.SetFoldLevelPrev
(*args, **kwargs)
return _stc.StyledTextEvent_SetFoldLevelPrev(*args, **kwargs)
SetFoldLevelPrev(self, int val)
SetFoldLevelPrev(self, int val)
[ "SetFoldLevelPrev", "(", "self", "int", "val", ")" ]
def SetFoldLevelPrev(*args, **kwargs): """SetFoldLevelPrev(self, int val)""" return _stc.StyledTextEvent_SetFoldLevelPrev(*args, **kwargs)
[ "def", "SetFoldLevelPrev", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextEvent_SetFoldLevelPrev", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/stc.py#L7062-L7064
bundy-dns/bundy
3d41934996b82b0cd2fe22dd74d2abc1daba835d
src/lib/python/bundy/server_common/dns_tcp.py
python
DNSTCPContext.send
(self, data)
return self.__do_send()
Send a DNS message. In the non blocking mode, it sends as much data as possible via the underlying TCP socket until it would block or all data are sent out, and returns the corresponding result code. This method therefore doesn't block in this mode. Note: the initial implementation only works in the non blocking mode. This method must not be called once an error is detected and CLOSED is returned or a prior send attempt is ongoing (with the result code of SENDING); otherwise DNSTCPContextError is raised. Parameter: data (binary): A binary sequence that is supposed to be a complete DNS message in the wire format. It must meet the assumption that DNSTCPSendBuffer requires. Return: An integer constant representing the result: - SEND_DONE All data have been sent out successfully. - SENDING All writable data has been sent out, and further attempt would block at the moment. The caller is expected to detect it when the underlying socket is writable again and call send_ready() to continue the send. - CLOSED A network error happened before the send operation is completed. The underlying socket has been closed, and this context object will be unusable.
Send a DNS message.
[ "Send", "a", "DNS", "message", "." ]
def send(self, data): '''Send a DNS message. In the non blocking mode, it sends as much data as possible via the underlying TCP socket until it would block or all data are sent out, and returns the corresponding result code. This method therefore doesn't block in this mode. Note: the initial implementation only works in the non blocking mode. This method must not be called once an error is detected and CLOSED is returned or a prior send attempt is ongoing (with the result code of SENDING); otherwise DNSTCPContextError is raised. Parameter: data (binary): A binary sequence that is supposed to be a complete DNS message in the wire format. It must meet the assumption that DNSTCPSendBuffer requires. Return: An integer constant representing the result: - SEND_DONE All data have been sent out successfully. - SENDING All writable data has been sent out, and further attempt would block at the moment. The caller is expected to detect it when the underlying socket is writable again and call send_ready() to continue the send. - CLOSED A network error happened before the send operation is completed. The underlying socket has been closed, and this context object will be unusable. ''' if self.__sock is None: raise DNSTCPContextError('send() called after close') if self.__send_buffer is not None: raise DNSTCPContextError('duplicate send()') self.__send_buffer = DNSTCPSendBuffer(data) self.__send_marker = 0 return self.__do_send()
[ "def", "send", "(", "self", ",", "data", ")", ":", "if", "self", ".", "__sock", "is", "None", ":", "raise", "DNSTCPContextError", "(", "'send() called after close'", ")", "if", "self", ".", "__send_buffer", "is", "not", "None", ":", "raise", "DNSTCPContextError", "(", "'duplicate send()'", ")", "self", ".", "__send_buffer", "=", "DNSTCPSendBuffer", "(", "data", ")", "self", ".", "__send_marker", "=", "0", "return", "self", ".", "__do_send", "(", ")" ]
https://github.com/bundy-dns/bundy/blob/3d41934996b82b0cd2fe22dd74d2abc1daba835d/src/lib/python/bundy/server_common/dns_tcp.py#L163-L203
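The docstring above already prescribes the driver loop; spelled out below, assuming ctx is a DNSTCPContext over a non-blocking socket, send_ready() is the continuation the docstring names, and the result codes are class attributes.

result = ctx.send(wire_message)
if result == DNSTCPContext.SENDING:
    # wait for the socket to become writable again, then continue:
    result = ctx.send_ready()
elif result == DNSTCPContext.CLOSED:
    ctx = None   # the context is unusable after a network error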
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/external/boost/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py
python
adjust_container_limits_for_variadic_sequences
(headerDir, containers, maxElements)
Adjusts the limits of variadic sequence MPL-containers.
Adjusts the limits of variadic sequence MPL-containers.
[ "Adjusts", "the", "limits", "of", "variadic", "sequence", "MPL", "-", "containers", "." ]
def adjust_container_limits_for_variadic_sequences(headerDir, containers, maxElements): """Adjusts the limits of variadic sequence MPL-containers.""" for container in containers: headerFile = os.path.join( headerDir, "limits", container + ".hpp" ) regexMatch = r'(define\s+BOOST_MPL_LIMIT_' + container.upper() + r'_SIZE\s+)[0-9]+' regexReplace = r'\g<1>' + re.escape( str(maxElements) ) for line in fileinput.input( headerFile, inplace=1, mode="rU" ): line = re.sub(regexMatch, regexReplace, line.rstrip()) print(line)
[ "def", "adjust_container_limits_for_variadic_sequences", "(", "headerDir", ",", "containers", ",", "maxElements", ")", ":", "for", "container", "in", "containers", ":", "headerFile", "=", "os", ".", "path", ".", "join", "(", "headerDir", ",", "\"limits\"", ",", "container", "+", "\".hpp\"", ")", "regexMatch", "=", "r'(define\\s+BOOST_MPL_LIMIT_'", "+", "container", ".", "upper", "(", ")", "+", "r'_SIZE\\s+)[0-9]+'", "regexReplace", "=", "r'\\g<1>'", "+", "re", ".", "escape", "(", "str", "(", "maxElements", ")", ")", "for", "line", "in", "fileinput", ".", "input", "(", "headerFile", ",", "inplace", "=", "1", ",", "mode", "=", "\"rU\"", ")", ":", "line", "=", "re", ".", "sub", "(", "regexMatch", ",", "regexReplace", ",", "line", ".", "rstrip", "(", ")", ")", "print", "(", "line", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/boost/boost_1_68_0/libs/mpl/preprocessed/boost_mpl_preprocess.py#L70-L78
google/shaka-packager
e1b0c7c45431327fd3ce193514a5407d07b39b22
packager/third_party/protobuf/python/mox.py
python
MockMethod.MultipleTimes
(self, group_name="default")
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
Move this method into group of calls which may be called multiple times. A group of repeating calls must be defined together, and must be executed in full before the next expected method can be called. Args: group_name: the name of the unordered group. Returns: self
Move this method into group of calls which may be called multiple times.
[ "Move", "this", "method", "into", "group", "of", "calls", "which", "may", "be", "called", "multiple", "times", "." ]
def MultipleTimes(self, group_name="default"): """Move this method into group of calls which may be called multiple times. A group of repeating calls must be defined together, and must be executed in full before the next expected method can be called. Args: group_name: the name of the unordered group. Returns: self """ return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
[ "def", "MultipleTimes", "(", "self", ",", "group_name", "=", "\"default\"", ")", ":", "return", "self", ".", "_CheckAndCreateNewGroup", "(", "group_name", ",", "MultipleTimesGroup", ")" ]
https://github.com/google/shaka-packager/blob/e1b0c7c45431327fd3ce193514a5407d07b39b22/packager/third_party/protobuf/python/mox.py#L704-L716
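Typical record-phase usage of the method above (standard mox API; the mocked method name is illustrative): once grouped, the call may fire any number of times during replay.

import mox

mock_client = mox.MockAnything()
mock_client.Ping().MultipleTimes().AndReturn(True)   # any number of Ping() calls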
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_misc.py
python
TimeSpan_Minutes
(*args, **kwargs)
return _misc_.TimeSpan_Minutes(*args, **kwargs)
TimeSpan_Minutes(long min) -> TimeSpan
TimeSpan_Minutes(long min) -> TimeSpan
[ "TimeSpan_Minutes", "(", "long", "min", ")", "-", ">", "TimeSpan" ]
def TimeSpan_Minutes(*args, **kwargs): """TimeSpan_Minutes(long min) -> TimeSpan""" return _misc_.TimeSpan_Minutes(*args, **kwargs)
[ "def", "TimeSpan_Minutes", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "TimeSpan_Minutes", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L4572-L4574
eric612/MobileNet-YOLO
69b4441cb3ec8d553fbdef788ad033e246f901bd
python/caffe/io.py
python
array_to_blobproto
(arr, diff=None)
return blob
Converts a N-dimensional array to blob proto. If diff is given, also convert the diff. You need to make sure that arr and diff have the same shape, and this function does not do sanity check.
Converts a N-dimensional array to blob proto. If diff is given, also convert the diff. You need to make sure that arr and diff have the same shape, and this function does not do sanity check.
[ "Converts", "a", "N", "-", "dimensional", "array", "to", "blob", "proto", ".", "If", "diff", "is", "given", "also", "convert", "the", "diff", ".", "You", "need", "to", "make", "sure", "that", "arr", "and", "diff", "have", "the", "same", "shape", "and", "this", "function", "does", "not", "do", "sanity", "check", "." ]
def array_to_blobproto(arr, diff=None): """Converts a N-dimensional array to blob proto. If diff is given, also convert the diff. You need to make sure that arr and diff have the same shape, and this function does not do sanity check. """ blob = caffe_pb2.BlobProto() blob.shape.dim.extend(arr.shape) blob.data.extend(arr.astype(float).flat) if diff is not None: blob.diff.extend(diff.astype(float).flat) return blob
[ "def", "array_to_blobproto", "(", "arr", ",", "diff", "=", "None", ")", ":", "blob", "=", "caffe_pb2", ".", "BlobProto", "(", ")", "blob", ".", "shape", ".", "dim", ".", "extend", "(", "arr", ".", "shape", ")", "blob", ".", "data", ".", "extend", "(", "arr", ".", "astype", "(", "float", ")", ".", "flat", ")", "if", "diff", "is", "not", "None", ":", "blob", ".", "diff", ".", "extend", "(", "diff", ".", "astype", "(", "float", ")", ".", "flat", ")", "return", "blob" ]
https://github.com/eric612/MobileNet-YOLO/blob/69b4441cb3ec8d553fbdef788ad033e246f901bd/python/caffe/io.py#L36-L46
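Round-trip sketch for the record above (with pycaffe on the path): pack a float array, plus an optional diff of the same shape, into a BlobProto and check the documented layout.

import numpy as np
from caffe.io import array_to_blobproto

arr = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
blob = array_to_blobproto(arr, diff=np.zeros_like(arr))
assert list(blob.shape.dim) == [2, 3, 4]
assert len(blob.data) == arr.size == len(blob.diff)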
H-uru/Plasma
c2140ea046e82e9c199e257a7f2e7edb42602871
Scripts/Python/xCheat.py
python
GetSDL
(varName)
GetSDL is used to get the value of an Age SDL variable by name. Expects one argument: (string) VariableName
GetSDL is used to get the value of an Age SDL variable by name. Expects one argument: (string) VariableName
[ "GetSDL", "is", "used", "to", "get", "the", "value", "of", "an", "Age", "SDL", "variable", "by", "name", ".", "Expects", "one", "argument", ":", "(", "string", ")", "VariableName" ]
def GetSDL(varName): """ GetSDL is used to get the value of an Age SDL variable by name. Expects one argument: (string) VariableName """ import Plasma if not varName: print("xCheat.GetSDL(): GetSDL takes one argument: SDL variable name is required.\n Use 'all' to list all variables for the current Age.") return ageName = Plasma.PtGetAgeName() try: ageSDL = Plasma.PtGetAgeSDL() except: print(("xCheat.GetSDL(): Unable to retrieve SDL for '{}'.".format(ageName))) return varList = [] if varName == "all": if ageName == "Personal": varRecord = Plasma.ptVault().getPsnlAgeSDL() if varRecord: varList = varRecord.getVarList() else: vault = Plasma.ptAgeVault() if vault: varRecord = vault.getAgeSDL() if varRecord: varList = varRecord.getVarList() if not varList: print("xCheat.GetSDL(): Couldn't retrieve SDL list.") return maxlen = len(max(varList, key=len)) for var in varList: try: if len(ageSDL[var]) == 0: val = "" else: val = ageSDL[var][0] print(("xCheat.GetSDL(): {:>{width}} = {}".format(var, val, width=maxlen))) except: print(("xCheat.GetSDL(): Error retrieving value for '{}'.".format(var))) else: try: if len(ageSDL[varName]) == 0: print(("xCheat.GetSDL(): SDL variable '{}' is not set.".format(varName))) else: print(("xCheat.GetSDL(): {} = {}".format(varName, ageSDL[varName][0]))) except: print(("xCheat.GetSDL(): SDL variable '{}' not found.".format(varName))) return
[ "def", "GetSDL", "(", "varName", ")", ":", "import", "Plasma", "if", "not", "varName", ":", "print", "(", "\"xCheat.GetSDL(): GetSDL takes one argument: SDL variable name is required.\\n Use 'all' to list all variables for the current Age.\"", ")", "return", "ageName", "=", "Plasma", ".", "PtGetAgeName", "(", ")", "try", ":", "ageSDL", "=", "Plasma", ".", "PtGetAgeSDL", "(", ")", "except", ":", "print", "(", "(", "\"xCheat.GetSDL(): Unable to retrieve SDL for '{}'.\"", ".", "format", "(", "ageName", ")", ")", ")", "return", "varList", "=", "[", "]", "if", "varName", "==", "\"all\"", ":", "if", "ageName", "==", "\"Personal\"", ":", "varRecord", "=", "Plasma", ".", "ptVault", "(", ")", ".", "getPsnlAgeSDL", "(", ")", "if", "varRecord", ":", "varList", "=", "varRecord", ".", "getVarList", "(", ")", "else", ":", "vault", "=", "Plasma", ".", "ptAgeVault", "(", ")", "if", "vault", ":", "varRecord", "=", "vault", ".", "getAgeSDL", "(", ")", "if", "varRecord", ":", "varList", "=", "varRecord", ".", "getVarList", "(", ")", "if", "not", "varList", ":", "print", "(", "\"xCheat.GetSDL(): Couldn't retrieve SDL list.\"", ")", "return", "maxlen", "=", "len", "(", "max", "(", "varList", ",", "key", "=", "len", ")", ")", "for", "var", "in", "varList", ":", "try", ":", "if", "len", "(", "ageSDL", "[", "var", "]", ")", "==", "0", ":", "val", "=", "\"\"", "else", ":", "val", "=", "ageSDL", "[", "var", "]", "[", "0", "]", "print", "(", "(", "\"xCheat.GetSDL(): {:>{width}} = {}\"", ".", "format", "(", "var", ",", "val", ",", "width", "=", "maxlen", ")", ")", ")", "except", ":", "print", "(", "(", "\"xCheat.GetSDL(): Error retrieving value for '{}'.\"", ".", "format", "(", "var", ")", ")", ")", "else", ":", "try", ":", "if", "len", "(", "ageSDL", "[", "varName", "]", ")", "==", "0", ":", "print", "(", "(", "\"xCheat.GetSDL(): SDL variable '{}' is not set.\"", ".", "format", "(", "varName", ")", ")", ")", "else", ":", "print", "(", "(", "\"xCheat.GetSDL(): {} = {}\"", ".", "format", "(", "varName", ",", "ageSDL", "[", "varName", "]", "[", "0", "]", ")", ")", ")", "except", ":", "print", "(", "(", "\"xCheat.GetSDL(): SDL variable '{}' not found.\"", ".", "format", "(", "varName", ")", ")", ")", "return" ]
https://github.com/H-uru/Plasma/blob/c2140ea046e82e9c199e257a7f2e7edb42602871/Scripts/Python/xCheat.py#L721-L775
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/base64.py
python
urlsafe_b64encode
(s)
return b64encode(s).translate(_urlsafe_encode_translation)
Encode bytes using the URL- and filesystem-safe Base64 alphabet. Argument s is a bytes-like object to encode. The result is returned as a bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'.
Encode bytes using the URL- and filesystem-safe Base64 alphabet.
[ "Encode", "bytes", "using", "the", "URL", "-", "and", "filesystem", "-", "safe", "Base64", "alphabet", "." ]
def urlsafe_b64encode(s): """Encode bytes using the URL- and filesystem-safe Base64 alphabet. Argument s is a bytes-like object to encode. The result is returned as a bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation)
[ "def", "urlsafe_b64encode", "(", "s", ")", ":", "return", "b64encode", "(", "s", ")", ".", "translate", "(", "_urlsafe_encode_translation", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/base64.py#L111-L118
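The translation table only swaps the two URL-hostile characters; padding '=' is untouched, which is easy to confirm.

import base64

assert base64.b64encode(b'\xff\xfe') == b'//4='          # standard alphabet
assert base64.urlsafe_b64encode(b'\xff\xfe') == b'__4='  # '/' -> '_', '=' kept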
apache/arrow
af33dd1157eb8d7d9bfac25ebf61445b793b7943
dev/archery/archery/docker/cli.py
python
docker_build
(obj, image, *, force_pull, using_docker_cli, using_docker_buildx, use_cache, use_leaf_cache)
Execute docker-compose builds.
Execute docker-compose builds.
[ "Execute", "docker", "-", "compose", "builds", "." ]
def docker_build(obj, image, *, force_pull, using_docker_cli, using_docker_buildx, use_cache, use_leaf_cache): """ Execute docker-compose builds. """ compose = obj['compose'] using_docker_cli |= using_docker_buildx try: if force_pull: compose.pull(image, pull_leaf=use_leaf_cache, using_docker=using_docker_cli) compose.build(image, use_cache=use_cache, use_leaf_cache=use_leaf_cache, using_docker=using_docker_cli, using_buildx=using_docker_buildx, pull_parents=force_pull) except UndefinedImage as e: raise click.ClickException( "There is no service/image defined in docker-compose.yml with " "name: {}".format(str(e)) ) except RuntimeError as e: raise click.ClickException(str(e))
[ "def", "docker_build", "(", "obj", ",", "image", ",", "*", ",", "force_pull", ",", "using_docker_cli", ",", "using_docker_buildx", ",", "use_cache", ",", "use_leaf_cache", ")", ":", "compose", "=", "obj", "[", "'compose'", "]", "using_docker_cli", "|=", "using_docker_buildx", "try", ":", "if", "force_pull", ":", "compose", ".", "pull", "(", "image", ",", "pull_leaf", "=", "use_leaf_cache", ",", "using_docker", "=", "using_docker_cli", ")", "compose", ".", "build", "(", "image", ",", "use_cache", "=", "use_cache", ",", "use_leaf_cache", "=", "use_leaf_cache", ",", "using_docker", "=", "using_docker_cli", ",", "using_buildx", "=", "using_docker_buildx", ",", "pull_parents", "=", "force_pull", ")", "except", "UndefinedImage", "as", "e", ":", "raise", "click", ".", "ClickException", "(", "\"There is no service/image defined in docker-compose.yml with \"", "\"name: {}\"", ".", "format", "(", "str", "(", "e", ")", ")", ")", "except", "RuntimeError", "as", "e", ":", "raise", "click", ".", "ClickException", "(", "str", "(", "e", ")", ")" ]
https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/dev/archery/archery/docker/cli.py#L104-L127
grpc/grpc
27bc6fe7797e43298dc931b96dc57322d0852a9f
src/python/grpcio/grpc/_channel.py
python
_InactiveRpcError.running
(self)
return False
See grpc.Future.running.
See grpc.Future.running.
[ "See", "grpc", ".", "Future", ".", "running", "." ]
def running(self): """See grpc.Future.running.""" return False
[ "def", "running", "(", "self", ")", ":", "return", "False" ]
https://github.com/grpc/grpc/blob/27bc6fe7797e43298dc931b96dc57322d0852a9f/src/python/grpcio/grpc/_channel.py#L333-L335
geemaple/leetcode
68bc5032e1ee52c22ef2f2e608053484c487af54
leetcode/256.paint-house.py
python
Solution.minCost
(self, costs)
return min(table[-1])
:type costs: List[List[int]] :rtype: int
:type costs: List[List[int]] :rtype: int
[ ":", "type", "costs", ":", "List", "[", "List", "[", "int", "]]", ":", "rtype", ":", "int" ]
def minCost(self, costs): """ :type costs: List[List[int]] :rtype: int """ if costs is None or len(costs) == 0: return 0 size = len(costs) color_size = len(costs[0]) table = [[0 for _ in range(color_size)] for i in range(size + 1)] for i in range(1, size + 1): for j in range(color_size): cost = float('inf') for p in range(color_size): if j != p: cost = min(cost, table[i - 1][p] + costs[i - 1][j]) table[i][j] = cost return min(table[-1])
[ "def", "minCost", "(", "self", ",", "costs", ")", ":", "if", "costs", "is", "None", "or", "len", "(", "costs", ")", "==", "0", ":", "return", "0", "size", "=", "len", "(", "costs", ")", "color_size", "=", "len", "(", "costs", "[", "0", "]", ")", "table", "=", "[", "[", "0", "for", "_", "in", "range", "(", "color_size", ")", "]", "for", "i", "in", "range", "(", "size", "+", "1", ")", "]", "for", "i", "in", "range", "(", "1", ",", "size", "+", "1", ")", ":", "for", "j", "in", "range", "(", "color_size", ")", ":", "cost", "=", "float", "(", "'inf'", ")", "for", "p", "in", "range", "(", "color_size", ")", ":", "if", "j", "!=", "p", ":", "cost", "=", "min", "(", "cost", ",", "table", "[", "i", "-", "1", "]", "[", "p", "]", "+", "costs", "[", "i", "-", "1", "]", "[", "j", "]", ")", "table", "[", "i", "]", "[", "j", "]", "=", "cost", "return", "min", "(", "table", "[", "-", "1", "]", ")" ]
https://github.com/geemaple/leetcode/blob/68bc5032e1ee52c22ef2f2e608053484c487af54/leetcode/256.paint-house.py#L3-L24
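Worked example for the record above (the classic three-house instance): the DP picks colors 1, 2, 1 for costs 2 + 5 + 3 = 10.

costs = [[17, 2, 17],
         [16, 16, 5],
         [14, 3, 19]]
assert Solution().minCost(costs) == 10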
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/urllib3/contrib/securetransport.py
python
SecureTransportContext.check_hostname
(self, value)
SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file.
SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file.
[ "SecureTransport", "cannot", "have", "its", "hostname", "checking", "disabled", ".", "For", "more", "see", "the", "comment", "on", "getpeercert", "()", "in", "this", "file", "." ]
def check_hostname(self, value): """ SecureTransport cannot have its hostname checking disabled. For more, see the comment on getpeercert() in this file. """ pass
[ "def", "check_hostname", "(", "self", ",", "value", ")", ":", "pass" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/urllib3/contrib/securetransport.py#L767-L772
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_controls.py
python
TextAttr.HasTabs
(*args, **kwargs)
return _controls_.TextAttr_HasTabs(*args, **kwargs)
HasTabs(self) -> bool
HasTabs(self) -> bool
[ "HasTabs", "(", "self", ")", "-", ">", "bool" ]
def HasTabs(*args, **kwargs): """HasTabs(self) -> bool""" return _controls_.TextAttr_HasTabs(*args, **kwargs)
[ "def", "HasTabs", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "TextAttr_HasTabs", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_controls.py#L1780-L1782
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/operations/prepare.py
python
RequirementPreparer._fetch_metadata_using_lazy_wheel
(self, link)
Fetch metadata using lazy wheel, if possible.
Fetch metadata using lazy wheel, if possible.
[ "Fetch", "metadata", "using", "lazy", "wheel", "if", "possible", "." ]
def _fetch_metadata_using_lazy_wheel(self, link): # type: (Link) -> Optional[Distribution] """Fetch metadata using lazy wheel, if possible.""" if not self.use_lazy_wheel: return None if self.require_hashes: logger.debug('Lazy wheel is not used as hash checking is required') return None if link.is_file or not link.is_wheel: logger.debug( 'Lazy wheel is not used as ' '%r does not point to a remote wheel', link, ) return None wheel = Wheel(link.filename) name = canonicalize_name(wheel.name) logger.info( 'Obtaining dependency information from %s %s', name, wheel.version, ) url = link.url.split('#', 1)[0] try: return dist_from_wheel_url(name, url, self._session) except HTTPRangeRequestUnsupported: logger.debug('%s does not support range requests', url) return None
[ "def", "_fetch_metadata_using_lazy_wheel", "(", "self", ",", "link", ")", ":", "# type: (Link) -> Optional[Distribution]", "if", "not", "self", ".", "use_lazy_wheel", ":", "return", "None", "if", "self", ".", "require_hashes", ":", "logger", ".", "debug", "(", "'Lazy wheel is not used as hash checking is required'", ")", "return", "None", "if", "link", ".", "is_file", "or", "not", "link", ".", "is_wheel", ":", "logger", ".", "debug", "(", "'Lazy wheel is not used as '", "'%r does not points to a remote wheel'", ",", "link", ",", ")", "return", "None", "wheel", "=", "Wheel", "(", "link", ".", "filename", ")", "name", "=", "canonicalize_name", "(", "wheel", ".", "name", ")", "logger", ".", "info", "(", "'Obtaining dependency information from %s %s'", ",", "name", ",", "wheel", ".", "version", ",", ")", "url", "=", "link", ".", "url", ".", "split", "(", "'#'", ",", "1", ")", "[", "0", "]", "try", ":", "return", "dist_from_wheel_url", "(", "name", ",", "url", ",", "self", ".", "_session", ")", "except", "HTTPRangeRequestUnsupported", ":", "logger", ".", "debug", "(", "'%s does not support range requests'", ",", "url", ")", "return", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_internal/operations/prepare.py#L403-L430
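The mechanism behind the helper above is an HTTP range request against the remote wheel: only the tail of the file, where the zip central directory and METADATA live, is fetched. A concept-only sketch follows (this is not pip's code; wheel_url is a placeholder).

import requests

resp = requests.get(wheel_url, headers={'Range': 'bytes=-8192'})  # last 8 KiB
assert resp.status_code == 206   # a plain 200 means ranges are unsupported
tail = resp.content              # enough to locate the zip central directory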
SoarGroup/Soar
a1c5e249499137a27da60533c72969eef3b8ab6b
scons/scons-local-4.1.0/SCons/Tool/FortranCommon.py
python
ComputeFortranSuffixes
(suffixes, ppsuffixes)
suffixes are fortran source files, and ppsuffixes the ones to be pre-processed. Both should be sequences, not strings.
suffixes are fortran source files, and ppsuffixes the ones to be pre-processed. Both should be sequences, not strings.
[ "suffixes", "are", "fortran", "source", "files", "and", "ppsuffixes", "the", "ones", "to", "be", "pre", "-", "processed", ".", "Both", "should", "be", "sequences", "not", "strings", "." ]
def ComputeFortranSuffixes(suffixes, ppsuffixes): """suffixes are fortran source files, and ppsuffixes the ones to be pre-processed. Both should be sequences, not strings.""" assert len(suffixes) > 0 s = suffixes[0] sup = s.upper() upper_suffixes = [_.upper() for _ in suffixes] if SCons.Util.case_sensitive_suffixes(s, sup): ppsuffixes.extend(upper_suffixes) else: suffixes.extend(upper_suffixes)
[ "def", "ComputeFortranSuffixes", "(", "suffixes", ",", "ppsuffixes", ")", ":", "assert", "len", "(", "suffixes", ")", ">", "0", "s", "=", "suffixes", "[", "0", "]", "sup", "=", "s", ".", "upper", "(", ")", "upper_suffixes", "=", "[", "_", ".", "upper", "(", ")", "for", "_", "in", "suffixes", "]", "if", "SCons", ".", "Util", ".", "case_sensitive_suffixes", "(", "s", ",", "sup", ")", ":", "ppsuffixes", ".", "extend", "(", "upper_suffixes", ")", "else", ":", "suffixes", ".", "extend", "(", "upper_suffixes", ")" ]
https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Tool/FortranCommon.py#L88-L98
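Effect sketch for the record above: the upper-case variants land in ppsuffixes on a case-sensitive platform and are folded into suffixes otherwise (both lists are mutated in place).

suffixes, ppsuffixes = ['.f', '.for'], ['.fpp']
ComputeFortranSuffixes(suffixes, ppsuffixes)
# case-sensitive:   ppsuffixes == ['.fpp', '.F', '.FOR']
# case-insensitive: suffixes   == ['.f', '.for', '.F', '.FOR']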
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py
python
ObjectsRewrite.RunWithArgs
(self, sourceBucket, sourceObject, destinationBucket, destinationObject)
Rewrites a source object to a destination object. Optionally overrides metadata. Args: sourceBucket: Name of the bucket in which to find the source object. sourceObject: Name of the source object. destinationBucket: Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. destinationObject: Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. Flags: destinationPredefinedAcl: Apply a predefined set of access controls to the destination object. ifGenerationMatch: Makes the operation conditional on whether the destination object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the destination object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the destination object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the destination object's current metageneration does not match the given value. ifSourceGenerationMatch: Makes the operation conditional on whether the source object's generation matches the given value. ifSourceGenerationNotMatch: Makes the operation conditional on whether the source object's generation does not match the given value. ifSourceMetagenerationMatch: Makes the operation conditional on whether the source object's current metageneration matches the given value. ifSourceMetagenerationNotMatch: Makes the operation conditional on whether the source object's current metageneration does not match the given value. maxBytesRewrittenPerCall: The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid. object: A Object resource to be passed as the request body. projection: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. rewriteToken: Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request. sourceGeneration: If present, selects a specific revision of the source object (as opposed to the latest version, the default).
Rewrites a source object to a destination object. Optionally overrides metadata.
[ "Rewrites", "a", "source", "object", "to", "a", "destination", "object", ".", "Optionally", "overrides", "metadata", "." ]
def RunWithArgs(self, sourceBucket, sourceObject, destinationBucket, destinationObject): """Rewrites a source object to a destination object. Optionally overrides metadata. Args: sourceBucket: Name of the bucket in which to find the source object. sourceObject: Name of the source object. destinationBucket: Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. destinationObject: Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. Flags: destinationPredefinedAcl: Apply a predefined set of access controls to the destination object. ifGenerationMatch: Makes the operation conditional on whether the destination object's current generation matches the given value. ifGenerationNotMatch: Makes the operation conditional on whether the destination object's current generation does not match the given value. ifMetagenerationMatch: Makes the operation conditional on whether the destination object's current metageneration matches the given value. ifMetagenerationNotMatch: Makes the operation conditional on whether the destination object's current metageneration does not match the given value. ifSourceGenerationMatch: Makes the operation conditional on whether the source object's generation matches the given value. ifSourceGenerationNotMatch: Makes the operation conditional on whether the source object's generation does not match the given value. ifSourceMetagenerationMatch: Makes the operation conditional on whether the source object's current metageneration matches the given value. ifSourceMetagenerationNotMatch: Makes the operation conditional on whether the source object's current metageneration does not match the given value. maxBytesRewrittenPerCall: The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid. object: A Object resource to be passed as the request body. projection: Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. rewriteToken: Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request. sourceGeneration: If present, selects a specific revision of the source object (as opposed to the latest version, the default). """ client = GetClientFromFlags() global_params = GetGlobalParamsFromFlags() request = messages.StorageObjectsRewriteRequest( sourceBucket=sourceBucket.decode('utf8'), sourceObject=sourceObject.decode('utf8'), destinationBucket=destinationBucket.decode('utf8'), destinationObject=destinationObject.decode('utf8'), ) if FLAGS['destinationPredefinedAcl'].present: request.destinationPredefinedAcl = messages.StorageObjectsRewriteRequest.DestinationPredefinedAclValueValuesEnum(FLAGS.destinationPredefinedAcl) if FLAGS['ifGenerationMatch'].present: request.ifGenerationMatch = int(FLAGS.ifGenerationMatch) if FLAGS['ifGenerationNotMatch'].present: request.ifGenerationNotMatch = int(FLAGS.ifGenerationNotMatch) if FLAGS['ifMetagenerationMatch'].present: request.ifMetagenerationMatch = int(FLAGS.ifMetagenerationMatch) if FLAGS['ifMetagenerationNotMatch'].present: request.ifMetagenerationNotMatch = int(FLAGS.ifMetagenerationNotMatch) if FLAGS['ifSourceGenerationMatch'].present: request.ifSourceGenerationMatch = int(FLAGS.ifSourceGenerationMatch) if FLAGS['ifSourceGenerationNotMatch'].present: request.ifSourceGenerationNotMatch = int(FLAGS.ifSourceGenerationNotMatch) if FLAGS['ifSourceMetagenerationMatch'].present: request.ifSourceMetagenerationMatch = int(FLAGS.ifSourceMetagenerationMatch) if FLAGS['ifSourceMetagenerationNotMatch'].present: request.ifSourceMetagenerationNotMatch = int(FLAGS.ifSourceMetagenerationNotMatch) if FLAGS['maxBytesRewrittenPerCall'].present: request.maxBytesRewrittenPerCall = int(FLAGS.maxBytesRewrittenPerCall) if FLAGS['object'].present: request.object = apitools_base.JsonToMessage(messages.Object, FLAGS.object) if FLAGS['projection'].present: request.projection = messages.StorageObjectsRewriteRequest.ProjectionValueValuesEnum(FLAGS.projection) if FLAGS['rewriteToken'].present: request.rewriteToken = FLAGS.rewriteToken.decode('utf8') if FLAGS['sourceGeneration'].present: request.sourceGeneration = int(FLAGS.sourceGeneration) result = client.objects.Rewrite( request, global_params=global_params) print apitools_base_cli.FormatOutput(result)
[ "def", "RunWithArgs", "(", "self", ",", "sourceBucket", ",", "sourceObject", ",", "destinationBucket", ",", "destinationObject", ")", ":", "client", "=", "GetClientFromFlags", "(", ")", "global_params", "=", "GetGlobalParamsFromFlags", "(", ")", "request", "=", "messages", ".", "StorageObjectsRewriteRequest", "(", "sourceBucket", "=", "sourceBucket", ".", "decode", "(", "'utf8'", ")", ",", "sourceObject", "=", "sourceObject", ".", "decode", "(", "'utf8'", ")", ",", "destinationBucket", "=", "destinationBucket", ".", "decode", "(", "'utf8'", ")", ",", "destinationObject", "=", "destinationObject", ".", "decode", "(", "'utf8'", ")", ",", ")", "if", "FLAGS", "[", "'destinationPredefinedAcl'", "]", ".", "present", ":", "request", ".", "destinationPredefinedAcl", "=", "messages", ".", "StorageObjectsRewriteRequest", ".", "DestinationPredefinedAclValueValuesEnum", "(", "FLAGS", ".", "destinationPredefinedAcl", ")", "if", "FLAGS", "[", "'ifGenerationMatch'", "]", ".", "present", ":", "request", ".", "ifGenerationMatch", "=", "int", "(", "FLAGS", ".", "ifGenerationMatch", ")", "if", "FLAGS", "[", "'ifGenerationNotMatch'", "]", ".", "present", ":", "request", ".", "ifGenerationNotMatch", "=", "int", "(", "FLAGS", ".", "ifGenerationNotMatch", ")", "if", "FLAGS", "[", "'ifMetagenerationMatch'", "]", ".", "present", ":", "request", ".", "ifMetagenerationMatch", "=", "int", "(", "FLAGS", ".", "ifMetagenerationMatch", ")", "if", "FLAGS", "[", "'ifMetagenerationNotMatch'", "]", ".", "present", ":", "request", ".", "ifMetagenerationNotMatch", "=", "int", "(", "FLAGS", ".", "ifMetagenerationNotMatch", ")", "if", "FLAGS", "[", "'ifSourceGenerationMatch'", "]", ".", "present", ":", "request", ".", "ifSourceGenerationMatch", "=", "int", "(", "FLAGS", ".", "ifSourceGenerationMatch", ")", "if", "FLAGS", "[", "'ifSourceGenerationNotMatch'", "]", ".", "present", ":", "request", ".", "ifSourceGenerationNotMatch", "=", "int", "(", "FLAGS", ".", "ifSourceGenerationNotMatch", ")", "if", "FLAGS", "[", "'ifSourceMetagenerationMatch'", "]", ".", "present", ":", "request", ".", "ifSourceMetagenerationMatch", "=", "int", "(", "FLAGS", ".", "ifSourceMetagenerationMatch", ")", "if", "FLAGS", "[", "'ifSourceMetagenerationNotMatch'", "]", ".", "present", ":", "request", ".", "ifSourceMetagenerationNotMatch", "=", "int", "(", "FLAGS", ".", "ifSourceMetagenerationNotMatch", ")", "if", "FLAGS", "[", "'maxBytesRewrittenPerCall'", "]", ".", "present", ":", "request", ".", "maxBytesRewrittenPerCall", "=", "int", "(", "FLAGS", ".", "maxBytesRewrittenPerCall", ")", "if", "FLAGS", "[", "'object'", "]", ".", "present", ":", "request", ".", "object", "=", "apitools_base", ".", "JsonToMessage", "(", "messages", ".", "Object", ",", "FLAGS", ".", "object", ")", "if", "FLAGS", "[", "'projection'", "]", ".", "present", ":", "request", ".", "projection", "=", "messages", ".", "StorageObjectsRewriteRequest", ".", "ProjectionValueValuesEnum", "(", "FLAGS", ".", "projection", ")", "if", "FLAGS", "[", "'rewriteToken'", "]", ".", "present", ":", "request", ".", "rewriteToken", "=", "FLAGS", ".", "rewriteToken", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'sourceGeneration'", "]", ".", "present", ":", "request", ".", "sourceGeneration", "=", "int", "(", "FLAGS", ".", "sourceGeneration", ")", "result", "=", "client", ".", "objects", ".", "Rewrite", "(", "request", ",", "global_params", "=", "global_params", ")", "print", "apitools_base_cli", ".", "FormatOutput", "(", "result", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py#L2779-L2871
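A hedged sketch of the rewriteToken loop the docstring describes, written against the same generated `client` and `messages` objects this command uses; the bucket and object names are placeholders, and the response's `done` and `rewriteToken` fields are part of the Storage v1 rewrite API:

    request = messages.StorageObjectsRewriteRequest(
        sourceBucket='src-bucket', sourceObject='big.bin',
        destinationBucket='dst-bucket', destinationObject='big.bin')
    result = client.objects.Rewrite(request)
    while not result.done:
        # Large cross-location/storage-class copies need multiple calls;
        # feed each response's token back into the next request.
        request.rewriteToken = result.rewriteToken
        result = client.objects.Rewrite(request)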
VAR-solutions/Algorithms
4ad6773e9675ef35aa858ca3969be5ddf6e3daea
LinkedList/LinkedListModule.py
python
LinkedList.deleteNodeByPosition
(self,pos)
deletes a node at the given position
deletes a node at the given position
[ "deletes", "a", "node", "at", "the", "given", "position" ]
def deleteNodeByPosition(self, pos):
    """deletes a node at the given position"""
    if self.head is None:
        return False
    elif pos == 0:
        self.head = self.head.next
        return True
    else:
        CurrentNode = self.head
        while pos > 1:
            CurrentNode = CurrentNode.next
            pos -= 1
        CurrentNode.next = CurrentNode.next.next
        return True
[ "def", "deleteNodeByPosition", "(", "self", ",", "pos", ")", ":", "if", "self", ".", "head", "is", "None", ":", "return", "False", "elif", "pos", "==", "0", ":", "self", ".", "head", "=", "self", ".", "head", ".", "next", "return", "True", "else", ":", "CurrentNode", "=", "self", ".", "head", "while", "pos", ">", "1", ":", "CurrentNode", "=", "CurrentNode", ".", "next", "pos", "-=", "1", "CurrentNode", ".", "next", "=", "CurrentNode", ".", "next", ".", "next", "return", "True" ]
https://github.com/VAR-solutions/Algorithms/blob/4ad6773e9675ef35aa858ca3969be5ddf6e3daea/LinkedList/LinkedListModule.py#L89-L105
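Note that the method above raises AttributeError when `pos` runs past the end of the list (`CurrentNode.next` becomes None mid-traversal). A bounds-checked sketch of the same operation; the guard is my addition, not part of the original module:

    def delete_node_by_position(self, pos):
        """Delete the node at position pos; return False if pos is out of range."""
        if self.head is None:
            return False
        if pos == 0:
            self.head = self.head.next
            return True
        current = self.head
        while pos > 1 and current.next is not None:
            current = current.next
            pos -= 1
        if current.next is None:  # pos pointed past the last node
            return False
        current.next = current.next.next
        return True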
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBLaunchInfo.SetEnvironmentEntries
(self, *args)
return _lldb.SBLaunchInfo_SetEnvironmentEntries(self, *args)
SetEnvironmentEntries(self, list envp, bool append)
SetEnvironmentEntries(self, list envp, bool append)
[ "SetEnvironmentEntries", "(", "self", "list", "envp", "bool", "append", ")" ]
def SetEnvironmentEntries(self, *args):
    """SetEnvironmentEntries(self, list envp, bool append)"""
    return _lldb.SBLaunchInfo_SetEnvironmentEntries(self, *args)
[ "def", "SetEnvironmentEntries", "(", "self", ",", "*", "args", ")", ":", "return", "_lldb", ".", "SBLaunchInfo_SetEnvironmentEntries", "(", "self", ",", "*", "args", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L5485-L5487
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/grid.py
python
PyGridTableBase.__init__
(self, *args, **kwargs)
__init__(self) -> PyGridTableBase
__init__(self) -> PyGridTableBase
[ "__init__", "(", "self", ")", "-", ">", "PyGridTableBase" ]
def __init__(self, *args, **kwargs):
    """__init__(self) -> PyGridTableBase"""
    _grid.PyGridTableBase_swiginit(self, _grid.new_PyGridTableBase(*args, **kwargs))
    self._setOORInfo(self)
    PyGridTableBase._setCallbackInfo(self, self, PyGridTableBase)
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_grid", ".", "PyGridTableBase_swiginit", "(", "self", ",", "_grid", ".", "new_PyGridTableBase", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "self", ".", "_setOORInfo", "(", "self", ")", "PyGridTableBase", ".", "_setCallbackInfo", "(", "self", ",", "self", ",", "PyGridTableBase", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L932-L935
hpi-xnor/BMXNet
ed0b201da6667887222b8e4b5f997c4f6b61943d
python/mxnet/callback.py
python
module_checkpoint
(mod, prefix, period=1, save_optimizer_states=False)
return _callback
Callback to checkpoint Module to prefix every epoch.

Parameters
----------
mod : subclass of BaseModule
    The module to checkpoint.
prefix : str
    The file prefix for this checkpoint.
period : int
    How many epochs to wait before checkpointing. Defaults to 1.
save_optimizer_states : bool
    Indicates whether or not to save optimizer states for continued training.

Returns
-------
callback : function
    The callback function that can be passed as iter_end_callback to fit.
Callback to checkpoint Module to prefix every epoch.
[ "Callback", "to", "checkpoint", "Module", "to", "prefix", "every", "epoch", "." ]
def module_checkpoint(mod, prefix, period=1, save_optimizer_states=False):
    """Callback to checkpoint Module to prefix every epoch.

    Parameters
    ----------
    mod : subclass of BaseModule
        The module to checkpoint.
    prefix : str
        The file prefix for this checkpoint.
    period : int
        How many epochs to wait before checkpointing. Defaults to 1.
    save_optimizer_states : bool
        Indicates whether or not to save optimizer states for continued training.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    period = int(max(1, period))
    # pylint: disable=unused-argument
    def _callback(iter_no, sym=None, arg=None, aux=None):
        """The checkpoint function."""
        if (iter_no + 1) % period == 0:
            mod.save_checkpoint(prefix, iter_no + 1, save_optimizer_states)
    return _callback
[ "def", "module_checkpoint", "(", "mod", ",", "prefix", ",", "period", "=", "1", ",", "save_optimizer_states", "=", "False", ")", ":", "period", "=", "int", "(", "max", "(", "1", ",", "period", ")", ")", "# pylint: disable=unused-argument", "def", "_callback", "(", "iter_no", ",", "sym", "=", "None", ",", "arg", "=", "None", ",", "aux", "=", "None", ")", ":", "\"\"\"The checkpoint function.\"\"\"", "if", "(", "iter_no", "+", "1", ")", "%", "period", "==", "0", ":", "mod", ".", "save_checkpoint", "(", "prefix", ",", "iter_no", "+", "1", ",", "save_optimizer_states", ")", "return", "_callback" ]
https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/callback.py#L27-L52
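A minimal sketch of how the returned callback behaves, driving it with a stub module rather than a real BaseModule; the FakeModule class is hypothetical, invented only to make the example self-contained:

    class FakeModule(object):
        """Stand-in for a BaseModule subclass; only save_checkpoint is needed."""
        def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
            print('would write %s-%04d.params' % (prefix, epoch))

    callback = module_checkpoint(FakeModule(), 'mymodel', period=2)
    for iter_no in range(4):
        callback(iter_no)  # fires on iter_no 1 and 3, i.e. after epochs 2 and 4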
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/computation/common.py
python
_ensure_decoded
(s)
return s
If we have bytes, decode them to unicode.
If we have bytes, decode them to unicode.
[ "If", "we", "have", "bytes", "decode", "them", "to", "unicode", "." ]
def _ensure_decoded(s):
    """
    If we have bytes, decode them to unicode.
    """
    if isinstance(s, (np.bytes_, bytes)):
        s = s.decode(get_option("display.encoding"))
    return s
[ "def", "_ensure_decoded", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "(", "np", ".", "bytes_", ",", "bytes", ")", ")", ":", "s", "=", "s", ".", "decode", "(", "get_option", "(", "\"display.encoding\"", ")", ")", "return", "s" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/computation/common.py#L8-L14
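A standalone sketch of the same decode-if-bytes pattern; it hard-codes utf-8 where the pandas helper reads the "display.encoding" option:

    import numpy as np

    def ensure_decoded(s, encoding="utf-8"):
        """Decode numpy or plain bytes to str; pass anything else through."""
        if isinstance(s, (np.bytes_, bytes)):
            s = s.decode(encoding)
        return s

    print(ensure_decoded(b"index > 5"))    # index > 5
    print(ensure_decoded("already text"))  # already text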
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/aui.py
python
AuiToolBarItem.SetDisabledBitmap
(*args, **kwargs)
return _aui.AuiToolBarItem_SetDisabledBitmap(*args, **kwargs)
SetDisabledBitmap(self, Bitmap bmp)
SetDisabledBitmap(self, Bitmap bmp)
[ "SetDisabledBitmap", "(", "self", "Bitmap", "bmp", ")" ]
def SetDisabledBitmap(*args, **kwargs):
    """SetDisabledBitmap(self, Bitmap bmp)"""
    return _aui.AuiToolBarItem_SetDisabledBitmap(*args, **kwargs)
[ "def", "SetDisabledBitmap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_aui", ".", "AuiToolBarItem_SetDisabledBitmap", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/aui.py#L1785-L1787
OSGeo/gdal
3748fc4ba4fba727492774b2b908a2130c864a83
swig/python/osgeo/osr.py
python
SpatialReference.GetSemiMajor
(self, *args)
return _osr.SpatialReference_GetSemiMajor(self, *args)
r"""GetSemiMajor(SpatialReference self) -> double
r"""GetSemiMajor(SpatialReference self) -> double
[ "r", "GetSemiMajor", "(", "SpatialReference", "self", ")", "-", ">", "double" ]
def GetSemiMajor(self, *args):
    r"""GetSemiMajor(SpatialReference self) -> double"""
    return _osr.SpatialReference_GetSemiMajor(self, *args)
[ "def", "GetSemiMajor", "(", "self", ",", "*", "args", ")", ":", "return", "_osr", ".", "SpatialReference_GetSemiMajor", "(", "self", ",", "*", "args", ")" ]
https://github.com/OSGeo/gdal/blob/3748fc4ba4fba727492774b2b908a2130c864a83/swig/python/osgeo/osr.py#L522-L524
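A short usage sketch, assuming the GDAL Python bindings are installed; for EPSG:4326 the WGS 84 semi-major axis should come back as 6378137.0 metres:

    from osgeo import osr

    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)   # WGS 84
    print(srs.GetSemiMajor())  # 6378137.0 (metres)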
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/asyncio/base_events.py
python
BaseEventLoop._connect_sock
(self, exceptions, addr_info, local_addr_infos=None)
Create, bind and connect one socket.
Create, bind and connect one socket.
[ "Create", "bind", "and", "connect", "one", "socket", "." ]
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
    """Create, bind and connect one socket."""
    my_exceptions = []
    exceptions.append(my_exceptions)
    family, type_, proto, _, address = addr_info
    sock = None
    try:
        sock = socket.socket(family=family, type=type_, proto=proto)
        sock.setblocking(False)
        if local_addr_infos is not None:
            for _, _, _, _, laddr in local_addr_infos:
                try:
                    sock.bind(laddr)
                    break
                except OSError as exc:
                    msg = (
                        f'error while attempting to bind on '
                        f'address {laddr!r}: '
                        f'{exc.strerror.lower()}'
                    )
                    exc = OSError(exc.errno, msg)
                    my_exceptions.append(exc)
            else:  # all bind attempts failed
                raise my_exceptions.pop()
        await self.sock_connect(sock, address)
        return sock
    except OSError as exc:
        my_exceptions.append(exc)
        if sock is not None:
            sock.close()
        raise
    except:
        if sock is not None:
            sock.close()
        raise
[ "async", "def", "_connect_sock", "(", "self", ",", "exceptions", ",", "addr_info", ",", "local_addr_infos", "=", "None", ")", ":", "my_exceptions", "=", "[", "]", "exceptions", ".", "append", "(", "my_exceptions", ")", "family", ",", "type_", ",", "proto", ",", "_", ",", "address", "=", "addr_info", "sock", "=", "None", "try", ":", "sock", "=", "socket", ".", "socket", "(", "family", "=", "family", ",", "type", "=", "type_", ",", "proto", "=", "proto", ")", "sock", ".", "setblocking", "(", "False", ")", "if", "local_addr_infos", "is", "not", "None", ":", "for", "_", ",", "_", ",", "_", ",", "_", ",", "laddr", "in", "local_addr_infos", ":", "try", ":", "sock", ".", "bind", "(", "laddr", ")", "break", "except", "OSError", "as", "exc", ":", "msg", "=", "(", "f'error while attempting to bind on '", "f'address {laddr!r}: '", "f'{exc.strerror.lower()}'", ")", "exc", "=", "OSError", "(", "exc", ".", "errno", ",", "msg", ")", "my_exceptions", ".", "append", "(", "exc", ")", "else", ":", "# all bind attempts failed", "raise", "my_exceptions", ".", "pop", "(", ")", "await", "self", ".", "sock_connect", "(", "sock", ",", "address", ")", "return", "sock", "except", "OSError", "as", "exc", ":", "my_exceptions", ".", "append", "(", "exc", ")", "if", "sock", "is", "not", "None", ":", "sock", ".", "close", "(", ")", "raise", "except", ":", "if", "sock", "is", "not", "None", ":", "sock", ".", "close", "(", ")", "raise" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/asyncio/base_events.py#L931-L965
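The same create/connect dance, sketched against asyncio's public API; loop.getaddrinfo and loop.sock_connect are documented coroutines, while the helper above is asyncio-internal and not meant to be called directly:

    import asyncio
    import socket

    async def open_socket(host, port):
        loop = asyncio.get_running_loop()
        infos = await loop.getaddrinfo(host, port, type=socket.SOCK_STREAM)
        family, type_, proto, _, address = infos[0]  # first resolved address only
        sock = socket.socket(family, type_, proto)
        sock.setblocking(False)  # sock_connect requires a non-blocking socket
        try:
            await loop.sock_connect(sock, address)
        except OSError:
            sock.close()
            raise
        return sock

    # asyncio.run(open_socket("example.com", 80))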
goldeneye-source/ges-code
2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d
thirdparty/protobuf-2.3.0/python/google/protobuf/internal/containers.py
python
RepeatedCompositeFieldContainer.__delitem__
(self, key)
Deletes the item at the specified position.
Deletes the item at the specified position.
[ "Deletes", "the", "item", "at", "the", "specified", "position", "." ]
def __delitem__(self, key):
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()
[ "def", "__delitem__", "(", "self", ",", "key", ")", ":", "del", "self", ".", "_values", "[", "key", "]", "self", ".", "_message_listener", ".", "Modified", "(", ")" ]
https://github.com/goldeneye-source/ges-code/blob/2630cd8ef3d015af53c72ec2e19fc1f7e7fe8d9d/thirdparty/protobuf-2.3.0/python/google/protobuf/internal/containers.py#L227-L230
unicode-org/icu
2f8749a026f3ddc8cf54d4622480b7c543bb7fc0
tools/unicode/py/preparseucd.py
python
ParseNameAliases
(in_file)
Parses Name_Alias from NameAliases.txt. A character can have multiple aliases.

In Unicode 6.0, there are two columns, with a name correction in the second
column. In Unicode 6.1, there are three columns. The second contains an
alias, the third its type. The documented types are:
    correction, control, alternate, figment, abbreviation
This function does not sort the types, assuming they appear in this order.
Parses Name_Alias from NameAliases.txt. A character can have multiple aliases.
[ "Parses", "Name_Alias", "from", "NameAliases", ".", "txt", ".", "A", "character", "can", "have", "multiple", "aliases", "." ]
def ParseNameAliases(in_file):
    """Parses Name_Alias from NameAliases.txt. A character can have multiple aliases.

    In Unicode 6.0, there are two columns, with a name correction in the second
    column. In Unicode 6.1, there are three columns. The second contains an
    alias, the third its type. The documented types are:
        correction, control, alternate, figment, abbreviation
    This function does not sort the types, assuming they appear in this order.
    """
    for data in ReadUCDLines(in_file):
        start = data[2]
        end = data[3]
        if start != end:
            raise ValueError("NameAliases.txt has an alias for a range %04lX..%04lX" %
                             (start, end))
        fields = data[4]
        if len(fields) == 2:
            alias = "correction=" + fields[1]
        else:
            alias = fields[2] + '=' + fields[1]
        update = (NeedToSetAlways, DoSetNameAlias, alias)
        UpdateProps(start, end, update)
[ "def", "ParseNameAliases", "(", "in_file", ")", ":", "for", "data", "in", "ReadUCDLines", "(", "in_file", ")", ":", "start", "=", "data", "[", "2", "]", "end", "=", "data", "[", "3", "]", "if", "start", "!=", "end", ":", "raise", "ValueError", "(", "\"NameAliases.txt has an alias for a range %04lX..%04lX\"", "%", "(", "start", ",", "end", ")", ")", "fields", "=", "data", "[", "4", "]", "if", "len", "(", "fields", ")", "==", "2", ":", "alias", "=", "\"correction=\"", "+", "fields", "[", "1", "]", "else", ":", "alias", "=", "fields", "[", "2", "]", "+", "'='", "+", "fields", "[", "1", "]", "update", "=", "(", "NeedToSetAlways", ",", "DoSetNameAlias", ",", "alias", ")", "UpdateProps", "(", "start", ",", "end", ",", "update", ")" ]
https://github.com/unicode-org/icu/blob/2f8749a026f3ddc8cf54d4622480b7c543bb7fc0/tools/unicode/py/preparseucd.py#L841-L866
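A hedged standalone sketch of the two- versus three-column handling on raw NameAliases.txt-style lines; the real code receives pre-split fields from ReadUCDLines, and the sample lines follow the formats the docstring describes:

    for line in ("01A2;LATIN CAPITAL LETTER GHA",  # Unicode 6.0 layout
                 "0000;NULL;control"):             # Unicode 6.1 layout
        fields = line.split(";")
        if len(fields) == 2:
            alias = "correction=" + fields[1]
        else:
            alias = fields[2] + "=" + fields[1]
        print(alias)
    # correction=LATIN CAPITAL LETTER GHA
    # control=NULL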
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/resolvelib/structs.py
python
_SequenceIterableView.for_preference
(self)
return self._sequence
Provide a candidate iterable for `get_preference()`
Provide a candidate iterable for `get_preference()`
[ "Provide", "a", "candidate", "iterable", "for", "get_preference", "()" ]
def for_preference(self):
    """Provide a candidate iterable for `get_preference()`"""
    return self._sequence
[ "def", "for_preference", "(", "self", ")", ":", "return", "self", ".", "_sequence" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/resolvelib/structs.py#L134-L136
microsoft/ivy
9f3c7ecc0b2383129fdd0953e10890d98d09a82d
ivy/z3_utils.py
python
_to_z3
(x)
Convert a term or a sort to a Z3 object.
Convert a term or a sort to a Z3 object.
[ "Convert", "a", "term", "or", "a", "sort", "to", "a", "Z3", "object", "." ]
def _to_z3(x):
    """
    Convert a term or a sort to a Z3 object.
    """
    if x in _z3_interpreted:
        return _z3_interpreted[x]
    elif type(x) is UninterpretedSort:
        if x not in _z3_uninterpreted_sorts:
            _z3_uninterpreted_sorts[x] = z3.DeclareSort(x.name)
        return _z3_uninterpreted_sorts[x]
    elif type(x) is FunctionSort:
        assert False, "FunctionSort's aren't converted to Z3"
    elif type(x) in (Var, Const) and first_order_sort(x.sort):
        return z3.Const(x.name + ':' + str(x.sort), to_z3(x.sort))
    elif type(x) in (Var, Const) and type(x.sort) is FunctionSort and len(x.sort.sorts) == 1:
        # convert to first order
        s = x.sort.sorts[0]
        return z3.Const(x.name + ':' + str(s), to_z3(s))
    elif type(x) in (Var, Const) and type(x.sort) is FunctionSort:
        assert type(x) is Const, "Cannot convert high-order variables to Z3, only constants"
        return z3.Function(x.name, *(to_z3(s) for s in x.sort))
    elif type(x) is Apply and len(x.terms) == 0:
        # convert application to use of first order symbol
        return to_z3(x.func)
    elif type(x) is Apply:
        return to_z3(x.func)(*(to_z3(t) for t in x.terms))
    elif type(x) in _z3_operators:
        return _z3_operators[type(x)](*(to_z3(y) for y in x))
    elif type(x) in _z3_quantifiers:
        if len(x.variables) == 0:
            return to_z3(x.body)
        else:
            return _z3_quantifiers[type(x)](
                [to_z3(v) for v in x.variables],
                to_z3(x.body),
            )
    else:
        assert False, type(x)
[ "def", "_to_z3", "(", "x", ")", ":", "if", "x", "in", "_z3_interpreted", ":", "return", "_z3_interpreted", "[", "x", "]", "elif", "type", "(", "x", ")", "is", "UninterpretedSort", ":", "if", "x", "not", "in", "_z3_uninterpreted_sorts", ":", "_z3_uninterpreted_sorts", "[", "x", "]", "=", "z3", ".", "DeclareSort", "(", "x", ".", "name", ")", "return", "_z3_uninterpreted_sorts", "[", "x", "]", "elif", "type", "(", "x", ")", "is", "FunctionSort", ":", "assert", "False", ",", "\"FunctionSort's aren't converted to Z3\"", "elif", "type", "(", "x", ")", "in", "(", "Var", ",", "Const", ")", "and", "first_order_sort", "(", "x", ".", "sort", ")", ":", "return", "z3", ".", "Const", "(", "x", ".", "name", "+", "':'", "+", "str", "(", "x", ".", "sort", ")", ",", "to_z3", "(", "x", ".", "sort", ")", ")", "elif", "type", "(", "x", ")", "in", "(", "Var", ",", "Const", ")", "and", "type", "(", "x", ".", "sort", ")", "is", "FunctionSort", "and", "len", "(", "x", ".", "sort", ".", "sorts", ")", "==", "1", ":", "# convert to first order", "s", "=", "x", ".", "sort", ".", "sorts", "[", "0", "]", "return", "z3", ".", "Const", "(", "x", ".", "name", "+", "':'", "+", "str", "(", "s", ")", ",", "to_z3", "(", "s", ")", ")", "elif", "type", "(", "x", ")", "in", "(", "Var", ",", "Const", ")", "and", "type", "(", "x", ".", "sort", ")", "is", "FunctionSort", ":", "assert", "type", "(", "x", ")", "is", "Const", ",", "\"Cannot convert high-order variables to Z3, only constants\"", "return", "z3", ".", "Function", "(", "x", ".", "name", ",", "*", "(", "to_z3", "(", "s", ")", "for", "s", "in", "x", ".", "sort", ")", ")", "elif", "type", "(", "x", ")", "is", "Apply", "and", "len", "(", "x", ".", "terms", ")", "==", "0", ":", "# convert application to use of first order symbol", "return", "to_z3", "(", "x", ".", "func", ")", "elif", "type", "(", "x", ")", "is", "Apply", ":", "return", "to_z3", "(", "x", ".", "func", ")", "(", "*", "(", "to_z3", "(", "t", ")", "for", "t", "in", "x", ".", "terms", ")", ")", "elif", "type", "(", "x", ")", "in", "_z3_operators", ":", "return", "_z3_operators", "[", "type", "(", "x", ")", "]", "(", "*", "(", "to_z3", "(", "y", ")", "for", "y", "in", "x", ")", ")", "elif", "type", "(", "x", ")", "in", "_z3_quantifiers", ":", "if", "len", "(", "x", ".", "variables", ")", "==", "0", ":", "return", "to_z3", "(", "x", ".", "body", ")", "else", ":", "return", "_z3_quantifiers", "[", "type", "(", "x", ")", "]", "(", "[", "to_z3", "(", "v", ")", "for", "v", "in", "x", ".", "variables", "]", ",", "to_z3", "(", "x", ".", "body", ")", ",", ")", "else", ":", "assert", "False", ",", "type", "(", "x", ")" ]
https://github.com/microsoft/ivy/blob/9f3c7ecc0b2383129fdd0953e10890d98d09a82d/ivy/z3_utils.py#L52-L102
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/asyncio/events.py
python
AbstractServer.close
(self)
Stop serving. This leaves existing connections open.
Stop serving. This leaves existing connections open.
[ "Stop", "serving", ".", "This", "leaves", "existing", "connections", "open", "." ]
def close(self):
    """Stop serving. This leaves existing connections open."""
    raise NotImplementedError
[ "def", "close", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/asyncio/events.py#L165-L167
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/traitlets/py2/traitlets/traitlets.py
python
HasTraits.add_traits
(self, **traits)
Dynamically add trait attributes to the HasTraits instance.
Dynamically add trait attributes to the HasTraits instance.
[ "Dynamically", "add", "trait", "attributes", "to", "the", "HasTraits", "instance", "." ]
def add_traits(self, **traits):
    """Dynamically add trait attributes to the HasTraits instance."""
    self.__class__ = type(self.__class__.__name__, (self.__class__,), traits)
    for trait in traits.values():
        trait.instance_init(self)
[ "def", "add_traits", "(", "self", ",", "*", "*", "traits", ")", ":", "self", ".", "__class__", "=", "type", "(", "self", ".", "__class__", ".", "__name__", ",", "(", "self", ".", "__class__", ",", ")", ",", "traits", ")", "for", "trait", "in", "traits", ".", "values", "(", ")", ":", "trait", ".", "instance_init", "(", "self", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/traitlets/py2/traitlets/traitlets.py#L1329-L1334
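A short usage sketch, assuming traitlets is installed. Because add_traits rebuilds the instance's class on the fly, the new traits exist only on that object:

    from traitlets import HasTraits, Int, Unicode

    class Point(HasTraits):
        x = Int(0)

    p = Point()
    p.add_traits(y=Int(5), label=Unicode("origin"))
    print(p.y, p.label)  # 5 origin

    q = Point()
    # q has no 'y' trait: add_traits changed only p's (per-instance) class.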
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/propgrid.py
python
PGTextCtrlEditor_GetTextCtrlValueFromControl
(*args, **kwargs)
return _propgrid.PGTextCtrlEditor_GetTextCtrlValueFromControl(*args, **kwargs)
PGTextCtrlEditor_GetTextCtrlValueFromControl(wxVariant variant, PGProperty property, Window ctrl) -> bool
PGTextCtrlEditor_GetTextCtrlValueFromControl(wxVariant variant, PGProperty property, Window ctrl) -> bool
[ "PGTextCtrlEditor_GetTextCtrlValueFromControl", "(", "wxVariant", "variant", "PGProperty", "property", "Window", "ctrl", ")", "-", ">", "bool" ]
def PGTextCtrlEditor_GetTextCtrlValueFromControl(*args, **kwargs):
    """PGTextCtrlEditor_GetTextCtrlValueFromControl(wxVariant variant, PGProperty property, Window ctrl) -> bool"""
    return _propgrid.PGTextCtrlEditor_GetTextCtrlValueFromControl(*args, **kwargs)
[ "def", "PGTextCtrlEditor_GetTextCtrlValueFromControl", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PGTextCtrlEditor_GetTextCtrlValueFromControl", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L2751-L2753
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/idl/check_stable_api_commands_have_idl_definitions.py
python
list_commands_for_api
(api_version: str, mongod_or_mongos: str, install_dir: str)
Get a list of commands in a given API version by calling listCommands.
Get a list of commands in a given API version by calling listCommands.
[ "Get", "a", "list", "of", "commands", "in", "a", "given", "API", "version", "by", "calling", "listCommands", "." ]
def list_commands_for_api(api_version: str, mongod_or_mongos: str, install_dir: str) -> Set[str]:
    """Get a list of commands in a given API version by calling listCommands."""
    assert mongod_or_mongos in ("mongod", "mongos")
    logging.info("Calling listCommands on %s", mongod_or_mongos)
    dbpath = TemporaryDirectory()
    fixturelib = FixtureLib()
    mongod_executable = os.path.join(install_dir, "mongod")
    mongos_executable = os.path.join(install_dir, "mongos")
    if mongod_or_mongos == "mongod":
        logger = loggers.new_fixture_logger("MongoDFixture", 0)
        logger.parent = LOGGER
        fixture: interface.Fixture = fixturelib.make_fixture(
            "MongoDFixture", logger, 0, dbpath_prefix=dbpath.name,
            mongod_executable=mongod_executable)
    else:
        logger = loggers.new_fixture_logger("ShardedClusterFixture", 0)
        logger.parent = LOGGER
        fixture = fixturelib.make_fixture(
            "ShardedClusterFixture", logger, 0, dbpath_prefix=dbpath.name,
            mongos_executable=mongos_executable,
            mongod_executable=mongod_executable, mongod_options={})

    fixture.setup()
    fixture.await_ready()

    try:
        client = MongoClient(fixture.get_driver_connection_url())
        reply = client.admin.command('listCommands')
        commands = {
            name
            for name, info in reply['commands'].items()
            if api_version in info['apiVersions']
        }
        logging.info("Found %s commands in API Version %s on %s", len(commands),
                     api_version, mongod_or_mongos)
        return commands
    finally:
        fixture.teardown()
[ "def", "list_commands_for_api", "(", "api_version", ":", "str", ",", "mongod_or_mongos", ":", "str", ",", "install_dir", ":", "str", ")", "->", "Set", "[", "str", "]", ":", "assert", "mongod_or_mongos", "in", "(", "\"mongod\"", ",", "\"mongos\"", ")", "logging", ".", "info", "(", "\"Calling listCommands on %s\"", ",", "mongod_or_mongos", ")", "dbpath", "=", "TemporaryDirectory", "(", ")", "fixturelib", "=", "FixtureLib", "(", ")", "mongod_executable", "=", "os", ".", "path", ".", "join", "(", "install_dir", ",", "\"mongod\"", ")", "mongos_executable", "=", "os", ".", "path", ".", "join", "(", "install_dir", ",", "\"mongos\"", ")", "if", "mongod_or_mongos", "==", "\"mongod\"", ":", "logger", "=", "loggers", ".", "new_fixture_logger", "(", "\"MongoDFixture\"", ",", "0", ")", "logger", ".", "parent", "=", "LOGGER", "fixture", ":", "interface", ".", "Fixture", "=", "fixturelib", ".", "make_fixture", "(", "\"MongoDFixture\"", ",", "logger", ",", "0", ",", "dbpath_prefix", "=", "dbpath", ".", "name", ",", "mongod_executable", "=", "mongod_executable", ")", "else", ":", "logger", "=", "loggers", ".", "new_fixture_logger", "(", "\"ShardedClusterFixture\"", ",", "0", ")", "logger", ".", "parent", "=", "LOGGER", "fixture", "=", "fixturelib", ".", "make_fixture", "(", "\"ShardedClusterFixture\"", ",", "logger", ",", "0", ",", "dbpath_prefix", "=", "dbpath", ".", "name", ",", "mongos_executable", "=", "mongos_executable", ",", "mongod_executable", "=", "mongod_executable", ",", "mongod_options", "=", "{", "}", ")", "fixture", ".", "setup", "(", ")", "fixture", ".", "await_ready", "(", ")", "try", ":", "client", "=", "MongoClient", "(", "fixture", ".", "get_driver_connection_url", "(", ")", ")", "reply", "=", "client", ".", "admin", ".", "command", "(", "'listCommands'", ")", "commands", "=", "{", "name", "for", "name", ",", "info", "in", "reply", "[", "'commands'", "]", ".", "items", "(", ")", "if", "api_version", "in", "info", "[", "'apiVersions'", "]", "}", "logging", ".", "info", "(", "\"Found %s commands in API Version %s on %s\"", ",", "len", "(", "commands", ")", ",", "api_version", ",", "mongod_or_mongos", ")", "return", "commands", "finally", ":", "fixture", ".", "teardown", "(", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/check_stable_api_commands_have_idl_definitions.py#L88-L124
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/engine/data_adapter.py
python
GeneratorDataAdapter._standardize_batch
(self, data)
return data
Standardizes a batch output by a generator.
Standardizes a batch output by a generator.
[ "Standardizes", "a", "batch", "output", "by", "a", "generator", "." ]
def _standardize_batch(self, data):
    """Standardizes a batch output by a generator."""
    # Removes `None`s.
    x, y, sample_weight = unpack_x_y_sample_weight(data)
    data = pack_x_y_sample_weight(x, y, sample_weight)

    data = nest.list_to_tuple(data)

    def _convert_dtype(t):
        if (isinstance(t, np.ndarray) and
                issubclass(t.dtype.type, np.floating)):
            return np.array(t, dtype=backend.floatx())
        return t

    data = nest.map_structure(_convert_dtype, data)
    return data
[ "def", "_standardize_batch", "(", "self", ",", "data", ")", ":", "# Removes `None`s.", "x", ",", "y", ",", "sample_weight", "=", "unpack_x_y_sample_weight", "(", "data", ")", "data", "=", "pack_x_y_sample_weight", "(", "x", ",", "y", ",", "sample_weight", ")", "data", "=", "nest", ".", "list_to_tuple", "(", "data", ")", "def", "_convert_dtype", "(", "t", ")", ":", "if", "(", "isinstance", "(", "t", ",", "np", ".", "ndarray", ")", "and", "issubclass", "(", "t", ".", "dtype", ".", "type", ",", "np", ".", "floating", ")", ")", ":", "return", "np", ".", "array", "(", "t", ",", "dtype", "=", "backend", ".", "floatx", "(", ")", ")", "return", "t", "data", "=", "nest", ".", "map_structure", "(", "_convert_dtype", ",", "data", ")", "return", "data" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/engine/data_adapter.py#L846-L860
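A standalone sketch of the float-dtype normalization step above, with Keras's default floatx of float32 hard-coded in place of backend.floatx():

    import numpy as np

    def convert_dtype(t, floatx="float32"):
        """Cast floating numpy arrays to floatx; leave everything else alone."""
        if isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating):
            return np.array(t, dtype=floatx)
        return t

    print(convert_dtype(np.array([1.0, 2.0])).dtype)  # float32
    print(convert_dtype(np.array([1, 2])).dtype)      # integer dtype unchanged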
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_gdi.py
python
DC.EndPage
(*args, **kwargs)
return _gdi_.DC_EndPage(*args, **kwargs)
EndPage(self) Ends a document page (only relevant when outputting to a printer).
EndPage(self)
[ "EndPage", "(", "self", ")" ]
def EndPage(*args, **kwargs):
    """
    EndPage(self)

    Ends a document page (only relevant when outputting to a printer).
    """
    return _gdi_.DC_EndPage(*args, **kwargs)
[ "def", "EndPage", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "DC_EndPage", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L3998-L4004
htcondor/htcondor
4829724575176d1d6c936e4693dfd78a728569b0
bindings/python/htcondor/dags/dag.py
python
DAG.__init__
( self, dagman_config: Optional[Mapping[str, Any]] = None, dagman_job_attributes: Optional[Mapping[str, Any]] = None, max_jobs_by_category: Optional[Mapping[str, int]] = None, dot_config: Optional[DotConfig] = None, jobstate_log: Optional[Path] = None, node_status_file: Optional[NodeStatusFile] = None, )
Parameters
----------
dagman_config
    A mapping of DAGMan configuration options.
dagman_job_attributes
    A mapping that describes additional HTCondor JobAd attributes for the
    DAGMan job itself.
max_jobs_by_category
    A mapping that describes the maximum number of jobs (values) that should
    be run simultaneously from each category (keys).
dot_config
    Configuration options for writing a DOT file, as a :class:`DotConfig`.
jobstate_log
    The path to the jobstate log. If not given, the jobstate log will not
    be written.
node_status_file
    Configuration options for the node status file, as a :class:`NodeStatusFile`.
Parameters ---------- dagman_config A mapping of DAGMan configuration options. dagman_job_attributes A mapping that describes additional HTCondor JobAd attributes for the DAGMan job itself. max_jobs_by_category A mapping that describes the maximum number of jobs (values) that should be run simultaneously from each category (keys). dot_config Configuration options for writing a DOT file, as a :class:`DotConfig`. jobstate_log The path to the jobstate log. If not given, the jobstate log will not be written. node_status_file Configuration options for the node status file, as a :class:`NodeStatusFile`.
[ "Parameters", "----------", "dagman_config", "A", "mapping", "of", "DAGMan", "configuration", "options", ".", "dagman_job_attributes", "A", "mapping", "that", "describes", "additional", "HTCondor", "JobAd", "attributes", "for", "the", "DAGMan", "job", "itself", ".", "max_jobs_by_category", "A", "mapping", "that", "describes", "the", "maximum", "number", "of", "jobs", "(", "values", ")", "that", "should", "be", "run", "simultaneously", "from", "each", "category", "(", "keys", ")", ".", "dot_config", "Configuration", "options", "for", "writing", "a", "DOT", "file", "as", "a", ":", "class", ":", "DotConfig", ".", "jobstate_log", "The", "path", "to", "the", "jobstate", "log", ".", "If", "not", "given", "the", "jobstate", "log", "will", "not", "be", "written", ".", "node_status_file", "Configuration", "options", "for", "the", "node", "status", "file", "as", "a", ":", "class", ":", "NodeStatusFile", "." ]
def __init__(
    self,
    dagman_config: Optional[Mapping[str, Any]] = None,
    dagman_job_attributes: Optional[Mapping[str, Any]] = None,
    max_jobs_by_category: Optional[Mapping[str, int]] = None,
    dot_config: Optional[DotConfig] = None,
    jobstate_log: Optional[Path] = None,
    node_status_file: Optional[NodeStatusFile] = None,
):
    """
    Parameters
    ----------
    dagman_config
        A mapping of DAGMan configuration options.
    dagman_job_attributes
        A mapping that describes additional HTCondor JobAd attributes for
        the DAGMan job itself.
    max_jobs_by_category
        A mapping that describes the maximum number of jobs (values) that
        should be run simultaneously from each category (keys).
    dot_config
        Configuration options for writing a DOT file, as a :class:`DotConfig`.
    jobstate_log
        The path to the jobstate log. If not given, the jobstate log will
        not be written.
    node_status_file
        Configuration options for the node status file, as a :class:`NodeStatusFile`.
    """
    self._nodes = NodeStore()
    self._edges = EdgeStore()
    self._final_node = None

    self.jobstate_log = jobstate_log if jobstate_log is None else Path(jobstate_log)
    self.max_jobs_per_category = max_jobs_by_category or {}
    self.dagman_config = dagman_config or {}
    self.dagman_job_attrs = dagman_job_attributes or {}
    self.dot_config = dot_config
    self.node_status_file = node_status_file
[ "def", "__init__", "(", "self", ",", "dagman_config", ":", "Optional", "[", "Mapping", "[", "str", ",", "Any", "]", "]", "=", "None", ",", "dagman_job_attributes", ":", "Optional", "[", "Mapping", "[", "str", ",", "Any", "]", "]", "=", "None", ",", "max_jobs_by_category", ":", "Optional", "[", "Mapping", "[", "str", ",", "int", "]", "]", "=", "None", ",", "dot_config", ":", "Optional", "[", "DotConfig", "]", "=", "None", ",", "jobstate_log", ":", "Optional", "[", "Path", "]", "=", "None", ",", "node_status_file", ":", "Optional", "[", "NodeStatusFile", "]", "=", "None", ",", ")", ":", "self", ".", "_nodes", "=", "NodeStore", "(", ")", "self", ".", "_edges", "=", "EdgeStore", "(", ")", "self", ".", "_final_node", "=", "None", "self", ".", "jobstate_log", "=", "jobstate_log", "if", "jobstate_log", "is", "None", "else", "Path", "(", "jobstate_log", ")", "self", ".", "max_jobs_per_category", "=", "max_jobs_by_category", "or", "{", "}", "self", ".", "dagman_config", "=", "dagman_config", "or", "{", "}", "self", ".", "dagman_job_attrs", "=", "dagman_job_attributes", "or", "{", "}", "self", ".", "dot_config", "=", "dot_config", "self", ".", "node_status_file", "=", "node_status_file" ]
https://github.com/htcondor/htcondor/blob/4829724575176d1d6c936e4693dfd78a728569b0/bindings/python/htcondor/dags/dag.py#L132-L171
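A short usage sketch, assuming the htcondor Python bindings are installed; write_dag is, as I understand it, part of the same htcondor.dags module, and the output directory name here is a placeholder:

    from pathlib import Path
    from htcondor import dags

    dag = dags.DAG(
        max_jobs_by_category={"short": 10},  # at most 10 "short" jobs at once
        jobstate_log=Path("dag.jobstate.log"),
    )
    dags.write_dag(dag, Path("my-dag-dir"))  # serialize to a submit directory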
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py
python
rlocate
(iterable, pred=bool, window_size=None)
return reversed(list(locate(iterable, pred, window_size)))
Yield the index of each item in *iterable* for which *pred* returns
``True``, starting from the right and moving left.

*pred* defaults to :func:`bool`, which will select truthy items:

    >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
    [4, 2, 1]

Set *pred* to a custom function to, e.g., find the indexes for a particular
item:

    >>> iterable = iter('abcb')
    >>> pred = lambda x: x == 'b'
    >>> list(rlocate(iterable, pred))
    [3, 1]

If *window_size* is given, then the *pred* function will be called with
that many items. This enables searching for sub-sequences:

    >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
    >>> pred = lambda *args: args == (1, 2, 3)
    >>> list(rlocate(iterable, pred=pred, window_size=3))
    [9, 5, 1]

Beware, this function won't return anything for infinite iterables.
If *iterable* is reversible, ``rlocate`` will reverse it and search from
the right. Otherwise, it will search from the left and return the results
in reverse order.

See :func:`locate` for other example applications.
Yield the index of each item in *iterable* for which *pred* returns ``True``, starting from the right and moving left.
[ "Yield", "the", "index", "of", "each", "item", "in", "*", "iterable", "*", "for", "which", "*", "pred", "*", "returns", "True", "starting", "from", "the", "right", "and", "moving", "left", "." ]
def rlocate(iterable, pred=bool, window_size=None):
    """Yield the index of each item in *iterable* for which *pred* returns
    ``True``, starting from the right and moving left.

    *pred* defaults to :func:`bool`, which will select truthy items:

        >>> list(rlocate([0, 1, 1, 0, 1, 0, 0]))  # Truthy at 1, 2, and 4
        [4, 2, 1]

    Set *pred* to a custom function to, e.g., find the indexes for a
    particular item:

        >>> iterable = iter('abcb')
        >>> pred = lambda x: x == 'b'
        >>> list(rlocate(iterable, pred))
        [3, 1]

    If *window_size* is given, then the *pred* function will be called with
    that many items. This enables searching for sub-sequences:

        >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
        >>> pred = lambda *args: args == (1, 2, 3)
        >>> list(rlocate(iterable, pred=pred, window_size=3))
        [9, 5, 1]

    Beware, this function won't return anything for infinite iterables.
    If *iterable* is reversible, ``rlocate`` will reverse it and search from
    the right. Otherwise, it will search from the left and return the
    results in reverse order.

    See :func:`locate` for other example applications.
    """
    if window_size is None:
        try:
            len_iter = len(iterable)
            return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
        except TypeError:
            pass

    return reversed(list(locate(iterable, pred, window_size)))
[ "def", "rlocate", "(", "iterable", ",", "pred", "=", "bool", ",", "window_size", "=", "None", ")", ":", "if", "window_size", "is", "None", ":", "try", ":", "len_iter", "=", "len", "(", "iterable", ")", "return", "(", "len_iter", "-", "i", "-", "1", "for", "i", "in", "locate", "(", "reversed", "(", "iterable", ")", ",", "pred", ")", ")", "except", "TypeError", ":", "pass", "return", "reversed", "(", "list", "(", "locate", "(", "iterable", ",", "pred", ",", "window_size", ")", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/setuptools/_vendor/more_itertools/more.py#L2892-L2932
ApolloAuto/apollo
463fb82f9e979d02dcb25044e60931293ab2dba0
modules/tools/routing/debug_topo.py
python
plot_all
(graph, plot_id='')
plot topology graph
plot topology graph
[ "plot", "topology", "graph" ]
def plot_all(graph, plot_id=''):
    """plot topology graph"""
    plt.close()
    fig = plt.figure()
    fig.canvas.mpl_connect('button_press_event', util.onclick)
    lane_middle_point_map = {}
    for i, (nd, color) in enumerate(zip(graph.node, color_iter)):
        nd_mid_pt = plot_node(nd, plot_id, color)
        lane_middle_point_map[nd.lane_id] = nd_mid_pt
    for i, eg in enumerate(graph.edge):
        plot_edge(eg, lane_middle_point_map)
    plt.gca().set_aspect(1)
    plt.title('Routing topology graph')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.draw()
[ "def", "plot_all", "(", "graph", ",", "plot_id", "=", "''", ")", ":", "plt", ".", "close", "(", ")", "fig", "=", "plt", ".", "figure", "(", ")", "fig", ".", "canvas", ".", "mpl_connect", "(", "'button_press_event'", ",", "util", ".", "onclick", ")", "lane_middle_point_map", "=", "{", "}", "for", "i", ",", "(", "nd", ",", "color", ")", "in", "enumerate", "(", "zip", "(", "graph", ".", "node", ",", "color_iter", ")", ")", ":", "nd_mid_pt", "=", "plot_node", "(", "nd", ",", "plot_id", ",", "color", ")", "lane_middle_point_map", "[", "nd", ".", "lane_id", "]", "=", "nd_mid_pt", "for", "i", ",", "eg", "in", "enumerate", "(", "graph", ".", "edge", ")", ":", "plot_edge", "(", "eg", ",", "lane_middle_point_map", ")", "plt", ".", "gca", "(", ")", ".", "set_aspect", "(", "1", ")", "plt", ".", "title", "(", "'Routing topology graph'", ")", "plt", ".", "xlabel", "(", "'x'", ")", "plt", ".", "ylabel", "(", "'y'", ")", "plt", ".", "legend", "(", ")", "plt", ".", "draw", "(", ")" ]
https://github.com/ApolloAuto/apollo/blob/463fb82f9e979d02dcb25044e60931293ab2dba0/modules/tools/routing/debug_topo.py#L156-L173
NERSC/timemory
431912b360ff50d1a160d7826e2eea04fbd1037f
timemory/util/util.py
python
rss_usage.__call__
(self, func)
return function_wrapper
Decorator
Decorator
[ "Decorator" ]
def __call__(self, func):
    """Decorator"""
    _file = FILE(3)
    _line = LINE(2)

    @wraps(func)
    def function_wrapper(*args, **kwargs):
        self.parse_wrapped(func, args, kwargs)
        self.determine_signature(
            is_decorator=True, is_context_manager=False
        )

        _frame = FRAME(1)
        _func = func.__name__
        _key = ""
        _args = self.arg_string(_frame)
        if self.signature == context.blank:
            _key = "{}{}".format(self.key, _args)
        elif self.signature == context.basic:
            _key = "{}{}/{}".format(_func, _args, self.key)
        elif self.signature == context.full:
            _key = "{}{}@{}:{}/{}".format(
                _func, _args, _file, _line, self.key
            )
        _key = _key.strip("/")

        self._self_obj = _rss_usage(_key)
        self._self_dif = _rss_usage(_key)
        self._self_dif.record()

        # run function
        ret = func(*args, **kwargs)

        # record
        self._self_obj.record()
        self._self_obj -= self._self_dif

        print("{}".format(self._self_obj))

        return ret

    return function_wrapper
[ "def", "__call__", "(", "self", ",", "func", ")", ":", "_file", "=", "FILE", "(", "3", ")", "_line", "=", "LINE", "(", "2", ")", "@", "wraps", "(", "func", ")", "def", "function_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "parse_wrapped", "(", "func", ",", "args", ",", "kwargs", ")", "self", ".", "determine_signature", "(", "is_decorator", "=", "True", ",", "is_context_manager", "=", "False", ")", "_frame", "=", "FRAME", "(", "1", ")", "_func", "=", "func", ".", "__name__", "_key", "=", "\"\"", "_args", "=", "self", ".", "arg_string", "(", "_frame", ")", "if", "self", ".", "signature", "==", "context", ".", "blank", ":", "_key", "=", "\"{}{}\"", ".", "format", "(", "self", ".", "key", ",", "_args", ")", "elif", "self", ".", "signature", "==", "context", ".", "basic", ":", "_key", "=", "\"{}{}/{}\"", ".", "format", "(", "_func", ",", "_args", ",", "self", ".", "key", ")", "elif", "self", ".", "signature", "==", "context", ".", "full", ":", "_key", "=", "\"{}{}@{}:{}/{}\"", ".", "format", "(", "_func", ",", "_args", ",", "_file", ",", "_line", ",", "self", ".", "key", ")", "_key", "=", "_key", ".", "strip", "(", "\"/\"", ")", "self", ".", "_self_obj", "=", "_rss_usage", "(", "_key", ")", "self", ".", "_self_dif", "=", "_rss_usage", "(", "_key", ")", "self", ".", "_self_dif", ".", "record", "(", ")", "# run function", "ret", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# record", "self", ".", "_self_obj", ".", "record", "(", ")", "self", ".", "_self_obj", "-=", "self", ".", "_self_dif", "print", "(", "\"{}\"", ".", "format", "(", "self", ".", "_self_obj", ")", ")", "return", "ret", "return", "function_wrapper" ]
https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/timemory/util/util.py#L391-L431
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/util/_doctools.py
python
TablePlotter._conv
(self, data)
return data
Convert each input to a form appropriate for table output
Convert each input to a form appropriate for table output
[ "Convert", "each", "input", "to", "a", "form", "appropriate", "for", "table", "output" ]
def _conv(self, data):
    """Convert each input to a form appropriate for table output"""
    if isinstance(data, pd.Series):
        if data.name is None:
            data = data.to_frame(name='')
        else:
            data = data.to_frame()
    data = data.fillna('NaN')
    return data
[ "def", "_conv", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "pd", ".", "Series", ")", ":", "if", "data", ".", "name", "is", "None", ":", "data", "=", "data", ".", "to_frame", "(", "name", "=", "''", ")", "else", ":", "data", "=", "data", ".", "to_frame", "(", ")", "data", "=", "data", ".", "fillna", "(", "'NaN'", ")", "return", "data" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/util/_doctools.py#L105-L113
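A standalone sketch of the same normalization with plain pandas (no TablePlotter needed): a Series is promoted to a one-column frame and missing values become the literal string 'NaN' for display:

    import pandas as pd

    s = pd.Series([1.0, None], name="score")
    frame = s.to_frame().fillna("NaN")
    print(frame)  # second row holds the string 'NaN', not a missing value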
wujixiu/helmet-detection
8eff5c59ddfba5a29e0b76aeb48babcb49246178
hardhat-wearing-detection/SSD-RPA/python/caffe/net_spec.py
python
param_name_dict
()
return dict(zip(param_type_names, param_names))
Find out the correspondence between layer names and parameter names.
Find out the correspondence between layer names and parameter names.
[ "Find", "out", "the", "correspondence", "between", "layer", "names", "and", "parameter", "names", "." ]
def param_name_dict():
    """Find out the correspondence between layer names and parameter names."""
    layer = caffe_pb2.LayerParameter()
    # get all parameter names (typically underscore case) and corresponding
    # type names (typically camel case), which contain the layer names
    # (note that not all parameters correspond to layers, but we'll ignore that)
    param_names = [f.name for f in layer.DESCRIPTOR.fields if f.name.endswith('_param')]
    param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
    # strip the final '_param' or 'Parameter'
    param_names = [s[:-len('_param')] for s in param_names]
    param_type_names = [s[:-len('Parameter')] for s in param_type_names]
    return dict(zip(param_type_names, param_names))
[ "def", "param_name_dict", "(", ")", ":", "layer", "=", "caffe_pb2", ".", "LayerParameter", "(", ")", "# get all parameter names (typically underscore case) and corresponding", "# type names (typically camel case), which contain the layer names", "# (note that not all parameters correspond to layers, but we'll ignore that)", "param_names", "=", "[", "f", ".", "name", "for", "f", "in", "layer", ".", "DESCRIPTOR", ".", "fields", "if", "f", ".", "name", ".", "endswith", "(", "'_param'", ")", "]", "param_type_names", "=", "[", "type", "(", "getattr", "(", "layer", ",", "s", ")", ")", ".", "__name__", "for", "s", "in", "param_names", "]", "# strip the final '_param' or 'Parameter'", "param_names", "=", "[", "s", "[", ":", "-", "len", "(", "'_param'", ")", "]", "for", "s", "in", "param_names", "]", "param_type_names", "=", "[", "s", "[", ":", "-", "len", "(", "'Parameter'", ")", "]", "for", "s", "in", "param_type_names", "]", "return", "dict", "(", "zip", "(", "param_type_names", ",", "param_names", ")", ")" ]
https://github.com/wujixiu/helmet-detection/blob/8eff5c59ddfba5a29e0b76aeb48babcb49246178/hardhat-wearing-detection/SSD-RPA/python/caffe/net_spec.py#L28-L40
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/metrics/python/ops/histogram_ops.py
python
_auc_hist_accumulate
(hist_true, hist_false, nbins, collections)
Accumulate histograms in new variables.
Accumulate histograms in new variables.
[ "Accumulate", "histograms", "in", "new", "variables", "." ]
def _auc_hist_accumulate(hist_true, hist_false, nbins, collections):
    """Accumulate histograms in new variables."""
    with variable_scope.variable_scope(
            None, 'hist_accumulate', [hist_true, hist_false]):
        # Holds running total histogram of scores for records labeled True.
        hist_true_acc = variable_scope.get_variable(
            'hist_true_acc',
            shape=[nbins],
            dtype=hist_true.dtype,
            initializer=init_ops.zeros_initializer(),
            collections=collections,
            trainable=False)
        # Holds running total histogram of scores for records labeled False.
        hist_false_acc = variable_scope.get_variable(
            'hist_false_acc',
            shape=[nbins],
            dtype=hist_true.dtype,
            initializer=init_ops.zeros_initializer(),
            collections=collections,
            trainable=False)

        update_op = control_flow_ops.group(
            hist_true_acc.assign_add(hist_true),
            hist_false_acc.assign_add(hist_false),
            name='update_op')

        return hist_true_acc, hist_false_acc, update_op
[ "def", "_auc_hist_accumulate", "(", "hist_true", ",", "hist_false", ",", "nbins", ",", "collections", ")", ":", "with", "variable_scope", ".", "variable_scope", "(", "None", ",", "'hist_accumulate'", ",", "[", "hist_true", ",", "hist_false", "]", ")", ":", "# Holds running total histogram of scores for records labeled True.", "hist_true_acc", "=", "variable_scope", ".", "get_variable", "(", "'hist_true_acc'", ",", "shape", "=", "[", "nbins", "]", ",", "dtype", "=", "hist_true", ".", "dtype", ",", "initializer", "=", "init_ops", ".", "zeros_initializer", "(", ")", ",", "collections", "=", "collections", ",", "trainable", "=", "False", ")", "# Holds running total histogram of scores for records labeled False.", "hist_false_acc", "=", "variable_scope", ".", "get_variable", "(", "'hist_false_acc'", ",", "shape", "=", "[", "nbins", "]", ",", "dtype", "=", "hist_true", ".", "dtype", ",", "initializer", "=", "init_ops", ".", "zeros_initializer", "(", ")", ",", "collections", "=", "collections", ",", "trainable", "=", "False", ")", "update_op", "=", "control_flow_ops", ".", "group", "(", "hist_true_acc", ".", "assign_add", "(", "hist_true", ")", ",", "hist_false_acc", ".", "assign_add", "(", "hist_false", ")", ",", "name", "=", "'update_op'", ")", "return", "hist_true_acc", ",", "hist_false_acc", ",", "update_op" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/metrics/python/ops/histogram_ops.py#L149-L175
swift/swift
12d031cf8177fdec0137f9aa7e2912fa23c4416b
3rdParty/SCons/scons-3.0.1/engine/SCons/Scanner/LaTeX.py
python
LaTeX.canonical_text
(self, text)
return '\n'.join(out).rstrip()+'\n'
Standardize the contents of an input TeX file.

Currently:
 * removes comments, unwrapping comment-wrapped lines.
Standardize the contents of an input TeX file.
[ "Standardize", "the", "contents", "of", "an", "input", "TeX", "file", "." ]
def canonical_text(self, text):
    """Standardize the contents of an input TeX file.

    Currently:
     * removes comments, unwrapping comment-wrapped lines.
    """
    out = []
    line_continues_a_comment = False
    for line in text.splitlines():
        line, comment = self.comment_re.findall(line)[0]
        if line_continues_a_comment == True:
            out[-1] = out[-1] + line.lstrip()
        else:
            out.append(line)
        line_continues_a_comment = len(comment) > 0
    return '\n'.join(out).rstrip() + '\n'
[ "def", "canonical_text", "(", "self", ",", "text", ")", ":", "out", "=", "[", "]", "line_continues_a_comment", "=", "False", "for", "line", "in", "text", ".", "splitlines", "(", ")", ":", "line", ",", "comment", "=", "self", ".", "comment_re", ".", "findall", "(", "line", ")", "[", "0", "]", "if", "line_continues_a_comment", "==", "True", ":", "out", "[", "-", "1", "]", "=", "out", "[", "-", "1", "]", "+", "line", ".", "lstrip", "(", ")", "else", ":", "out", ".", "append", "(", "line", ")", "line_continues_a_comment", "=", "len", "(", "comment", ")", ">", "0", "return", "'\\n'", ".", "join", "(", "out", ")", ".", "rstrip", "(", ")", "+", "'\\n'" ]
https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Scanner/LaTeX.py#L326-L341
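A standalone sketch of the comment-unwrapping idea with a simplified comment regex; the scanner's real comment_re lives elsewhere in this module, so the pattern below (which handles escaped \% but not verbatim environments) is my stand-in:

    import re

    # group 1 is the code part of the line, group 2 the trailing TeX comment
    comment_re = re.compile(r'^((?:[^%\\]|\\.)*)(%.*)?$')

    def strip_comments(text):
        out, continuing = [], False
        for line in text.splitlines():
            code, comment = comment_re.findall(line)[0]
            if continuing:
                out[-1] += code.lstrip()  # unwrap a comment-wrapped line break
            else:
                out.append(code)
            continuing = len(comment) > 0
        return '\n'.join(out).rstrip() + '\n'

    print(strip_comments('a%\nb\n'))  # 'ab\n' -- the % hid the line break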
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/feature_column_v2.py
python
_TPUEmbeddingColumnV2.get_embedding_var_name
(self)
return self.categorical_column.name
get_embedding_var_name.
get_embedding_var_name.
[ "get_embedding_var_name", "." ]
def get_embedding_var_name(self):
    """get_embedding_var_name."""
    return self.categorical_column.name
[ "def", "get_embedding_var_name", "(", "self", ")", ":", "return", "self", ".", "categorical_column", ".", "name" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/tpu/feature_column_v2.py#L326-L328
openmm/openmm
cb293447c4fc8b03976dfe11399f107bab70f3d9
wrappers/python/openmm/app/gromacstopfile.py
python
GromacsTopFile._processMoleculeType
(self, line)
Process a line in the [ moleculetypes ] category.
Process a line in the [ moleculetypes ] category.
[ "Process", "a", "line", "in", "the", "[", "moleculetypes", "]", "category", "." ]
def _processMoleculeType(self, line):
    """Process a line in the [ moleculetypes ] category."""
    fields = line.split()
    if len(fields) < 1:
        raise ValueError('Too few fields in [ moleculetypes ] line: '+line)
    type = GromacsTopFile._MoleculeType()
    self._moleculeTypes[fields[0]] = type
    self._currentMoleculeType = type
[ "def", "_processMoleculeType", "(", "self", ",", "line", ")", ":", "fields", "=", "line", ".", "split", "(", ")", "if", "len", "(", "fields", ")", "<", "1", ":", "raise", "ValueError", "(", "'Too few fields in [ moleculetypes ] line: '", "+", "line", ")", "type", "=", "GromacsTopFile", ".", "_MoleculeType", "(", ")", "self", ".", "_moleculeTypes", "[", "fields", "[", "0", "]", "]", "=", "type", "self", ".", "_currentMoleculeType", "=", "type" ]
https://github.com/openmm/openmm/blob/cb293447c4fc8b03976dfe11399f107bab70f3d9/wrappers/python/openmm/app/gromacstopfile.py#L286-L293