nwo
stringlengths
5
86
sha
stringlengths
40
40
path
stringlengths
4
189
language
stringclasses
1 value
identifier
stringlengths
1
94
parameters
stringlengths
2
4.03k
argument_list
stringclasses
1 value
return_statement
stringlengths
0
11.5k
docstring
stringlengths
1
33.2k
docstring_summary
stringlengths
0
5.15k
docstring_tokens
list
function
stringlengths
34
151k
function_tokens
list
url
stringlengths
90
278
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/__init__.py
python
ZipProvider._is_current
(self, file_path, zip_path)
return zip_contents == file_contents
Return True if the file_path is current for this zip_path
Return True if the file_path is current for this zip_path
[ "Return", "True", "if", "the", "file_path", "is", "current", "for", "this", "zip_path" ]
def _is_current(self, file_path, zip_path): """ Return True if the file_path is current for this zip_path """ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) if not os.path.isfile(file_path): return False stat = os.stat(file_path) if stat.st_size != size or stat.st_mtime != timestamp: return False # check that the contents match zip_contents = self.loader.get_data(zip_path) with open(file_path, 'rb') as f: file_contents = f.read() return zip_contents == file_contents
[ "def", "_is_current", "(", "self", ",", "file_path", ",", "zip_path", ")", ":", "timestamp", ",", "size", "=", "self", ".", "_get_date_and_size", "(", "self", ".", "zipinfo", "[", "zip_path", "]", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "return", "False", "stat", "=", "os", ".", "stat", "(", "file_path", ")", "if", "stat", ".", "st_size", "!=", "size", "or", "stat", ".", "st_mtime", "!=", "timestamp", ":", "return", "False", "# check that the contents match", "zip_contents", "=", "self", ".", "loader", ".", "get_data", "(", "zip_path", ")", "with", "open", "(", "file_path", ",", "'rb'", ")", "as", "f", ":", "file_contents", "=", "f", ".", "read", "(", ")", "return", "zip_contents", "==", "file_contents" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/__init__.py#L1796-L1810
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/codegen.py
python
JitEngine._load_defined_symbols
(self, mod)
Extract symbols from the module
Extract symbols from the module
[ "Extract", "symbols", "from", "the", "module" ]
def _load_defined_symbols(self, mod): """Extract symbols from the module """ for gsets in (mod.functions, mod.global_variables): self._defined_symbols |= {gv.name for gv in gsets if not gv.is_declaration}
[ "def", "_load_defined_symbols", "(", "self", ",", "mod", ")", ":", "for", "gsets", "in", "(", "mod", ".", "functions", ",", "mod", ".", "global_variables", ")", ":", "self", ".", "_defined_symbols", "|=", "{", "gv", ".", "name", "for", "gv", "in", "gsets", "if", "not", "gv", ".", "is_declaration", "}" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/targets/codegen.py#L587-L592
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/nntplib.py
python
decode_header
(header_str)
return ''.join(parts)
Takes a unicode string representing a munged header value and decodes it as a (possibly non-ASCII) readable value.
Takes a unicode string representing a munged header value and decodes it as a (possibly non-ASCII) readable value.
[ "Takes", "a", "unicode", "string", "representing", "a", "munged", "header", "value", "and", "decodes", "it", "as", "a", "(", "possibly", "non", "-", "ASCII", ")", "readable", "value", "." ]
def decode_header(header_str): """Takes a unicode string representing a munged header value and decodes it as a (possibly non-ASCII) readable value.""" parts = [] for v, enc in _email_decode_header(header_str): if isinstance(v, bytes): parts.append(v.decode(enc or 'ascii')) else: parts.append(v) return ''.join(parts)
[ "def", "decode_header", "(", "header_str", ")", ":", "parts", "=", "[", "]", "for", "v", ",", "enc", "in", "_email_decode_header", "(", "header_str", ")", ":", "if", "isinstance", "(", "v", ",", "bytes", ")", ":", "parts", ".", "append", "(", "v", ".", "decode", "(", "enc", "or", "'ascii'", ")", ")", "else", ":", "parts", ".", "append", "(", "v", ")", "return", "''", ".", "join", "(", "parts", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/nntplib.py#L167-L176
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/training/python/training/training.py
python
create_train_op
(total_loss, optimizer, global_step=_USE_GLOBAL_STEP, update_ops=None, variables_to_train=None, transform_grads_fn=None, summarize_gradients=False, gate_gradients=tf_optimizer.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, check_numerics=True)
return train_op
Creates an `Operation` that evaluates the gradients and returns the loss. Args: total_loss: A `Tensor` representing the total loss. optimizer: A tf.Optimizer to use for computing the gradients. global_step: A `Tensor` representing the global step variable. If left as `_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used. update_ops: An optional list of updates to execute. If `update_ops` is `None`, then the update ops are set to the contents of the `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`, a warning will be displayed. variables_to_train: an optional list of variables to train. If None, it will default to all tf.compat.v1.trainable_variables(). transform_grads_fn: A function which takes a single argument, a list of gradient to variable pairs (tuples), performs any requested gradient updates, such as gradient clipping or multipliers, and returns the updated list. summarize_gradients: Whether or not add summaries for each gradient. gate_gradients: How to gate the computation of gradients. See tf.Optimizer. aggregation_method: Specifies the method used to combine gradient terms. Valid values are defined in the class `AggregationMethod`. colocate_gradients_with_ops: Whether or not to try colocating the gradients with the ops that generated them. check_numerics: Whether or not we apply check_numerics. Returns: A `Tensor` that when evaluated, computes the gradients and returns the total loss value.
Creates an `Operation` that evaluates the gradients and returns the loss.
[ "Creates", "an", "Operation", "that", "evaluates", "the", "gradients", "and", "returns", "the", "loss", "." ]
def create_train_op(total_loss, optimizer, global_step=_USE_GLOBAL_STEP, update_ops=None, variables_to_train=None, transform_grads_fn=None, summarize_gradients=False, gate_gradients=tf_optimizer.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, check_numerics=True): """Creates an `Operation` that evaluates the gradients and returns the loss. Args: total_loss: A `Tensor` representing the total loss. optimizer: A tf.Optimizer to use for computing the gradients. global_step: A `Tensor` representing the global step variable. If left as `_USE_GLOBAL_STEP`, then tf.contrib.framework.global_step() is used. update_ops: An optional list of updates to execute. If `update_ops` is `None`, then the update ops are set to the contents of the `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`, a warning will be displayed. variables_to_train: an optional list of variables to train. If None, it will default to all tf.compat.v1.trainable_variables(). transform_grads_fn: A function which takes a single argument, a list of gradient to variable pairs (tuples), performs any requested gradient updates, such as gradient clipping or multipliers, and returns the updated list. summarize_gradients: Whether or not add summaries for each gradient. gate_gradients: How to gate the computation of gradients. See tf.Optimizer. aggregation_method: Specifies the method used to combine gradient terms. Valid values are defined in the class `AggregationMethod`. colocate_gradients_with_ops: Whether or not to try colocating the gradients with the ops that generated them. check_numerics: Whether or not we apply check_numerics. Returns: A `Tensor` that when evaluated, computes the gradients and returns the total loss value. """ if global_step is _USE_GLOBAL_STEP: global_step = training_util.get_or_create_global_step() # Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None. global_update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)) if update_ops is None: update_ops = global_update_ops else: update_ops = set(update_ops) if not global_update_ops.issubset(update_ops): logging.warning('update_ops in create_train_op does not contain all the ' 'update_ops in GraphKeys.UPDATE_OPS') # Make sure update_ops are computed before total_loss. if update_ops: with ops.control_dependencies(update_ops): barrier = control_flow_ops.no_op(name='update_barrier') total_loss = control_flow_ops.with_dependencies([barrier], total_loss) if variables_to_train is None: # Default to tf.compat.v1.trainable_variables() variables_to_train = tf_variables.trainable_variables() else: # Make sure that variables_to_train are in # tf.compat.v1.trainable_variables() for v in variables_to_train: assert v.trainable or v in tf_variables.trainable_variables() assert variables_to_train # Create the gradients. Note that apply_gradients adds the gradient # computation to the current graph. grads = optimizer.compute_gradients( total_loss, variables_to_train, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops) if transform_grads_fn: grads = transform_grads_fn(grads) # Summarize gradients. if summarize_gradients: with ops.name_scope('summarize_grads'): add_gradients_summaries(grads) # Create gradient updates. grad_updates = optimizer.apply_gradients(grads, global_step=global_step) with ops.name_scope('train_op'): # Make sure total_loss is valid. if check_numerics: total_loss = array_ops.check_numerics(total_loss, 'LossTensor is inf or nan') # Ensure the train_tensor computes grad_updates. train_op = control_flow_ops.with_dependencies([grad_updates], total_loss) # Add the operation used for training to the 'train_op' collection train_ops = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) if train_op not in train_ops: train_ops.append(train_op) return train_op
[ "def", "create_train_op", "(", "total_loss", ",", "optimizer", ",", "global_step", "=", "_USE_GLOBAL_STEP", ",", "update_ops", "=", "None", ",", "variables_to_train", "=", "None", ",", "transform_grads_fn", "=", "None", ",", "summarize_gradients", "=", "False", ",", "gate_gradients", "=", "tf_optimizer", ".", "Optimizer", ".", "GATE_OP", ",", "aggregation_method", "=", "None", ",", "colocate_gradients_with_ops", "=", "False", ",", "check_numerics", "=", "True", ")", ":", "if", "global_step", "is", "_USE_GLOBAL_STEP", ":", "global_step", "=", "training_util", ".", "get_or_create_global_step", "(", ")", "# Update ops use GraphKeys.UPDATE_OPS collection if update_ops is None.", "global_update_ops", "=", "set", "(", "ops", ".", "get_collection", "(", "ops", ".", "GraphKeys", ".", "UPDATE_OPS", ")", ")", "if", "update_ops", "is", "None", ":", "update_ops", "=", "global_update_ops", "else", ":", "update_ops", "=", "set", "(", "update_ops", ")", "if", "not", "global_update_ops", ".", "issubset", "(", "update_ops", ")", ":", "logging", ".", "warning", "(", "'update_ops in create_train_op does not contain all the '", "'update_ops in GraphKeys.UPDATE_OPS'", ")", "# Make sure update_ops are computed before total_loss.", "if", "update_ops", ":", "with", "ops", ".", "control_dependencies", "(", "update_ops", ")", ":", "barrier", "=", "control_flow_ops", ".", "no_op", "(", "name", "=", "'update_barrier'", ")", "total_loss", "=", "control_flow_ops", ".", "with_dependencies", "(", "[", "barrier", "]", ",", "total_loss", ")", "if", "variables_to_train", "is", "None", ":", "# Default to tf.compat.v1.trainable_variables()", "variables_to_train", "=", "tf_variables", ".", "trainable_variables", "(", ")", "else", ":", "# Make sure that variables_to_train are in", "# tf.compat.v1.trainable_variables()", "for", "v", "in", "variables_to_train", ":", "assert", "v", ".", "trainable", "or", "v", "in", "tf_variables", ".", "trainable_variables", "(", ")", "assert", "variables_to_train", "# Create the gradients. Note that apply_gradients adds the gradient", "# computation to the current graph.", "grads", "=", "optimizer", ".", "compute_gradients", "(", "total_loss", ",", "variables_to_train", ",", "gate_gradients", "=", "gate_gradients", ",", "aggregation_method", "=", "aggregation_method", ",", "colocate_gradients_with_ops", "=", "colocate_gradients_with_ops", ")", "if", "transform_grads_fn", ":", "grads", "=", "transform_grads_fn", "(", "grads", ")", "# Summarize gradients.", "if", "summarize_gradients", ":", "with", "ops", ".", "name_scope", "(", "'summarize_grads'", ")", ":", "add_gradients_summaries", "(", "grads", ")", "# Create gradient updates.", "grad_updates", "=", "optimizer", ".", "apply_gradients", "(", "grads", ",", "global_step", "=", "global_step", ")", "with", "ops", ".", "name_scope", "(", "'train_op'", ")", ":", "# Make sure total_loss is valid.", "if", "check_numerics", ":", "total_loss", "=", "array_ops", ".", "check_numerics", "(", "total_loss", ",", "'LossTensor is inf or nan'", ")", "# Ensure the train_tensor computes grad_updates.", "train_op", "=", "control_flow_ops", ".", "with_dependencies", "(", "[", "grad_updates", "]", ",", "total_loss", ")", "# Add the operation used for training to the 'train_op' collection", "train_ops", "=", "ops", ".", "get_collection_ref", "(", "ops", ".", "GraphKeys", ".", "TRAIN_OP", ")", "if", "train_op", "not", "in", "train_ops", ":", "train_ops", ".", "append", "(", "train_op", ")", "return", "train_op" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/training/python/training/training.py#L372-L477
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py
python
Mailbox.update
(self, arg=None)
Change the messages that correspond to certain keys.
Change the messages that correspond to certain keys.
[ "Change", "the", "messages", "that", "correspond", "to", "certain", "keys", "." ]
def update(self, arg=None): """Change the messages that correspond to certain keys.""" if hasattr(arg, 'iteritems'): source = arg.iteritems() elif hasattr(arg, 'items'): source = arg.items() else: source = arg bad_key = False for key, message in source: try: self[key] = message except KeyError: bad_key = True if bad_key: raise KeyError('No message with key(s)')
[ "def", "update", "(", "self", ",", "arg", "=", "None", ")", ":", "if", "hasattr", "(", "arg", ",", "'iteritems'", ")", ":", "source", "=", "arg", ".", "iteritems", "(", ")", "elif", "hasattr", "(", "arg", ",", "'items'", ")", ":", "source", "=", "arg", ".", "items", "(", ")", "else", ":", "source", "=", "arg", "bad_key", "=", "False", "for", "key", ",", "message", "in", "source", ":", "try", ":", "self", "[", "key", "]", "=", "message", "except", "KeyError", ":", "bad_key", "=", "True", "if", "bad_key", ":", "raise", "KeyError", "(", "'No message with key(s)'", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/mailbox.py#L163-L178
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/boto/boto/mturk/connection.py
python
MTurkConnection.get_assignments
(self, hit_id, status=None, sort_by='SubmitTime', sort_direction='Ascending', page_size=10, page_number=1, response_groups=None)
return self._process_request('GetAssignmentsForHIT', params, [('Assignment', Assignment)])
Retrieves completed assignments for a HIT. Use this operation to retrieve the results for a HIT. The returned ResultSet will have the following attributes: NumResults The number of assignments on the page in the filtered results list, equivalent to the number of assignments being returned by this call. A non-negative integer, as a string. PageNumber The number of the page in the filtered results list being returned. A positive integer, as a string. TotalNumResults The total number of HITs in the filtered results list based on this call. A non-negative integer, as a string. The ResultSet will contain zero or more Assignment objects
Retrieves completed assignments for a HIT. Use this operation to retrieve the results for a HIT.
[ "Retrieves", "completed", "assignments", "for", "a", "HIT", ".", "Use", "this", "operation", "to", "retrieve", "the", "results", "for", "a", "HIT", "." ]
def get_assignments(self, hit_id, status=None, sort_by='SubmitTime', sort_direction='Ascending', page_size=10, page_number=1, response_groups=None): """ Retrieves completed assignments for a HIT. Use this operation to retrieve the results for a HIT. The returned ResultSet will have the following attributes: NumResults The number of assignments on the page in the filtered results list, equivalent to the number of assignments being returned by this call. A non-negative integer, as a string. PageNumber The number of the page in the filtered results list being returned. A positive integer, as a string. TotalNumResults The total number of HITs in the filtered results list based on this call. A non-negative integer, as a string. The ResultSet will contain zero or more Assignment objects """ params = {'HITId': hit_id, 'SortProperty': sort_by, 'SortDirection': sort_direction, 'PageSize': page_size, 'PageNumber': page_number} if status is not None: params['AssignmentStatus'] = status # Handle optional response groups argument if response_groups: self.build_list_params(params, response_groups, 'ResponseGroup') return self._process_request('GetAssignmentsForHIT', params, [('Assignment', Assignment)])
[ "def", "get_assignments", "(", "self", ",", "hit_id", ",", "status", "=", "None", ",", "sort_by", "=", "'SubmitTime'", ",", "sort_direction", "=", "'Ascending'", ",", "page_size", "=", "10", ",", "page_number", "=", "1", ",", "response_groups", "=", "None", ")", ":", "params", "=", "{", "'HITId'", ":", "hit_id", ",", "'SortProperty'", ":", "sort_by", ",", "'SortDirection'", ":", "sort_direction", ",", "'PageSize'", ":", "page_size", ",", "'PageNumber'", ":", "page_number", "}", "if", "status", "is", "not", "None", ":", "params", "[", "'AssignmentStatus'", "]", "=", "status", "# Handle optional response groups argument", "if", "response_groups", ":", "self", ".", "build_list_params", "(", "params", ",", "response_groups", ",", "'ResponseGroup'", ")", "return", "self", ".", "_process_request", "(", "'GetAssignmentsForHIT'", ",", "params", ",", "[", "(", "'Assignment'", ",", "Assignment", ")", "]", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/mturk/connection.py#L377-L417
google/llvm-propeller
45c226984fe8377ebfb2ad7713c680d652ba678d
lldb/third_party/Python/module/pexpect-4.6/pexpect/spawnbase.py
python
SpawnBase.expect
(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw)
return self.expect_list(compiled_pattern_list, timeout, searchwindowsize, async_)
This seeks through the stream until a pattern is matched. The pattern is overloaded and may take several types. The pattern can be a StringType, EOF, a compiled re, or a list of any of those types. Strings will be compiled to re types. This returns the index into the pattern list. If the pattern was not a list this returns index 0 on a successful match. This may raise exceptions for EOF or TIMEOUT. To avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern list. That will cause expect to match an EOF or TIMEOUT condition instead of raising an exception. If you pass a list of patterns and more than one matches, the first match in the stream is chosen. If more than one pattern matches at that point, the leftmost in the pattern list is chosen. For example:: # the input is 'foobar' index = p.expect(['bar', 'foo', 'foobar']) # returns 1('foo') even though 'foobar' is a "better" match Please note, however, that buffering can affect this behavior, since input arrives in unpredictable chunks. For example:: # the input is 'foobar' index = p.expect(['foobar', 'foo']) # returns 0('foobar') if all input is available at once, # but returns 1('foo') if parts of the final 'bar' arrive late When a match is found for the given pattern, the class instance attribute *match* becomes an re.MatchObject result. Should an EOF or TIMEOUT pattern match, then the match attribute will be an instance of that exception class. The pairing before and after class instance attributes are views of the data preceding and following the matching pattern. On general exception, class attribute *before* is all data received up to the exception, while *match* and *after* attributes are value None. When the keyword argument timeout is -1 (default), then TIMEOUT will raise after the default value specified by the class timeout attribute. When None, TIMEOUT will not be raised and may block indefinitely until match. When the keyword argument searchwindowsize is -1 (default), then the value specified by the class maxread attribute is used. A list entry may be EOF or TIMEOUT instead of a string. This will catch these exceptions and return the index of the list entry instead of raising the exception. The attribute 'after' will be set to the exception type. The attribute 'match' will be None. This allows you to write code like this:: index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT]) if index == 0: do_something() elif index == 1: do_something_else() elif index == 2: do_some_other_thing() elif index == 3: do_something_completely_different() instead of code like this:: try: index = p.expect(['good', 'bad']) if index == 0: do_something() elif index == 1: do_something_else() except EOF: do_some_other_thing() except TIMEOUT: do_something_completely_different() These two forms are equivalent. It all depends on what you want. You can also just expect the EOF if you are waiting for all output of a child to finish. For example:: p = pexpect.spawn('/bin/ls') p.expect(pexpect.EOF) print p.before If you are trying to optimize for speed then see expect_list(). On Python 3.4, or Python 3.3 with asyncio installed, passing ``async_=True`` will make this return an :mod:`asyncio` coroutine, which you can yield from to get the same result that this method would normally give directly. So, inside a coroutine, you can replace this code:: index = p.expect(patterns) With this non-blocking form:: index = yield from p.expect(patterns, async_=True)
This seeks through the stream until a pattern is matched. The pattern is overloaded and may take several types. The pattern can be a StringType, EOF, a compiled re, or a list of any of those types. Strings will be compiled to re types. This returns the index into the pattern list. If the pattern was not a list this returns index 0 on a successful match. This may raise exceptions for EOF or TIMEOUT. To avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern list. That will cause expect to match an EOF or TIMEOUT condition instead of raising an exception.
[ "This", "seeks", "through", "the", "stream", "until", "a", "pattern", "is", "matched", ".", "The", "pattern", "is", "overloaded", "and", "may", "take", "several", "types", ".", "The", "pattern", "can", "be", "a", "StringType", "EOF", "a", "compiled", "re", "or", "a", "list", "of", "any", "of", "those", "types", ".", "Strings", "will", "be", "compiled", "to", "re", "types", ".", "This", "returns", "the", "index", "into", "the", "pattern", "list", ".", "If", "the", "pattern", "was", "not", "a", "list", "this", "returns", "index", "0", "on", "a", "successful", "match", ".", "This", "may", "raise", "exceptions", "for", "EOF", "or", "TIMEOUT", ".", "To", "avoid", "the", "EOF", "or", "TIMEOUT", "exceptions", "add", "EOF", "or", "TIMEOUT", "to", "the", "pattern", "list", ".", "That", "will", "cause", "expect", "to", "match", "an", "EOF", "or", "TIMEOUT", "condition", "instead", "of", "raising", "an", "exception", "." ]
def expect(self, pattern, timeout=-1, searchwindowsize=-1, async_=False, **kw): '''This seeks through the stream until a pattern is matched. The pattern is overloaded and may take several types. The pattern can be a StringType, EOF, a compiled re, or a list of any of those types. Strings will be compiled to re types. This returns the index into the pattern list. If the pattern was not a list this returns index 0 on a successful match. This may raise exceptions for EOF or TIMEOUT. To avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern list. That will cause expect to match an EOF or TIMEOUT condition instead of raising an exception. If you pass a list of patterns and more than one matches, the first match in the stream is chosen. If more than one pattern matches at that point, the leftmost in the pattern list is chosen. For example:: # the input is 'foobar' index = p.expect(['bar', 'foo', 'foobar']) # returns 1('foo') even though 'foobar' is a "better" match Please note, however, that buffering can affect this behavior, since input arrives in unpredictable chunks. For example:: # the input is 'foobar' index = p.expect(['foobar', 'foo']) # returns 0('foobar') if all input is available at once, # but returns 1('foo') if parts of the final 'bar' arrive late When a match is found for the given pattern, the class instance attribute *match* becomes an re.MatchObject result. Should an EOF or TIMEOUT pattern match, then the match attribute will be an instance of that exception class. The pairing before and after class instance attributes are views of the data preceding and following the matching pattern. On general exception, class attribute *before* is all data received up to the exception, while *match* and *after* attributes are value None. When the keyword argument timeout is -1 (default), then TIMEOUT will raise after the default value specified by the class timeout attribute. When None, TIMEOUT will not be raised and may block indefinitely until match. When the keyword argument searchwindowsize is -1 (default), then the value specified by the class maxread attribute is used. A list entry may be EOF or TIMEOUT instead of a string. This will catch these exceptions and return the index of the list entry instead of raising the exception. The attribute 'after' will be set to the exception type. The attribute 'match' will be None. This allows you to write code like this:: index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT]) if index == 0: do_something() elif index == 1: do_something_else() elif index == 2: do_some_other_thing() elif index == 3: do_something_completely_different() instead of code like this:: try: index = p.expect(['good', 'bad']) if index == 0: do_something() elif index == 1: do_something_else() except EOF: do_some_other_thing() except TIMEOUT: do_something_completely_different() These two forms are equivalent. It all depends on what you want. You can also just expect the EOF if you are waiting for all output of a child to finish. For example:: p = pexpect.spawn('/bin/ls') p.expect(pexpect.EOF) print p.before If you are trying to optimize for speed then see expect_list(). On Python 3.4, or Python 3.3 with asyncio installed, passing ``async_=True`` will make this return an :mod:`asyncio` coroutine, which you can yield from to get the same result that this method would normally give directly. So, inside a coroutine, you can replace this code:: index = p.expect(patterns) With this non-blocking form:: index = yield from p.expect(patterns, async_=True) ''' if 'async' in kw: async_ = kw.pop('async') if kw: raise TypeError("Unknown keyword arguments: {}".format(kw)) compiled_pattern_list = self.compile_pattern_list(pattern) return self.expect_list(compiled_pattern_list, timeout, searchwindowsize, async_)
[ "def", "expect", "(", "self", ",", "pattern", ",", "timeout", "=", "-", "1", ",", "searchwindowsize", "=", "-", "1", ",", "async_", "=", "False", ",", "*", "*", "kw", ")", ":", "if", "'async'", "in", "kw", ":", "async_", "=", "kw", ".", "pop", "(", "'async'", ")", "if", "kw", ":", "raise", "TypeError", "(", "\"Unknown keyword arguments: {}\"", ".", "format", "(", "kw", ")", ")", "compiled_pattern_list", "=", "self", ".", "compile_pattern_list", "(", "pattern", ")", "return", "self", ".", "expect_list", "(", "compiled_pattern_list", ",", "timeout", ",", "searchwindowsize", ",", "async_", ")" ]
https://github.com/google/llvm-propeller/blob/45c226984fe8377ebfb2ad7713c680d652ba678d/lldb/third_party/Python/module/pexpect-4.6/pexpect/spawnbase.py#L240-L341
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
contrib/gizmos/msw/gizmos.py
python
LEDNumberCtrl.SetAlignment
(*args, **kwargs)
return _gizmos.LEDNumberCtrl_SetAlignment(*args, **kwargs)
SetAlignment(self, int Alignment, bool Redraw=True)
SetAlignment(self, int Alignment, bool Redraw=True)
[ "SetAlignment", "(", "self", "int", "Alignment", "bool", "Redraw", "=", "True", ")" ]
def SetAlignment(*args, **kwargs): """SetAlignment(self, int Alignment, bool Redraw=True)""" return _gizmos.LEDNumberCtrl_SetAlignment(*args, **kwargs)
[ "def", "SetAlignment", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gizmos", ".", "LEDNumberCtrl_SetAlignment", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/msw/gizmos.py#L338-L340
PixarAnimationStudios/USD
faed18ce62c8736b02413635b584a2f637156bad
pxr/usdImaging/usdviewq/pythonInterpreter.py
python
_Completer._AttrMatches
(self, text)
return list(matches)
Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in the globals of __main__, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.
Compute matches when text contains a dot.
[ "Compute", "matches", "when", "text", "contains", "a", "dot", "." ]
def _AttrMatches(self, text): """Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in the globals of __main__, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated. """ import re, __main__ assert len(text) # This is all a bit hacky, but that's tab-completion for you. # Now find the last index in the text of a set of characters, and split # the string into a prefix and suffix token there. The suffix token # will be used for completion. splitChars = ' )(;,+=*/-%!<>' index = -1 for char in splitChars: index = max(text.rfind(char), index) if index >= len(text)-1: return [] prefix = '' suffix = text if index >= 0: prefix = text[:index+1] suffix = text[index+1:] m = re.match(r"([^.]+(\.[^.]+)*)\.(.*)", suffix) if not m: return [] expr, attr = m.group(1, 3) try: myobject = eval(expr, __main__.__dict__, self.locals) except (AttributeError, NameError, SyntaxError): return [] words = set(dir(myobject)) if hasattr(myobject,'__class__'): words.add('__class__') words = words.union(set(_GetClassMembers(myobject.__class__))) words = list(words) matches = set() n = len(attr) for word in words: if word[:n] == attr and word != "__builtins__": matches.add("%s%s.%s" % (prefix, expr, word)) return list(matches)
[ "def", "_AttrMatches", "(", "self", ",", "text", ")", ":", "import", "re", ",", "__main__", "assert", "len", "(", "text", ")", "# This is all a bit hacky, but that's tab-completion for you.", "# Now find the last index in the text of a set of characters, and split", "# the string into a prefix and suffix token there. The suffix token", "# will be used for completion.", "splitChars", "=", "' )(;,+=*/-%!<>'", "index", "=", "-", "1", "for", "char", "in", "splitChars", ":", "index", "=", "max", "(", "text", ".", "rfind", "(", "char", ")", ",", "index", ")", "if", "index", ">=", "len", "(", "text", ")", "-", "1", ":", "return", "[", "]", "prefix", "=", "''", "suffix", "=", "text", "if", "index", ">=", "0", ":", "prefix", "=", "text", "[", ":", "index", "+", "1", "]", "suffix", "=", "text", "[", "index", "+", "1", ":", "]", "m", "=", "re", ".", "match", "(", "r\"([^.]+(\\.[^.]+)*)\\.(.*)\"", ",", "suffix", ")", "if", "not", "m", ":", "return", "[", "]", "expr", ",", "attr", "=", "m", ".", "group", "(", "1", ",", "3", ")", "try", ":", "myobject", "=", "eval", "(", "expr", ",", "__main__", ".", "__dict__", ",", "self", ".", "locals", ")", "except", "(", "AttributeError", ",", "NameError", ",", "SyntaxError", ")", ":", "return", "[", "]", "words", "=", "set", "(", "dir", "(", "myobject", ")", ")", "if", "hasattr", "(", "myobject", ",", "'__class__'", ")", ":", "words", ".", "add", "(", "'__class__'", ")", "words", "=", "words", ".", "union", "(", "set", "(", "_GetClassMembers", "(", "myobject", ".", "__class__", ")", ")", ")", "words", "=", "list", "(", "words", ")", "matches", "=", "set", "(", ")", "n", "=", "len", "(", "attr", ")", "for", "word", "in", "words", ":", "if", "word", "[", ":", "n", "]", "==", "attr", "and", "word", "!=", "\"__builtins__\"", ":", "matches", ".", "add", "(", "\"%s%s.%s\"", "%", "(", "prefix", ",", "expr", ",", "word", ")", ")", "return", "list", "(", "matches", ")" ]
https://github.com/PixarAnimationStudios/USD/blob/faed18ce62c8736b02413635b584a2f637156bad/pxr/usdImaging/usdviewq/pythonInterpreter.py#L101-L157
rootm0s/Protectors
5b3f4d11687a5955caf9c3af30666c4bfc2c19ab
OWASP-ZSC/module/readline_windows/pyreadline/modes/notemacs.py
python
NotEmacsMode.kill_region
(self, e)
Kill the text in the current region. By default, this command is unbound.
Kill the text in the current region. By default, this command is unbound.
[ "Kill", "the", "text", "in", "the", "current", "region", ".", "By", "default", "this", "command", "is", "unbound", "." ]
def kill_region(self, e): # () '''Kill the text in the current region. By default, this command is unbound. ''' pass
[ "def", "kill_region", "(", "self", ",", "e", ")", ":", "# ()", "pass" ]
https://github.com/rootm0s/Protectors/blob/5b3f4d11687a5955caf9c3af30666c4bfc2c19ab/OWASP-ZSC/module/readline_windows/pyreadline/modes/notemacs.py#L367-L369
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/textwrap.py
python
dedent
(text)
return text
Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\\thello" are considered to have no common leading whitespace. Entirely blank lines are normalized to a newline character.
Remove any common leading whitespace from every line in `text`.
[ "Remove", "any", "common", "leading", "whitespace", "from", "every", "line", "in", "text", "." ]
def dedent(text): """Remove any common leading whitespace from every line in `text`. This can be used to make triple-quoted strings line up with the left edge of the display, while still presenting them in the source code in indented form. Note that tabs and spaces are both treated as whitespace, but they are not equal: the lines " hello" and "\\thello" are considered to have no common leading whitespace. Entirely blank lines are normalized to a newline character. """ # Look for the longest leading string of spaces and tabs common to # all lines. margin = None text = _whitespace_only_re.sub('', text) indents = _leading_whitespace_re.findall(text) for indent in indents: if margin is None: margin = indent # Current line more deeply indented than previous winner: # no change (previous winner is still on top). elif indent.startswith(margin): pass # Current line consistent with and no deeper than previous winner: # it's the new winner. elif margin.startswith(indent): margin = indent # Find the largest common whitespace between current line and previous # winner. else: for i, (x, y) in enumerate(zip(margin, indent)): if x != y: margin = margin[:i] break # sanity check (testing/debugging only) if 0 and margin: for line in text.split("\n"): assert not line or line.startswith(margin), \ "line = %r, margin = %r" % (line, margin) if margin: text = re.sub(r'(?m)^' + margin, '', text) return text
[ "def", "dedent", "(", "text", ")", ":", "# Look for the longest leading string of spaces and tabs common to", "# all lines.", "margin", "=", "None", "text", "=", "_whitespace_only_re", ".", "sub", "(", "''", ",", "text", ")", "indents", "=", "_leading_whitespace_re", ".", "findall", "(", "text", ")", "for", "indent", "in", "indents", ":", "if", "margin", "is", "None", ":", "margin", "=", "indent", "# Current line more deeply indented than previous winner:", "# no change (previous winner is still on top).", "elif", "indent", ".", "startswith", "(", "margin", ")", ":", "pass", "# Current line consistent with and no deeper than previous winner:", "# it's the new winner.", "elif", "margin", ".", "startswith", "(", "indent", ")", ":", "margin", "=", "indent", "# Find the largest common whitespace between current line and previous", "# winner.", "else", ":", "for", "i", ",", "(", "x", ",", "y", ")", "in", "enumerate", "(", "zip", "(", "margin", ",", "indent", ")", ")", ":", "if", "x", "!=", "y", ":", "margin", "=", "margin", "[", ":", "i", "]", "break", "# sanity check (testing/debugging only)", "if", "0", "and", "margin", ":", "for", "line", "in", "text", ".", "split", "(", "\"\\n\"", ")", ":", "assert", "not", "line", "or", "line", ".", "startswith", "(", "margin", ")", ",", "\"line = %r, margin = %r\"", "%", "(", "line", ",", "margin", ")", "if", "margin", ":", "text", "=", "re", ".", "sub", "(", "r'(?m)^'", "+", "margin", ",", "''", ",", "text", ")", "return", "text" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/textwrap.py#L414-L462
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/configobj/configobj.py
python
Section.rename
(self, oldkey, newkey)
Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments.
Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments.
[ "Change", "a", "keyname", "to", "another", "without", "changing", "position", "in", "sequence", ".", "Implemented", "so", "that", "transformations", "can", "be", "made", "on", "keys", "as", "well", "as", "on", "values", ".", "(", "used", "by", "encode", "and", "decode", ")", "Also", "renames", "comments", "." ]
def rename(self, oldkey, newkey): """ Change a keyname to another, without changing position in sequence. Implemented so that transformations can be made on keys, as well as on values. (used by encode and decode) Also renames comments. """ if oldkey in self.scalars: the_list = self.scalars elif oldkey in self.sections: the_list = self.sections else: raise KeyError('Key "%s" not found.' % oldkey) pos = the_list.index(oldkey) # val = self[oldkey] dict.__delitem__(self, oldkey) dict.__setitem__(self, newkey, val) the_list.remove(oldkey) the_list.insert(pos, newkey) comm = self.comments[oldkey] inline_comment = self.inline_comments[oldkey] del self.comments[oldkey] del self.inline_comments[oldkey] self.comments[newkey] = comm self.inline_comments[newkey] = inline_comment
[ "def", "rename", "(", "self", ",", "oldkey", ",", "newkey", ")", ":", "if", "oldkey", "in", "self", ".", "scalars", ":", "the_list", "=", "self", ".", "scalars", "elif", "oldkey", "in", "self", ".", "sections", ":", "the_list", "=", "self", ".", "sections", "else", ":", "raise", "KeyError", "(", "'Key \"%s\" not found.'", "%", "oldkey", ")", "pos", "=", "the_list", ".", "index", "(", "oldkey", ")", "#", "val", "=", "self", "[", "oldkey", "]", "dict", ".", "__delitem__", "(", "self", ",", "oldkey", ")", "dict", ".", "__setitem__", "(", "self", ",", "newkey", ",", "val", ")", "the_list", ".", "remove", "(", "oldkey", ")", "the_list", ".", "insert", "(", "pos", ",", "newkey", ")", "comm", "=", "self", ".", "comments", "[", "oldkey", "]", "inline_comment", "=", "self", ".", "inline_comments", "[", "oldkey", "]", "del", "self", ".", "comments", "[", "oldkey", "]", "del", "self", ".", "inline_comments", "[", "oldkey", "]", "self", ".", "comments", "[", "newkey", "]", "=", "comm", "self", ".", "inline_comments", "[", "newkey", "]", "=", "inline_comment" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/configobj/configobj.py#L825-L852
alexgkendall/caffe-posenet
62aafbd7c45df91acdba14f5d1406d8295c2bc6f
scripts/cpp_lint.py
python
CheckCaffeDataLayerSetUp
(filename, clean_lines, linenum, error)
Except the base classes, Caffe DataLayer should define DataLayerSetUp instead of LayerSetUp. The base DataLayers define common SetUp steps, the subclasses should not override them. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Except the base classes, Caffe DataLayer should define DataLayerSetUp instead of LayerSetUp. The base DataLayers define common SetUp steps, the subclasses should not override them. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
[ "Except", "the", "base", "classes", "Caffe", "DataLayer", "should", "define", "DataLayerSetUp", "instead", "of", "LayerSetUp", ".", "The", "base", "DataLayers", "define", "common", "SetUp", "steps", "the", "subclasses", "should", "not", "override", "them", ".", "Args", ":", "filename", ":", "The", "name", "of", "the", "current", "file", ".", "clean_lines", ":", "A", "CleansedLines", "instance", "containing", "the", "file", ".", "linenum", ":", "The", "number", "of", "the", "line", "to", "check", ".", "error", ":", "The", "function", "to", "call", "with", "any", "errors", "found", "." ]
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error): """Except the base classes, Caffe DataLayer should define DataLayerSetUp instead of LayerSetUp. The base DataLayers define common SetUp steps, the subclasses should not override them. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] ix = line.find('DataLayer<Dtype>::LayerSetUp') if ix >= 0 and ( line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1): error(filename, linenum, 'caffe/data_layer_setup', 2, 'Except the base classes, Caffe DataLayer should define' + ' DataLayerSetUp instead of LayerSetUp. The base DataLayers' + ' define common SetUp steps, the subclasses should' + ' not override them.') ix = line.find('DataLayer<Dtype>::DataLayerSetUp') if ix >= 0 and ( line.find('void Base') == -1 and line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1): error(filename, linenum, 'caffe/data_layer_setup', 2, 'Except the base classes, Caffe DataLayer should define' + ' DataLayerSetUp instead of LayerSetUp. The base DataLayers' + ' define common SetUp steps, the subclasses should' + ' not override them.')
[ "def", "CheckCaffeDataLayerSetUp", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "ix", "=", "line", ".", "find", "(", "'DataLayer<Dtype>::LayerSetUp'", ")", "if", "ix", ">=", "0", "and", "(", "line", ".", "find", "(", "'void DataLayer<Dtype>::LayerSetUp'", ")", "!=", "-", "1", "or", "line", ".", "find", "(", "'void ImageDataLayer<Dtype>::LayerSetUp'", ")", "!=", "-", "1", "or", "line", ".", "find", "(", "'void MemoryDataLayer<Dtype>::LayerSetUp'", ")", "!=", "-", "1", "or", "line", ".", "find", "(", "'void WindowDataLayer<Dtype>::LayerSetUp'", ")", "!=", "-", "1", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'caffe/data_layer_setup'", ",", "2", ",", "'Except the base classes, Caffe DataLayer should define'", "+", "' DataLayerSetUp instead of LayerSetUp. The base DataLayers'", "+", "' define common SetUp steps, the subclasses should'", "+", "' not override them.'", ")", "ix", "=", "line", ".", "find", "(", "'DataLayer<Dtype>::DataLayerSetUp'", ")", "if", "ix", ">=", "0", "and", "(", "line", ".", "find", "(", "'void Base'", ")", "==", "-", "1", "and", "line", ".", "find", "(", "'void DataLayer<Dtype>::DataLayerSetUp'", ")", "==", "-", "1", "and", "line", ".", "find", "(", "'void ImageDataLayer<Dtype>::DataLayerSetUp'", ")", "==", "-", "1", "and", "line", ".", "find", "(", "'void MemoryDataLayer<Dtype>::DataLayerSetUp'", ")", "==", "-", "1", "and", "line", ".", "find", "(", "'void WindowDataLayer<Dtype>::DataLayerSetUp'", ")", "==", "-", "1", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'caffe/data_layer_setup'", ",", "2", ",", "'Except the base classes, Caffe DataLayer should define'", "+", "' DataLayerSetUp instead of LayerSetUp. The base DataLayers'", "+", "' define common SetUp steps, the subclasses should'", "+", "' not override them.'", ")" ]
https://github.com/alexgkendall/caffe-posenet/blob/62aafbd7c45df91acdba14f5d1406d8295c2bc6f/scripts/cpp_lint.py#L1595-L1631
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/excel/_util.py
python
register_writer
(klass)
Add engine to the excel writer registry.io.excel. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter
Add engine to the excel writer registry.io.excel.
[ "Add", "engine", "to", "the", "excel", "writer", "registry", ".", "io", ".", "excel", "." ]
def register_writer(klass): """ Add engine to the excel writer registry.io.excel. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter """ if not callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine _writers[engine_name] = klass
[ "def", "register_writer", "(", "klass", ")", ":", "if", "not", "callable", "(", "klass", ")", ":", "raise", "ValueError", "(", "\"Can only register callables as engines\"", ")", "engine_name", "=", "klass", ".", "engine", "_writers", "[", "engine_name", "]", "=", "klass" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/io/excel/_util.py#L8-L21
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/ros_comm/roslaunch/src/roslaunch/config.py
python
ROSLaunchConfig.has_remote_nodes
(self)
return self._remote_nodes_present
@return: True if roslaunch will launch nodes on a remote machine @rtype: bool @raises: RLException
[]
def has_remote_nodes(self): """ @return: True if roslaunch will launch nodes on a remote machine @rtype: bool @raises: RLException """ if not self._assign_machines_complete: raise RLException("ERROR: has_remote_nodes() cannot be called until prelaunch check is complete") return self._remote_nodes_present
[ "def", "has_remote_nodes", "(", "self", ")", ":", "if", "not", "self", ".", "_assign_machines_complete", ":", "raise", "RLException", "(", "\"ERROR: has_remote_nodes() cannot be called until prelaunch check is complete\"", ")", "return", "self", ".", "_remote_nodes_present" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros_comm/roslaunch/src/roslaunch/config.py#L175-L183
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_gdi.py
python
RegionIterator.HaveRects
(*args, **kwargs)
return _gdi_.RegionIterator_HaveRects(*args, **kwargs)
HaveRects(self) -> bool
HaveRects(self) -> bool
[ "HaveRects", "(", "self", ")", "-", ">", "bool" ]
def HaveRects(*args, **kwargs): """HaveRects(self) -> bool""" return _gdi_.RegionIterator_HaveRects(*args, **kwargs)
[ "def", "HaveRects", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "RegionIterator_HaveRects", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_gdi.py#L1706-L1708
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/distutils/cmd.py
python
Command.copy_file
(self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1)
return file_util.copy_file(infile, outfile, preserve_mode, preserve_times, not self.force, link, dry_run=self.dry_run)
Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)
Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)
[ "Copy", "a", "file", "respecting", "verbose", "dry", "-", "run", "and", "force", "flags", ".", "(", "The", "former", "two", "default", "to", "whatever", "is", "in", "the", "Distribution", "object", "and", "the", "latter", "defaults", "to", "false", "for", "commands", "that", "don", "t", "define", "it", ".", ")" ]
def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1): """Copy a file respecting verbose, dry-run and force flags. (The former two default to whatever is in the Distribution object, and the latter defaults to false for commands that don't define it.)""" return file_util.copy_file(infile, outfile, preserve_mode, preserve_times, not self.force, link, dry_run=self.dry_run)
[ "def", "copy_file", "(", "self", ",", "infile", ",", "outfile", ",", "preserve_mode", "=", "1", ",", "preserve_times", "=", "1", ",", "link", "=", "None", ",", "level", "=", "1", ")", ":", "return", "file_util", ".", "copy_file", "(", "infile", ",", "outfile", ",", "preserve_mode", ",", "preserve_times", ",", "not", "self", ".", "force", ",", "link", ",", "dry_run", "=", "self", ".", "dry_run", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/distutils/cmd.py#L340-L347
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
setup/snap.py
python
TNEANetAFltI.__lt__
(self, *args)
return _snap.TNEANetAFltI___lt__(self, *args)
__lt__(TNEANetAFltI self, TNEANetAFltI I) -> bool Parameters: I: TNEANetAFltI const &
__lt__(TNEANetAFltI self, TNEANetAFltI I) -> bool
[ "__lt__", "(", "TNEANetAFltI", "self", "TNEANetAFltI", "I", ")", "-", ">", "bool" ]
def __lt__(self, *args): """ __lt__(TNEANetAFltI self, TNEANetAFltI I) -> bool Parameters: I: TNEANetAFltI const & """ return _snap.TNEANetAFltI___lt__(self, *args)
[ "def", "__lt__", "(", "self", ",", "*", "args", ")", ":", "return", "_snap", ".", "TNEANetAFltI___lt__", "(", "self", ",", "*", "args", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L21153-L21161
etotheipi/BitcoinArmory
2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98
urllib3/util.py
python
Timeout.connect_timeout
(self)
return min(self._connect, self.total)
Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: the connect timeout :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
Get the value to use when setting a connection timeout.
[ "Get", "the", "value", "to", "use", "when", "setting", "a", "connection", "timeout", "." ]
def connect_timeout(self): """ Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: the connect timeout :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None """ if self.total is None: return self._connect if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: return self.total return min(self._connect, self.total)
[ "def", "connect_timeout", "(", "self", ")", ":", "if", "self", ".", "total", "is", "None", ":", "return", "self", ".", "_connect", "if", "self", ".", "_connect", "is", "None", "or", "self", ".", "_connect", "is", "self", ".", "DEFAULT_TIMEOUT", ":", "return", "self", ".", "total", "return", "min", "(", "self", ".", "_connect", ",", "self", ".", "total", ")" ]
https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/urllib3/util.py#L220-L235
QMCPACK/qmcpack
d0948ab455e38364458740cc8e2239600a14c5cd
utils/afqmctools/afqmctools/utils/qe_driver.py
python
qe_driver_MP2NO
(qe_info,out_prefix='pyscf_drv', appnos=False, diag_type='keep_occ', nread_from_h5=0,h5_add_orbs='',nskip=0, eigcut=1e-3,nextracut=1e-6,mp2noecut=1e-6,kappa=0.0,regp=0)
Calls the MP2NO routine in the driver. Parameters ---------- qe_info: Python Dictionary. Dictionary with information from QE calculation, generated by qe_driver_init. out_prefix: string. Default: 'pyscf_drv' Prefix used in all the files generated by the driver. appnos: Bool. Default: False. If True, generates approximate natural orbitals. diag_type: string. Default: 'keep_occ' Defines the type of HF diagonalization performed before the MP2 calculation. Options: 'keep_occ': Only the virtual orbitals/eigenvalues are calculated. Occupied orbitals/eigenvalues are kept from the QE calculation. 'full': All orbitals/eigenvalues are recalculated. 'fullpw': A basis set is generated that contains all the plane waves below the QE wfn cutoff. The HF eigenvalues/orbitals and MP2NO are calculated in this basis. nread_from_h5: integer. Default: 0 Number of orbitals to read from h5_add_orbs. h5_add_orbs: string. Default: '' Name of hdf5 file with additional orbitals to add to the basis set. nskip: integer. Default: 0 Number of states above the HOMO state of the solid to skip during the calculation of MP2 NOs. This can be used to avoid divergencies in metals. The assumption being that these states will be included in the orbital set directly. eigcut: fp number. Default: 1e-3 Cutoff used during the generation of the spin independent basis in UHF/GHF calculations. Only the eigenvalues of the overlap matrix (alpha/beta) above this cutoff are kept in the calculation. In order to reproduce the UHF/GHF energy accurately, this number must be set to a small value (e.g. 1e-8). nextracut: fp number. Default: 1e-6 Cutoff used when adding states from h5_add_orbs to the basis set. When a new state from the file is being added to the orbital set, the component along all current orbitals in the set is removed. The resulting (orthogonal) state is added only if the norm of the unnormalized orbital is larger than nextracut (state is afterwards normalized). This is used as a way to remove linear dependencies from the basis set. mp2noecut: fp number. Default: 1e-6 Cutoff used when adding natural orbitals from the MP2 RDM, only states with eigenvalue > mp2noecut will be kept. If this number is < 0.0, then a specific number of states is kept and is given by nint(-mp2noecut).
Calls the MP2NO routine in the driver.
[ "Calls", "the", "MP2NO", "routine", "in", "the", "driver", "." ]
def qe_driver_MP2NO(qe_info,out_prefix='pyscf_drv', appnos=False, diag_type='keep_occ', nread_from_h5=0,h5_add_orbs='',nskip=0, eigcut=1e-3,nextracut=1e-6,mp2noecut=1e-6,kappa=0.0,regp=0): """ Calls the MP2NO routine in the driver. Parameters ---------- qe_info: Python Dictionary. Dictionary with information from QE calculation, generated by qe_driver_init. out_prefix: string. Default: 'pyscf_drv' Prefix used in all the files generated by the driver. appnos: Bool. Default: False. If True, generates approximate natural orbitals. diag_type: string. Default: 'keep_occ' Defines the type of HF diagonalization performed before the MP2 calculation. Options: 'keep_occ': Only the virtual orbitals/eigenvalues are calculated. Occupied orbitals/eigenvalues are kept from the QE calculation. 'full': All orbitals/eigenvalues are recalculated. 'fullpw': A basis set is generated that contains all the plane waves below the QE wfn cutoff. The HF eigenvalues/orbitals and MP2NO are calculated in this basis. nread_from_h5: integer. Default: 0 Number of orbitals to read from h5_add_orbs. h5_add_orbs: string. Default: '' Name of hdf5 file with additional orbitals to add to the basis set. nskip: integer. Default: 0 Number of states above the HOMO state of the solid to skip during the calculation of MP2 NOs. This can be used to avoid divergencies in metals. The assumption being that these states will be included in the orbital set directly. eigcut: fp number. Default: 1e-3 Cutoff used during the generation of the spin independent basis in UHF/GHF calculations. Only the eigenvalues of the overlap matrix (alpha/beta) above this cutoff are kept in the calculation. In order to reproduce the UHF/GHF energy accurately, this number must be set to a small value (e.g. 1e-8). nextracut: fp number. Default: 1e-6 Cutoff used when adding states from h5_add_orbs to the basis set. When a new state from the file is being added to the orbital set, the component along all current orbitals in the set is removed. The resulting (orthogonal) state is added only if the norm of the unnormalized orbital is larger than nextracut (state is afterwards normalized). This is used as a way to remove linear dependencies from the basis set. mp2noecut: fp number. Default: 1e-6 Cutoff used when adding natural orbitals from the MP2 RDM, only states with eigenvalue > mp2noecut will be kept. If this number is < 0.0, then a specific number of states is kept and is given by nint(-mp2noecut). """ if diag_type=='fullpw': pyscf_driver_mp2no(out_prefix,True,diag_type,appnos, 0,'',nskip,0.0, 0.0,mp2noecut,kappa,regp) else: pyscf_driver_mp2no(out_prefix,True,diag_type,appnos, nread_from_h5,h5_add_orbs,nskip,eigcut, mp2noecut,nextracut,kappa,regp)
[ "def", "qe_driver_MP2NO", "(", "qe_info", ",", "out_prefix", "=", "'pyscf_drv'", ",", "appnos", "=", "False", ",", "diag_type", "=", "'keep_occ'", ",", "nread_from_h5", "=", "0", ",", "h5_add_orbs", "=", "''", ",", "nskip", "=", "0", ",", "eigcut", "=", "1e-3", ",", "nextracut", "=", "1e-6", ",", "mp2noecut", "=", "1e-6", ",", "kappa", "=", "0.0", ",", "regp", "=", "0", ")", ":", "if", "diag_type", "==", "'fullpw'", ":", "pyscf_driver_mp2no", "(", "out_prefix", ",", "True", ",", "diag_type", ",", "appnos", ",", "0", ",", "''", ",", "nskip", ",", "0.0", ",", "0.0", ",", "mp2noecut", ",", "kappa", ",", "regp", ")", "else", ":", "pyscf_driver_mp2no", "(", "out_prefix", ",", "True", ",", "diag_type", ",", "appnos", ",", "nread_from_h5", ",", "h5_add_orbs", ",", "nskip", ",", "eigcut", ",", "mp2noecut", ",", "nextracut", ",", "kappa", ",", "regp", ")" ]
https://github.com/QMCPACK/qmcpack/blob/d0948ab455e38364458740cc8e2239600a14c5cd/utils/afqmctools/afqmctools/utils/qe_driver.py#L359-L418
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/fractions.py
python
Fraction.__gt__
(a, b)
return a._subtractAndCompareToZero(b, operator.gt)
a > b
a > b
[ "a", ">", "b" ]
def __gt__(a, b): """a > b""" return a._subtractAndCompareToZero(b, operator.gt)
[ "def", "__gt__", "(", "a", ",", "b", ")", ":", "return", "a", ".", "_subtractAndCompareToZero", "(", "b", ",", "operator", ".", "gt", ")" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/fractions.py#L510-L512
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/aquabutton.py
python
AquaButton.Invalidate
(self)
Invalidate the saved bitmap and refresh the button.
Invalidate the saved bitmap and refresh the button.
[ "Invalidate", "the", "saved", "bitmap", "and", "refresh", "the", "button", "." ]
def Invalidate(self): """ Invalidate the saved bitmap and refresh the button. """ self._saveBitmap = True self._storedBitmap = wx.NullBitmap self.Refresh()
[ "def", "Invalidate", "(", "self", ")", ":", "self", ".", "_saveBitmap", "=", "True", "self", ".", "_storedBitmap", "=", "wx", ".", "NullBitmap", "self", ".", "Refresh", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aquabutton.py#L828-L834
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
dom/bindings/parser/WebIDL.py
python
Tokenizer.t_OTHER
(self, t)
return t
r'[^\t\n\r 0-9A-Z_a-z]
r'[^\t\n\r 0-9A-Z_a-z]
[ "r", "[", "^", "\\", "t", "\\", "n", "\\", "r", "0", "-", "9A", "-", "Z_a", "-", "z", "]" ]
def t_OTHER(self, t): r'[^\t\n\r 0-9A-Z_a-z]' t.type = self.keywords.get(t.value, 'OTHER') return t
[ "def", "t_OTHER", "(", "self", ",", "t", ")", ":", "t", ".", "type", "=", "self", ".", "keywords", ".", "get", "(", "t", ".", "value", ",", "'OTHER'", ")", "return", "t" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/dom/bindings/parser/WebIDL.py#L4100-L4103
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_misc.py
python
DateTime.Today
(*args, **kwargs)
return _misc_.DateTime_Today(*args, **kwargs)
Today() -> DateTime
Today() -> DateTime
[ "Today", "()", "-", ">", "DateTime" ]
def Today(*args, **kwargs): """Today() -> DateTime""" return _misc_.DateTime_Today(*args, **kwargs)
[ "def", "Today", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "DateTime_Today", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L3776-L3778
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/arrays/categorical.py
python
Categorical.sort_values
(self, inplace=False, ascending=True, na_position='last')
Sorts the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : boolean, default False Do operation in place. ascending : boolean, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- y : Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] Inplace sorting can be done as well: >>> c.sort_values(inplace=True) >>> c [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2.0, 2.0, NaN, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values() [2.0, 2.0, 5.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5.0, 2.0, 2.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2.0, 2.0, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5.0, 2.0, 2.0] Categories (2, int64): [2, 5]
Sorts the Categorical by category value returning a new Categorical by default.
[ "Sorts", "the", "Categorical", "by", "category", "value", "returning", "a", "new", "Categorical", "by", "default", "." ]
def sort_values(self, inplace=False, ascending=True, na_position='last'): """ Sorts the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : boolean, default False Do operation in place. ascending : boolean, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- y : Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] Inplace sorting can be done as well: >>> c.sort_values(inplace=True) >>> c [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2.0, 2.0, NaN, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values() [2.0, 2.0, 5.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5.0, 2.0, 2.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2.0, 2.0, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5.0, 2.0, 2.0] Categories (2, int64): [2, 5] """ inplace = validate_bool_kwarg(inplace, 'inplace') if na_position not in ['last', 'first']: msg = 'invalid na_position: {na_position!r}' raise ValueError(msg.format(na_position=na_position)) sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if inplace: self._codes = self._codes[sorted_idx] else: return self._constructor(values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True)
[ "def", "sort_values", "(", "self", ",", "inplace", "=", "False", ",", "ascending", "=", "True", ",", "na_position", "=", "'last'", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "na_position", "not", "in", "[", "'last'", ",", "'first'", "]", ":", "msg", "=", "'invalid na_position: {na_position!r}'", "raise", "ValueError", "(", "msg", ".", "format", "(", "na_position", "=", "na_position", ")", ")", "sorted_idx", "=", "nargsort", "(", "self", ",", "ascending", "=", "ascending", ",", "na_position", "=", "na_position", ")", "if", "inplace", ":", "self", ".", "_codes", "=", "self", ".", "_codes", "[", "sorted_idx", "]", "else", ":", "return", "self", ".", "_constructor", "(", "values", "=", "self", ".", "_codes", "[", "sorted_idx", "]", ",", "dtype", "=", "self", ".", "dtype", ",", "fastpath", "=", "True", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/arrays/categorical.py#L1563-L1651
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/mac/symbolicate_crash.py
python
CrashReport._CollectAddressesForImages
(self, images)
return collection
Iterates all the threads and stack frames and all the stack frames that are in a list of binary |images|. The result is a dictionary, keyed by the image name that maps to a list of tuples. Each is a 2-Tuple of (stack_frame, address)
Iterates all the threads and stack frames and all the stack frames that are in a list of binary |images|. The result is a dictionary, keyed by the image name that maps to a list of tuples. Each is a 2-Tuple of (stack_frame, address)
[ "Iterates", "all", "the", "threads", "and", "stack", "frames", "and", "all", "the", "stack", "frames", "that", "are", "in", "a", "list", "of", "binary", "|images|", ".", "The", "result", "is", "a", "dictionary", "keyed", "by", "the", "image", "name", "that", "maps", "to", "a", "list", "of", "tuples", ".", "Each", "is", "a", "2", "-", "Tuple", "of", "(", "stack_frame", "address", ")" ]
def _CollectAddressesForImages(self, images): """Iterates all the threads and stack frames and all the stack frames that are in a list of binary |images|. The result is a dictionary, keyed by the image name that maps to a list of tuples. Each is a 2-Tuple of (stack_frame, address)""" # Create the collection and initialize it with empty lists for each image. collection = {} for image in images: collection[image] = [] # Perform the iteration. for thread in self.threads: for frame in thread.stack: image_name = self._ImageForAddress(frame.address) if image_name in images: # Replace the image name in the frame in case it was elided. frame.image = image_name collection[frame.image].append((frame, frame.address)) # Return the result. return collection
[ "def", "_CollectAddressesForImages", "(", "self", ",", "images", ")", ":", "# Create the collection and initialize it with empty lists for each image.", "collection", "=", "{", "}", "for", "image", "in", "images", ":", "collection", "[", "image", "]", "=", "[", "]", "# Perform the iteration.", "for", "thread", "in", "self", ".", "threads", ":", "for", "frame", "in", "thread", ".", "stack", ":", "image_name", "=", "self", ".", "_ImageForAddress", "(", "frame", ".", "address", ")", "if", "image_name", "in", "images", ":", "# Replace the image name in the frame in case it was elided.", "frame", ".", "image", "=", "image_name", "collection", "[", "frame", ".", "image", "]", ".", "append", "(", "(", "frame", ",", "frame", ".", "address", ")", ")", "# Return the result.", "return", "collection" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/mac/symbolicate_crash.py#L288-L308
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/apiclient/googleapiclient/mimeparse.py
python
parse_mime_type
(mime_type)
return (type.strip(), subtype.strip(), params)
Parses a mime-type into its component parts. Carves up a mime-type and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/xhtml;q=0.5' would get parsed into: ('application', 'xhtml', {'q', '0.5'})
Parses a mime-type into its component parts.
[ "Parses", "a", "mime", "-", "type", "into", "its", "component", "parts", "." ]
def parse_mime_type(mime_type): """Parses a mime-type into its component parts. Carves up a mime-type and returns a tuple of the (type, subtype, params) where 'params' is a dictionary of all the parameters for the media range. For example, the media range 'application/xhtml;q=0.5' would get parsed into: ('application', 'xhtml', {'q', '0.5'}) """ parts = mime_type.split(';') params = dict([tuple([s.strip() for s in param.split('=', 1)])\ for param in parts[1:] ]) full_type = parts[0].strip() # Java URLConnection class sends an Accept header that includes a # single '*'. Turn it into a legal wildcard. if full_type == '*': full_type = '*/*' (type, subtype) = full_type.split('/') return (type.strip(), subtype.strip(), params)
[ "def", "parse_mime_type", "(", "mime_type", ")", ":", "parts", "=", "mime_type", ".", "split", "(", "';'", ")", "params", "=", "dict", "(", "[", "tuple", "(", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "param", ".", "split", "(", "'='", ",", "1", ")", "]", ")", "for", "param", "in", "parts", "[", "1", ":", "]", "]", ")", "full_type", "=", "parts", "[", "0", "]", ".", "strip", "(", ")", "# Java URLConnection class sends an Accept header that includes a", "# single '*'. Turn it into a legal wildcard.", "if", "full_type", "==", "'*'", ":", "full_type", "=", "'*/*'", "(", "type", ",", "subtype", ")", "=", "full_type", ".", "split", "(", "'/'", ")", "return", "(", "type", ".", "strip", "(", ")", ",", "subtype", ".", "strip", "(", ")", ",", "params", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/apiclient/googleapiclient/mimeparse.py#L35-L56
tkn-tub/ns3-gym
19bfe0a583e641142609939a090a09dfc63a095f
utils/grid.py
python
Timelines.__init__
(self)
Initializer @param self: this object
Initializer
[ "Initializer" ]
def __init__(self): """ Initializer @param self: this object """ self.timelines = []
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "timelines", "=", "[", "]" ]
https://github.com/tkn-tub/ns3-gym/blob/19bfe0a583e641142609939a090a09dfc63a095f/utils/grid.py#L380-L384
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/protobuf/py2/google/protobuf/internal/python_message.py
python
_IsPresent
(item)
Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().
Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().
[ "Given", "a", "(", "FieldDescriptor", "value", ")", "tuple", "from", "_fields", "return", "true", "if", "the", "value", "should", "be", "included", "in", "the", "list", "returned", "by", "ListFields", "()", "." ]
def _IsPresent(item): """Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().""" if item[0].label == _FieldDescriptor.LABEL_REPEATED: return bool(item[1]) elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: return item[1]._is_present_in_parent else: return True
[ "def", "_IsPresent", "(", "item", ")", ":", "if", "item", "[", "0", "]", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "return", "bool", "(", "item", "[", "1", "]", ")", "elif", "item", "[", "0", "]", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "return", "item", "[", "1", "]", ".", "_is_present_in_parent", "else", ":", "return", "True" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/protobuf/py2/google/protobuf/internal/python_message.py#L817-L826
Kitware/ParaView
f760af9124ff4634b23ebbeab95a4f56e0261955
Web/Python/paraview/web/protocols.py
python
ParaViewWebProtocol.mapIdToProxy
(self, id)
return simple.servermanager._getPyProxy( simple.servermanager.ActiveConnection.Session.GetRemoteObject(id) )
Maps global-id for a proxy to the proxy instance. May return None if the id is not valid.
Maps global-id for a proxy to the proxy instance. May return None if the id is not valid.
[ "Maps", "global", "-", "id", "for", "a", "proxy", "to", "the", "proxy", "instance", ".", "May", "return", "None", "if", "the", "id", "is", "not", "valid", "." ]
def mapIdToProxy(self, id): """ Maps global-id for a proxy to the proxy instance. May return None if the id is not valid. """ try: id = int(id) except: return None if id <= 0: return None return simple.servermanager._getPyProxy( simple.servermanager.ActiveConnection.Session.GetRemoteObject(id) )
[ "def", "mapIdToProxy", "(", "self", ",", "id", ")", ":", "try", ":", "id", "=", "int", "(", "id", ")", "except", ":", "return", "None", "if", "id", "<=", "0", ":", "return", "None", "return", "simple", ".", "servermanager", ".", "_getPyProxy", "(", "simple", ".", "servermanager", ".", "ActiveConnection", ".", "Session", ".", "GetRemoteObject", "(", "id", ")", ")" ]
https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Web/Python/paraview/web/protocols.py#L104-L117
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
calc_chksums
(buf)
return unsigned_chksum, signed_chksum
Calculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed.
Calculate the checksum for a member's header by summing up all
[ "Calculate", "the", "checksum", "for", "a", "member", "s", "header", "by", "summing", "up", "all" ]
def calc_chksums(buf): """Calculate the checksum for a member's header by summing up all characters except for the chksum field which is treated as if it was filled with spaces. According to the GNU tar sources, some tars (Sun and NeXT) calculate chksum with signed char, which will be different if there are chars in the buffer with the high bit set. So we calculate two checksums, unsigned and signed. """ unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) return unsigned_chksum, signed_chksum
[ "def", "calc_chksums", "(", "buf", ")", ":", "unsigned_chksum", "=", "256", "+", "sum", "(", "struct", ".", "unpack", "(", "\"148B\"", ",", "buf", "[", ":", "148", "]", ")", "+", "struct", ".", "unpack", "(", "\"356B\"", ",", "buf", "[", "156", ":", "512", "]", ")", ")", "signed_chksum", "=", "256", "+", "sum", "(", "struct", ".", "unpack", "(", "\"148b\"", ",", "buf", "[", ":", "148", "]", ")", "+", "struct", ".", "unpack", "(", "\"356b\"", ",", "buf", "[", "156", ":", "512", "]", ")", ")", "return", "unsigned_chksum", ",", "signed_chksum" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L485-L507
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/liquidation.py
python
Liquidation.symbol
(self, symbol)
Sets the symbol of this Liquidation. :param symbol: The symbol of this Liquidation. # noqa: E501 :type: str
Sets the symbol of this Liquidation.
[ "Sets", "the", "symbol", "of", "this", "Liquidation", "." ]
def symbol(self, symbol): """Sets the symbol of this Liquidation. :param symbol: The symbol of this Liquidation. # noqa: E501 :type: str """ self._symbol = symbol
[ "def", "symbol", "(", "self", ",", "symbol", ")", ":", "self", ".", "_symbol", "=", "symbol" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/liquidation.py#L103-L111
ROCmSoftwarePlatform/hipCaffe
4ec5d482515cce532348553b6db6d00d015675d5
scripts/cpp_lint.py
python
FileInfo.Split
(self)
return (project,) + os.path.splitext(rest)
Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension).
Splits the file into the directory, basename, and extension.
[ "Splits", "the", "file", "into", "the", "directory", "basename", "and", "extension", "." ]
def Split(self): """Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension). """ googlename = self.RepositoryName() project, rest = os.path.split(googlename) return (project,) + os.path.splitext(rest)
[ "def", "Split", "(", "self", ")", ":", "googlename", "=", "self", ".", "RepositoryName", "(", ")", "project", ",", "rest", "=", "os", ".", "path", ".", "split", "(", "googlename", ")", "return", "(", "project", ",", ")", "+", "os", ".", "path", ".", "splitext", "(", "rest", ")" ]
https://github.com/ROCmSoftwarePlatform/hipCaffe/blob/4ec5d482515cce532348553b6db6d00d015675d5/scripts/cpp_lint.py#L930-L942
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/collections/__init__.py
python
OrderedDict.setdefault
(self, key, default=None)
return default
Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default.
Insert key with a value of default if key is not in the dictionary.
[ "Insert", "key", "with", "a", "value", "of", "default", "if", "key", "is", "not", "in", "the", "dictionary", "." ]
def setdefault(self, key, default=None): '''Insert key with a value of default if key is not in the dictionary. Return the value for key if key is in the dictionary, else default. ''' if key in self: return self[key] self[key] = default return default
[ "def", "setdefault", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "if", "key", "in", "self", ":", "return", "self", "[", "key", "]", "self", "[", "key", "]", "=", "default", "return", "default" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/collections/__init__.py#L256-L264
rapidsai/cudf
d5b2448fc69f17509304d594f029d0df56984962
python/cudf/cudf/core/column/string.py
python
StringMethods.filter_alphanum
( self, repl: str = None, keep: bool = True )
return self._return_or_inplace( libstrings.filter_alphanum(self._column, cudf.Scalar(repl), keep), )
Remove non-alphanumeric characters from strings in this column. Parameters ---------- repl : str Optional string to use in place of removed characters. keep : bool Set to False to remove all alphanumeric characters instead of keeping them. Returns ------- Series/Index of str dtype Strings with only alphanumeric characters. Examples -------- >>> import cudf >>> s = cudf.Series(["pears £12", "plums $34", "Temp 72℉", "100K℧"]) >>> s.str.filter_alphanum(" ") 0 pears 12 1 plums 34 2 Temp 72 3 100K dtype: object
Remove non-alphanumeric characters from strings in this column.
[ "Remove", "non", "-", "alphanumeric", "characters", "from", "strings", "in", "this", "column", "." ]
def filter_alphanum( self, repl: str = None, keep: bool = True ) -> SeriesOrIndex: """ Remove non-alphanumeric characters from strings in this column. Parameters ---------- repl : str Optional string to use in place of removed characters. keep : bool Set to False to remove all alphanumeric characters instead of keeping them. Returns ------- Series/Index of str dtype Strings with only alphanumeric characters. Examples -------- >>> import cudf >>> s = cudf.Series(["pears £12", "plums $34", "Temp 72℉", "100K℧"]) >>> s.str.filter_alphanum(" ") 0 pears 12 1 plums 34 2 Temp 72 3 100K dtype: object """ if repl is None: repl = "" return self._return_or_inplace( libstrings.filter_alphanum(self._column, cudf.Scalar(repl), keep), )
[ "def", "filter_alphanum", "(", "self", ",", "repl", ":", "str", "=", "None", ",", "keep", ":", "bool", "=", "True", ")", "->", "SeriesOrIndex", ":", "if", "repl", "is", "None", ":", "repl", "=", "\"\"", "return", "self", ".", "_return_or_inplace", "(", "libstrings", ".", "filter_alphanum", "(", "self", ".", "_column", ",", "cudf", ".", "Scalar", "(", "repl", ")", ",", "keep", ")", ",", ")" ]
https://github.com/rapidsai/cudf/blob/d5b2448fc69f17509304d594f029d0df56984962/python/cudf/cudf/core/column/string.py#L1959-L1994
NVIDIA/DALI
bf16cc86ba8f091b145f91962f21fe1b6aff243d
docs/examples/use_cases/mxnet/resnetn/symbols/resnet.py
python
get_symbol
(num_classes, num_layers, image_shape, conv_workspace=256, dtype='float32', **kwargs)
return resnet(units = units, num_stages = num_stages, filter_list = filter_list, num_classes = num_classes, image_shape = image_shape, bottle_neck = bottle_neck, workspace = conv_workspace, dtype = dtype)
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py Original author Wei Wu
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py Original author Wei Wu
[ "Adapted", "from", "https", ":", "//", "github", ".", "com", "/", "tornadomeet", "/", "ResNet", "/", "blob", "/", "master", "/", "train_resnet", ".", "py", "Original", "author", "Wei", "Wu" ]
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, dtype='float32', **kwargs): """ Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py Original author Wei Wu """ image_shape = [int(l) for l in image_shape.split(',')] (nchannel, height, width) = image_shape if height <= 28: num_stages = 3 if (num_layers-2) % 9 == 0 and num_layers >= 164: per_unit = [(num_layers-2)//9] filter_list = [16, 64, 128, 256] bottle_neck = True elif (num_layers-2) % 6 == 0 and num_layers < 164: per_unit = [(num_layers-2)//6] filter_list = [16, 16, 32, 64] bottle_neck = False else: raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers)) units = per_unit * num_stages else: if num_layers >= 50: filter_list = [64, 256, 512, 1024, 2048] bottle_neck = True else: filter_list = [64, 64, 128, 256, 512] bottle_neck = False num_stages = 4 if num_layers == 18: units = [2, 2, 2, 2] elif num_layers == 34: units = [3, 4, 6, 3] elif num_layers == 50: units = [3, 4, 6, 3] elif num_layers == 101: units = [3, 4, 23, 3] elif num_layers == 152: units = [3, 8, 36, 3] elif num_layers == 200: units = [3, 24, 36, 3] elif num_layers == 269: units = [3, 30, 48, 8] else: raise ValueError("no experiments done on num_layers {}, you can do it yourself".format(num_layers)) return resnet(units = units, num_stages = num_stages, filter_list = filter_list, num_classes = num_classes, image_shape = image_shape, bottle_neck = bottle_neck, workspace = conv_workspace, dtype = dtype)
[ "def", "get_symbol", "(", "num_classes", ",", "num_layers", ",", "image_shape", ",", "conv_workspace", "=", "256", ",", "dtype", "=", "'float32'", ",", "*", "*", "kwargs", ")", ":", "image_shape", "=", "[", "int", "(", "l", ")", "for", "l", "in", "image_shape", ".", "split", "(", "','", ")", "]", "(", "nchannel", ",", "height", ",", "width", ")", "=", "image_shape", "if", "height", "<=", "28", ":", "num_stages", "=", "3", "if", "(", "num_layers", "-", "2", ")", "%", "9", "==", "0", "and", "num_layers", ">=", "164", ":", "per_unit", "=", "[", "(", "num_layers", "-", "2", ")", "//", "9", "]", "filter_list", "=", "[", "16", ",", "64", ",", "128", ",", "256", "]", "bottle_neck", "=", "True", "elif", "(", "num_layers", "-", "2", ")", "%", "6", "==", "0", "and", "num_layers", "<", "164", ":", "per_unit", "=", "[", "(", "num_layers", "-", "2", ")", "//", "6", "]", "filter_list", "=", "[", "16", ",", "16", ",", "32", ",", "64", "]", "bottle_neck", "=", "False", "else", ":", "raise", "ValueError", "(", "\"no experiments done on num_layers {}, you can do it yourself\"", ".", "format", "(", "num_layers", ")", ")", "units", "=", "per_unit", "*", "num_stages", "else", ":", "if", "num_layers", ">=", "50", ":", "filter_list", "=", "[", "64", ",", "256", ",", "512", ",", "1024", ",", "2048", "]", "bottle_neck", "=", "True", "else", ":", "filter_list", "=", "[", "64", ",", "64", ",", "128", ",", "256", ",", "512", "]", "bottle_neck", "=", "False", "num_stages", "=", "4", "if", "num_layers", "==", "18", ":", "units", "=", "[", "2", ",", "2", ",", "2", ",", "2", "]", "elif", "num_layers", "==", "34", ":", "units", "=", "[", "3", ",", "4", ",", "6", ",", "3", "]", "elif", "num_layers", "==", "50", ":", "units", "=", "[", "3", ",", "4", ",", "6", ",", "3", "]", "elif", "num_layers", "==", "101", ":", "units", "=", "[", "3", ",", "4", ",", "23", ",", "3", "]", "elif", "num_layers", "==", "152", ":", "units", "=", "[", "3", ",", "8", ",", "36", ",", "3", "]", "elif", "num_layers", "==", "200", ":", "units", "=", "[", "3", ",", "24", ",", "36", ",", "3", "]", "elif", "num_layers", "==", "269", ":", "units", "=", "[", "3", ",", "30", ",", "48", ",", "8", "]", "else", ":", "raise", "ValueError", "(", "\"no experiments done on num_layers {}, you can do it yourself\"", ".", "format", "(", "num_layers", ")", ")", "return", "resnet", "(", "units", "=", "units", ",", "num_stages", "=", "num_stages", ",", "filter_list", "=", "filter_list", ",", "num_classes", "=", "num_classes", ",", "image_shape", "=", "image_shape", ",", "bottle_neck", "=", "bottle_neck", ",", "workspace", "=", "conv_workspace", ",", "dtype", "=", "dtype", ")" ]
https://github.com/NVIDIA/DALI/blob/bf16cc86ba8f091b145f91962f21fe1b6aff243d/docs/examples/use_cases/mxnet/resnetn/symbols/resnet.py#L144-L196
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/io/sas/sas_xport.py
python
XportReader.get_chunk
(self, size=None)
return self.read(nrows=size)
Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame
Reads lines from Xport file and returns as dataframe
[ "Reads", "lines", "from", "Xport", "file", "and", "returns", "as", "dataframe" ]
def get_chunk(self, size=None): """ Reads lines from Xport file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size)
[ "def", "get_chunk", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "self", ".", "_chunksize", "return", "self", ".", "read", "(", "nrows", "=", "size", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/io/sas/sas_xport.py#L401-L416
1989Ryan/Semantic_SLAM
0284b3f832ca431c494f9c134fe46c40ec86ee38
Third_Part/PSPNet_Keras_tensorflow/Semantic_Information_Publisher.py
python
Semantic_Imformation_Publisher.callback
(self, image_msg)
call back funcion, which will send the image and category of each pixel
call back funcion, which will send the image and category of each pixel
[ "call", "back", "funcion", "which", "will", "send", "the", "image", "and", "category", "of", "each", "pixel" ]
def callback(self, image_msg): '''call back funcion, which will send the image and category of each pixel''' cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "rgb8") h_ori, w_ori = cv_image.shape[:2] with self.graph.as_default(): probs = self.pspnet.model.predict(self.img_proc(cv_image))[0] if cv_image.shape[0:1] != (713,713): # upscale prediction if necessary h, w = probs.shape[:2] probs = ndimage.zoom(probs, (1. * h_ori / h, 1. * w_ori / w, 1.), order=1, prefilter=False) rospy.loginfo("running") cm = np.argmax(probs, axis=2).astype(np.uint8) #print(probs) #print(cm) category = self._cv_bridge.cv2_to_imgmsg(cm) probs = self._cv_bridge.cv2_to_imgmsg(probs) f = frame() f.image = image_msg f.category = category self._pub.publish(f)
[ "def", "callback", "(", "self", ",", "image_msg", ")", ":", "cv_image", "=", "self", ".", "_cv_bridge", ".", "imgmsg_to_cv2", "(", "image_msg", ",", "\"rgb8\"", ")", "h_ori", ",", "w_ori", "=", "cv_image", ".", "shape", "[", ":", "2", "]", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "probs", "=", "self", ".", "pspnet", ".", "model", ".", "predict", "(", "self", ".", "img_proc", "(", "cv_image", ")", ")", "[", "0", "]", "if", "cv_image", ".", "shape", "[", "0", ":", "1", "]", "!=", "(", "713", ",", "713", ")", ":", "# upscale prediction if necessary", "h", ",", "w", "=", "probs", ".", "shape", "[", ":", "2", "]", "probs", "=", "ndimage", ".", "zoom", "(", "probs", ",", "(", "1.", "*", "h_ori", "/", "h", ",", "1.", "*", "w_ori", "/", "w", ",", "1.", ")", ",", "order", "=", "1", ",", "prefilter", "=", "False", ")", "rospy", ".", "loginfo", "(", "\"running\"", ")", "cm", "=", "np", ".", "argmax", "(", "probs", ",", "axis", "=", "2", ")", ".", "astype", "(", "np", ".", "uint8", ")", "#print(probs)", "#print(cm)", "category", "=", "self", ".", "_cv_bridge", ".", "cv2_to_imgmsg", "(", "cm", ")", "probs", "=", "self", ".", "_cv_bridge", ".", "cv2_to_imgmsg", "(", "probs", ")", "f", "=", "frame", "(", ")", "f", ".", "image", "=", "image_msg", "f", ".", "category", "=", "category", "self", ".", "_pub", ".", "publish", "(", "f", ")" ]
https://github.com/1989Ryan/Semantic_SLAM/blob/0284b3f832ca431c494f9c134fe46c40ec86ee38/Third_Part/PSPNet_Keras_tensorflow/Semantic_Information_Publisher.py#L44-L63
Slicer/SlicerGitSVNArchive
65e92bb16c2b32ea47a1a66bee71f238891ee1ca
Base/Python/tpycl/tpycl.py
python
tpycl.py_puts
(self, noNewLine, message)
print into the python shell
print into the python shell
[ "print", "into", "the", "python", "shell" ]
def py_puts(self, noNewLine, message): """ print into the python shell """ print(message) if noNewLine == "0": print("\n")
[ "def", "py_puts", "(", "self", ",", "noNewLine", ",", "message", ")", ":", "print", "(", "message", ")", "if", "noNewLine", "==", "\"0\"", ":", "print", "(", "\"\\n\"", ")" ]
https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Base/Python/tpycl/tpycl.py#L121-L126
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/eslint.py
python
callo
(args)
return subprocess.check_output(args).decode('utf-8')
Call a program, and capture its output.
Call a program, and capture its output.
[ "Call", "a", "program", "and", "capture", "its", "output", "." ]
def callo(args): """Call a program, and capture its output.""" return subprocess.check_output(args).decode('utf-8')
[ "def", "callo", "(", "args", ")", ":", "return", "subprocess", ".", "check_output", "(", "args", ")", ".", "decode", "(", "'utf-8'", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/eslint.py#L65-L67
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/lmbrwaflib/utils.py
python
copy_files_to_folder
(required_src_files, optional_src_files, dst_folder, status_format=None)
Copy files to a destination folder :param required_src_files: List of required files relative to the current path :param optional_src_files: List of optional files relative to the current path :param dst_folder: Target folder :param status_format: Optional format string to print the status of each copy
Copy files to a destination folder
[ "Copy", "files", "to", "a", "destination", "folder" ]
def copy_files_to_folder(required_src_files, optional_src_files, dst_folder, status_format=None): """ Copy files to a destination folder :param required_src_files: List of required files relative to the current path :param optional_src_files: List of optional files relative to the current path :param dst_folder: Target folder :param status_format: Optional format string to print the status of each copy """ if not os.path.exists(dst_folder): os.makedirs(dst_folder) for src_file in required_src_files+optional_src_files: if not os.path.isfile(src_file): raise ValueError('"{}" is not a file.'.format(src_file)) if not os.path.exists(src_file): if src_file in required_src_files: raise ValueError('Required file ({}) does not exist.'.format(src_file)) else: continue source_file_abs_path = os.path.abspath(src_file) dest_file_abs_path = os.path.join(os.path.abspath(dst_folder), os.path.basename(src_file)) if copy_file_if_needed(source_file_abs_path, dest_file_abs_path): if status_format: print((status_format.format(src_file)))
[ "def", "copy_files_to_folder", "(", "required_src_files", ",", "optional_src_files", ",", "dst_folder", ",", "status_format", "=", "None", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dst_folder", ")", ":", "os", ".", "makedirs", "(", "dst_folder", ")", "for", "src_file", "in", "required_src_files", "+", "optional_src_files", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "src_file", ")", ":", "raise", "ValueError", "(", "'\"{}\" is not a file.'", ".", "format", "(", "src_file", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "src_file", ")", ":", "if", "src_file", "in", "required_src_files", ":", "raise", "ValueError", "(", "'Required file ({}) does not exist.'", ".", "format", "(", "src_file", ")", ")", "else", ":", "continue", "source_file_abs_path", "=", "os", ".", "path", ".", "abspath", "(", "src_file", ")", "dest_file_abs_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "dst_folder", ")", ",", "os", ".", "path", ".", "basename", "(", "src_file", ")", ")", "if", "copy_file_if_needed", "(", "source_file_abs_path", ",", "dest_file_abs_path", ")", ":", "if", "status_format", ":", "print", "(", "(", "status_format", ".", "format", "(", "src_file", ")", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/utils.py#L238-L262
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/format.py
python
dtype_to_descr
(dtype)
Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype.
Get a serializable descriptor from the dtype.
[ "Get", "a", "serializable", "descriptor", "from", "the", "dtype", "." ]
def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if _has_metadata(dtype): warnings.warn("metadata on a dtype may be saved or ignored, but will " "raise if saved when read. Use another form of storage.", UserWarning, stacklevel=2) if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str
[ "def", "dtype_to_descr", "(", "dtype", ")", ":", "if", "_has_metadata", "(", "dtype", ")", ":", "warnings", ".", "warn", "(", "\"metadata on a dtype may be saved or ignored, but will \"", "\"raise if saved when read. Use another form of storage.\"", ",", "UserWarning", ",", "stacklevel", "=", "2", ")", "if", "dtype", ".", "names", "is", "not", "None", ":", "# This is a record array. The .descr is fine. XXX: parts of the", "# record array with an empty name, like padding bytes, still get", "# fiddled with. This needs to be fixed in the C implementation of", "# dtype().", "return", "dtype", ".", "descr", "else", ":", "return", "dtype", ".", "str" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/format.py#L255-L289
rbgirshick/caffe-fast-rcnn
28a579eaf0668850705598b3075b8969f22226d9
tools/extra/extract_seconds.py
python
get_log_created_year
(input_file)
return log_created_year
Get year from log file system timestamp
Get year from log file system timestamp
[ "Get", "year", "from", "log", "file", "system", "timestamp" ]
def get_log_created_year(input_file): """Get year from log file system timestamp """ log_created_time = os.path.getctime(input_file) log_created_year = datetime.datetime.fromtimestamp(log_created_time).year return log_created_year
[ "def", "get_log_created_year", "(", "input_file", ")", ":", "log_created_time", "=", "os", ".", "path", ".", "getctime", "(", "input_file", ")", "log_created_year", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "log_created_time", ")", ".", "year", "return", "log_created_year" ]
https://github.com/rbgirshick/caffe-fast-rcnn/blob/28a579eaf0668850705598b3075b8969f22226d9/tools/extra/extract_seconds.py#L22-L28
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/lite/python/convert.py
python
_run_deprecated_conversion_binary
(model_flags_str, conversion_flags_str, input_data_str, debug_info_str=None)
Convert `input_data_str` using deprecated conversion binary. Args: model_flags_str: Serialized proto describing model properties, see `model_flags.proto`. conversion_flags_str: Serialized proto describing TFLite converter properties, see `toco/toco_flags.proto`. input_data_str: Input data in serialized form (e.g. a graphdef is common) debug_info_str: Serialized `GraphDebugInfo` proto describing logging information. (default None) Returns: Converted model in serialized form (e.g. a TFLITE model is common). Raises: ConverterError: When cannot find the deprecated conversion binary. RuntimeError: When conversion fails, an exception is raised with the error message embedded.
Convert `input_data_str` using deprecated conversion binary.
[ "Convert", "input_data_str", "using", "deprecated", "conversion", "binary", "." ]
def _run_deprecated_conversion_binary(model_flags_str, conversion_flags_str, input_data_str, debug_info_str=None): """Convert `input_data_str` using deprecated conversion binary. Args: model_flags_str: Serialized proto describing model properties, see `model_flags.proto`. conversion_flags_str: Serialized proto describing TFLite converter properties, see `toco/toco_flags.proto`. input_data_str: Input data in serialized form (e.g. a graphdef is common) debug_info_str: Serialized `GraphDebugInfo` proto describing logging information. (default None) Returns: Converted model in serialized form (e.g. a TFLITE model is common). Raises: ConverterError: When cannot find the deprecated conversion binary. RuntimeError: When conversion fails, an exception is raised with the error message embedded. """ if distutils.spawn.find_executable(_deprecated_conversion_binary) is None: raise ConverterError("""Could not find `toco_from_protos` binary, make sure your virtualenv bin directory or pip local bin directory is in your path. In particular, if you have installed TensorFlow with --user, make sure you add the install directory to your path. For example: Linux: export PATH=$PATH:~/.local/bin/ Mac: export PATH=$PATH:~/Library/Python/<version#>/bin Alternative, use virtualenv.""") # Windows and TemporaryFile are not that useful together, # since you cannot have two readers/writers. So we have to # make the temporaries and close and delete them explicitly. conversion_filename, model_filename, input_filename, output_filename = ( None, None, None, None) try: # Build all input files with _tempfile.NamedTemporaryFile(delete=False) as fp_conversion, \ _tempfile.NamedTemporaryFile(delete=False) as fp_model, \ _tempfile.NamedTemporaryFile(delete=False) as fp_input, \ _tempfile.NamedTemporaryFile(delete=False) as fp_debug: conversion_filename = fp_conversion.name input_filename = fp_input.name model_filename = fp_model.name debug_filename = fp_debug.name fp_model.write(model_flags_str) fp_conversion.write(conversion_flags_str) fp_input.write(six.ensure_binary(input_data_str)) debug_info_str = debug_info_str if debug_info_str else "" # if debug_info_str contains a "string value", then the call to # fp_debug.write(debug_info_str) will fail with the following error # # TypeError: a bytes-like object is required, not 'str' # # Some of the subtests within the "convert_test" unit-test fail # with the error shown above. So watch out for that scenario and # convert debug_info_str to bytes where needed if not isinstance(debug_info_str, bytes): fp_debug.write(debug_info_str.encode("utf-8")) else: fp_debug.write(debug_info_str) # Reserve an output file with _tempfile.NamedTemporaryFile(delete=False) as fp: output_filename = fp.name # Run cmd = [ _deprecated_conversion_binary, model_filename, conversion_filename, input_filename, output_filename, "--debug_proto_file={}".format(debug_filename), ] cmdline = " ".join(cmd) is_windows = _platform.system() == "Windows" proc = _subprocess.Popen( cmdline, shell=True, stdout=_subprocess.PIPE, stderr=_subprocess.STDOUT, close_fds=not is_windows) stdout, stderr = proc.communicate() exitcode = proc.returncode if exitcode == 0: with open(output_filename, "rb") as fp: return fp.read() else: stdout = _try_convert_to_unicode(stdout) stderr = _try_convert_to_unicode(stderr) raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr)) finally: # Must manually cleanup files. for filename in [ conversion_filename, input_filename, model_filename, output_filename ]: try: _os.unlink(filename) except (OSError, TypeError): pass
[ "def", "_run_deprecated_conversion_binary", "(", "model_flags_str", ",", "conversion_flags_str", ",", "input_data_str", ",", "debug_info_str", "=", "None", ")", ":", "if", "distutils", ".", "spawn", ".", "find_executable", "(", "_deprecated_conversion_binary", ")", "is", "None", ":", "raise", "ConverterError", "(", "\"\"\"Could not find `toco_from_protos` binary, make sure\nyour virtualenv bin directory or pip local bin directory is in your path.\nIn particular, if you have installed TensorFlow with --user, make sure you\nadd the install directory to your path.\n\nFor example:\nLinux: export PATH=$PATH:~/.local/bin/\nMac: export PATH=$PATH:~/Library/Python/<version#>/bin\n\nAlternative, use virtualenv.\"\"\"", ")", "# Windows and TemporaryFile are not that useful together,", "# since you cannot have two readers/writers. So we have to", "# make the temporaries and close and delete them explicitly.", "conversion_filename", ",", "model_filename", ",", "input_filename", ",", "output_filename", "=", "(", "None", ",", "None", ",", "None", ",", "None", ")", "try", ":", "# Build all input files", "with", "_tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "fp_conversion", ",", "_tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "fp_model", ",", "_tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "fp_input", ",", "_tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "fp_debug", ":", "conversion_filename", "=", "fp_conversion", ".", "name", "input_filename", "=", "fp_input", ".", "name", "model_filename", "=", "fp_model", ".", "name", "debug_filename", "=", "fp_debug", ".", "name", "fp_model", ".", "write", "(", "model_flags_str", ")", "fp_conversion", ".", "write", "(", "conversion_flags_str", ")", "fp_input", ".", "write", "(", "six", ".", "ensure_binary", "(", "input_data_str", ")", ")", "debug_info_str", "=", "debug_info_str", "if", "debug_info_str", "else", "\"\"", "# if debug_info_str contains a \"string value\", then the call to", "# fp_debug.write(debug_info_str) will fail with the following error", "#", "# TypeError: a bytes-like object is required, not 'str'", "#", "# Some of the subtests within the \"convert_test\" unit-test fail", "# with the error shown above. So watch out for that scenario and", "# convert debug_info_str to bytes where needed", "if", "not", "isinstance", "(", "debug_info_str", ",", "bytes", ")", ":", "fp_debug", ".", "write", "(", "debug_info_str", ".", "encode", "(", "\"utf-8\"", ")", ")", "else", ":", "fp_debug", ".", "write", "(", "debug_info_str", ")", "# Reserve an output file", "with", "_tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "as", "fp", ":", "output_filename", "=", "fp", ".", "name", "# Run", "cmd", "=", "[", "_deprecated_conversion_binary", ",", "model_filename", ",", "conversion_filename", ",", "input_filename", ",", "output_filename", ",", "\"--debug_proto_file={}\"", ".", "format", "(", "debug_filename", ")", ",", "]", "cmdline", "=", "\" \"", ".", "join", "(", "cmd", ")", "is_windows", "=", "_platform", ".", "system", "(", ")", "==", "\"Windows\"", "proc", "=", "_subprocess", ".", "Popen", "(", "cmdline", ",", "shell", "=", "True", ",", "stdout", "=", "_subprocess", ".", "PIPE", ",", "stderr", "=", "_subprocess", ".", "STDOUT", ",", "close_fds", "=", "not", "is_windows", ")", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", ")", "exitcode", "=", "proc", ".", "returncode", "if", "exitcode", "==", "0", ":", "with", "open", "(", "output_filename", ",", "\"rb\"", ")", "as", "fp", ":", "return", "fp", ".", "read", "(", ")", "else", ":", "stdout", "=", "_try_convert_to_unicode", "(", "stdout", ")", "stderr", "=", "_try_convert_to_unicode", "(", "stderr", ")", "raise", "ConverterError", "(", "\"See console for info.\\n%s\\n%s\\n\"", "%", "(", "stdout", ",", "stderr", ")", ")", "finally", ":", "# Must manually cleanup files.", "for", "filename", "in", "[", "conversion_filename", ",", "input_filename", ",", "model_filename", ",", "output_filename", "]", ":", "try", ":", "_os", ".", "unlink", "(", "filename", ")", "except", "(", "OSError", ",", "TypeError", ")", ":", "pass" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/lite/python/convert.py#L314-L418
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_controls.py
python
SpinButton.SetValue
(*args, **kwargs)
return _controls_.SpinButton_SetValue(*args, **kwargs)
SetValue(self, int val)
SetValue(self, int val)
[ "SetValue", "(", "self", "int", "val", ")" ]
def SetValue(*args, **kwargs): """SetValue(self, int val)""" return _controls_.SpinButton_SetValue(*args, **kwargs)
[ "def", "SetValue", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "SpinButton_SetValue", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L2266-L2268
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/smtplib.py
python
SMTP.rcpt
(self, recip, options=[])
return self.getreply()
SMTP 'rcpt' command -- indicates 1 recipient for this mail.
SMTP 'rcpt' command -- indicates 1 recipient for this mail.
[ "SMTP", "rcpt", "command", "--", "indicates", "1", "recipient", "for", "this", "mail", "." ]
def rcpt(self, recip, options=[]): """SMTP 'rcpt' command -- indicates 1 recipient for this mail.""" optionlist = '' if options and self.does_esmtp: optionlist = ' ' + ' '.join(options) self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist)) return self.getreply()
[ "def", "rcpt", "(", "self", ",", "recip", ",", "options", "=", "[", "]", ")", ":", "optionlist", "=", "''", "if", "options", "and", "self", ".", "does_esmtp", ":", "optionlist", "=", "' '", "+", "' '", ".", "join", "(", "options", ")", "self", ".", "putcmd", "(", "\"rcpt\"", ",", "\"TO:%s%s\"", "%", "(", "quoteaddr", "(", "recip", ")", ",", "optionlist", ")", ")", "return", "self", ".", "getreply", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/smtplib.py#L475-L481
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
PaletteChangedEvent.SetChangedWindow
(*args, **kwargs)
return _core_.PaletteChangedEvent_SetChangedWindow(*args, **kwargs)
SetChangedWindow(self, Window win)
SetChangedWindow(self, Window win)
[ "SetChangedWindow", "(", "self", "Window", "win", ")" ]
def SetChangedWindow(*args, **kwargs): """SetChangedWindow(self, Window win)""" return _core_.PaletteChangedEvent_SetChangedWindow(*args, **kwargs)
[ "def", "SetChangedWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "PaletteChangedEvent_SetChangedWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L7170-L7172
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/client/timeline.py
python
_ChromeTraceFormatter.emit_tid
(self, name, pid, tid)
Adds a thread metadata event to the trace. Args: name: The thread name as a string. pid: Identifier of the process as an integer. tid: Identifier of the thread as an integer.
Adds a thread metadata event to the trace.
[ "Adds", "a", "thread", "metadata", "event", "to", "the", "trace", "." ]
def emit_tid(self, name, pid, tid): """Adds a thread metadata event to the trace. Args: name: The thread name as a string. pid: Identifier of the process as an integer. tid: Identifier of the thread as an integer. """ event = {} event['name'] = 'thread_name' event['ph'] = 'M' event['pid'] = pid event['tid'] = tid event['args'] = {'name': name} self._metadata.append(event)
[ "def", "emit_tid", "(", "self", ",", "name", ",", "pid", ",", "tid", ")", ":", "event", "=", "{", "}", "event", "[", "'name'", "]", "=", "'thread_name'", "event", "[", "'ph'", "]", "=", "'M'", "event", "[", "'pid'", "]", "=", "pid", "event", "[", "'tid'", "]", "=", "tid", "event", "[", "'args'", "]", "=", "{", "'name'", ":", "name", "}", "self", ".", "_metadata", ".", "append", "(", "event", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/client/timeline.py#L104-L118
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
clang/bindings/python/clang/cindex.py
python
Cursor.semantic_parent
(self)
return self._semantic_parent
Return the semantic parent for this cursor.
Return the semantic parent for this cursor.
[ "Return", "the", "semantic", "parent", "for", "this", "cursor", "." ]
def semantic_parent(self): """Return the semantic parent for this cursor.""" if not hasattr(self, '_semantic_parent'): self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self) return self._semantic_parent
[ "def", "semantic_parent", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_semantic_parent'", ")", ":", "self", ".", "_semantic_parent", "=", "conf", ".", "lib", ".", "clang_getCursorSemanticParent", "(", "self", ")", "return", "self", ".", "_semantic_parent" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/clang/bindings/python/clang/cindex.py#L1757-L1762
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
samples/ide/activegrid/tool/AbstractEditor.py
python
CanvasView.OnRightClick
(self, event)
return (shape, model)
force selection underneath right click position.
force selection underneath right click position.
[ "force", "selection", "underneath", "right", "click", "position", "." ]
def OnRightClick(self, event): """ force selection underneath right click position. """ self.Activate() self._canvas.SetFocus() dc = wx.ClientDC(self._canvas) self._canvas.PrepareDC(dc) x, y = event.GetLogicalPosition(dc) # this takes into account scrollbar offset self.SetLastRightClick(x, y) shape = self._canvas.FindShape(x, y)[0] model = None if not shape: self.SetSelection(None) self.SetPropertyShape(None) elif hasattr(shape, "GetModel"): self.BringToFront(shape) self.SetPropertyShape(shape) self.SetSelection(shape) shape.Select(True, dc) model = shape.GetModel() elif shape.GetParent() and isinstance(shape.GetParent(), ogl.CompositeShape): # ComplexTypeHeader for ComplexTypeShape self.BringToFront(shape) self.SetPropertyShape(shape.GetParent()) self.SetSelection(shape.GetParent()) shape.GetParent().Select(True, dc) model = shape.GetParent().GetModel() self.SetPropertyModel(model) return (shape, model)
[ "def", "OnRightClick", "(", "self", ",", "event", ")", ":", "self", ".", "Activate", "(", ")", "self", ".", "_canvas", ".", "SetFocus", "(", ")", "dc", "=", "wx", ".", "ClientDC", "(", "self", ".", "_canvas", ")", "self", ".", "_canvas", ".", "PrepareDC", "(", "dc", ")", "x", ",", "y", "=", "event", ".", "GetLogicalPosition", "(", "dc", ")", "# this takes into account scrollbar offset", "self", ".", "SetLastRightClick", "(", "x", ",", "y", ")", "shape", "=", "self", ".", "_canvas", ".", "FindShape", "(", "x", ",", "y", ")", "[", "0", "]", "model", "=", "None", "if", "not", "shape", ":", "self", ".", "SetSelection", "(", "None", ")", "self", ".", "SetPropertyShape", "(", "None", ")", "elif", "hasattr", "(", "shape", ",", "\"GetModel\"", ")", ":", "self", ".", "BringToFront", "(", "shape", ")", "self", ".", "SetPropertyShape", "(", "shape", ")", "self", ".", "SetSelection", "(", "shape", ")", "shape", ".", "Select", "(", "True", ",", "dc", ")", "model", "=", "shape", ".", "GetModel", "(", ")", "elif", "shape", ".", "GetParent", "(", ")", "and", "isinstance", "(", "shape", ".", "GetParent", "(", ")", ",", "ogl", ".", "CompositeShape", ")", ":", "# ComplexTypeHeader for ComplexTypeShape", "self", ".", "BringToFront", "(", "shape", ")", "self", ".", "SetPropertyShape", "(", "shape", ".", "GetParent", "(", ")", ")", "self", ".", "SetSelection", "(", "shape", ".", "GetParent", "(", ")", ")", "shape", ".", "GetParent", "(", ")", ".", "Select", "(", "True", ",", "dc", ")", "model", "=", "shape", ".", "GetParent", "(", ")", ".", "GetModel", "(", ")", "self", ".", "SetPropertyModel", "(", "model", ")", "return", "(", "shape", ",", "model", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/ide/activegrid/tool/AbstractEditor.py#L218-L248
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/python/ops/math_grad.py
python
_SparseSegmentSumGrad
(op, grad)
return (math_ops.unsorted_segment_sum( array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None, None)
Gradient for SparseSegmentSum.
Gradient for SparseSegmentSum.
[ "Gradient", "for", "SparseSegmentSum", "." ]
def _SparseSegmentSumGrad(op, grad): """Gradient for SparseSegmentSum.""" input_rows = array_ops.shape(op.inputs[0])[0] return (math_ops.unsorted_segment_sum( array_ops.gather(grad, op.inputs[2]), op.inputs[1], input_rows), None, None)
[ "def", "_SparseSegmentSumGrad", "(", "op", ",", "grad", ")", ":", "input_rows", "=", "array_ops", ".", "shape", "(", "op", ".", "inputs", "[", "0", "]", ")", "[", "0", "]", "return", "(", "math_ops", ".", "unsorted_segment_sum", "(", "array_ops", ".", "gather", "(", "grad", ",", "op", ".", "inputs", "[", "2", "]", ")", ",", "op", ".", "inputs", "[", "1", "]", ",", "input_rows", ")", ",", "None", ",", "None", ")" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/math_grad.py#L169-L174
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/math_ops.py
python
trace
(x, name=None)
Compute the trace of a tensor `x`. `trace(x)` returns the sum along the main diagonal of each inner-most matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])` For example: ```python x = tf.constant([[1, 2], [3, 4]]) tf.linalg.trace(x) # 5 x = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) tf.linalg.trace(x) # 15 x = tf.constant([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]]]) tf.linalg.trace(x) # [15, -15] ``` Args: x: tensor. name: A name for the operation (optional). Returns: The trace of input tensor.
Compute the trace of a tensor `x`.
[ "Compute", "the", "trace", "of", "a", "tensor", "x", "." ]
def trace(x, name=None): """Compute the trace of a tensor `x`. `trace(x)` returns the sum along the main diagonal of each inner-most matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])` For example: ```python x = tf.constant([[1, 2], [3, 4]]) tf.linalg.trace(x) # 5 x = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) tf.linalg.trace(x) # 15 x = tf.constant([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]]]) tf.linalg.trace(x) # [15, -15] ``` Args: x: tensor. name: A name for the operation (optional). Returns: The trace of input tensor. """ with ops.name_scope(name, "Trace", [x]) as name: x = ops.convert_to_tensor(x, name="x") return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
[ "def", "trace", "(", "x", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "name_scope", "(", "name", ",", "\"Trace\"", ",", "[", "x", "]", ")", "as", "name", ":", "x", "=", "ops", ".", "convert_to_tensor", "(", "x", ",", "name", "=", "\"x\"", ")", "return", "reduce_sum", "(", "array_ops", ".", "matrix_diag_part", "(", "x", ")", ",", "[", "-", "1", "]", ",", "name", "=", "name", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/math_ops.py#L2524-L2562
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/propgrid.py
python
PropertyGridInterface.DoDefaultTypeMappings
(self)
Map built-in properties.
Map built-in properties.
[ "Map", "built", "-", "in", "properties", "." ]
def DoDefaultTypeMappings(self): "Map built-in properties." global _type2property try: mappings = _type2property return except NameError: mappings = {} _type2property = mappings mappings[str] = StringProperty mappings[unicode] = StringProperty mappings[int] = IntProperty mappings[float] = FloatProperty mappings[bool] = BoolProperty mappings[list] = ArrayStringProperty mappings[tuple] = ArrayStringProperty mappings[wx.Font] = FontProperty mappings[wx.Colour] = ColourProperty "mappings[wx.Size] = SizeProperty" "mappings[wx.Point] = PointProperty" "mappings[wx.FontData] = FontDataProperty"
[ "def", "DoDefaultTypeMappings", "(", "self", ")", ":", "global", "_type2property", "try", ":", "mappings", "=", "_type2property", "return", "except", "NameError", ":", "mappings", "=", "{", "}", "_type2property", "=", "mappings", "mappings", "[", "str", "]", "=", "StringProperty", "mappings", "[", "unicode", "]", "=", "StringProperty", "mappings", "[", "int", "]", "=", "IntProperty", "mappings", "[", "float", "]", "=", "FloatProperty", "mappings", "[", "bool", "]", "=", "BoolProperty", "mappings", "[", "list", "]", "=", "ArrayStringProperty", "mappings", "[", "tuple", "]", "=", "ArrayStringProperty", "mappings", "[", "wx", ".", "Font", "]", "=", "FontProperty", "mappings", "[", "wx", ".", "Colour", "]", "=", "ColourProperty", "\"mappings[wx.Size] = SizeProperty\"", "\"mappings[wx.Point] = PointProperty\"", "\"mappings[wx.FontData] = FontDataProperty\"" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/propgrid.py#L1495-L1517
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/selectors.py
python
BaseSelector.register
(self, fileobj, events, data=None)
Register a file object. Parameters: fileobj -- file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: ValueError if events is invalid KeyError if fileobj is already registered OSError if fileobj is closed or otherwise is unacceptable to the underlying system call (if a system call is made) Note: OSError may or may not be raised
Register a file object.
[ "Register", "a", "file", "object", "." ]
def register(self, fileobj, events, data=None): """Register a file object. Parameters: fileobj -- file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: ValueError if events is invalid KeyError if fileobj is already registered OSError if fileobj is closed or otherwise is unacceptable to the underlying system call (if a system call is made) Note: OSError may or may not be raised """ raise NotImplementedError
[ "def", "register", "(", "self", ",", "fileobj", ",", "events", ",", "data", "=", "None", ")", ":", "raise", "NotImplementedError" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/selectors.py#L96-L116
FEniCS/dolfinx
3dfdf038cccdb70962865b58a63bf29c2e55ec6e
utils/pylit/pylit.py
python
Text2Code.documentation_handler
(self, lines)
Convert documentation blocks from text to code format
Convert documentation blocks from text to code format
[ "Convert", "documentation", "blocks", "from", "text", "to", "code", "format" ]
def documentation_handler(self, lines): """Convert documentation blocks from text to code format """ for line in lines: # test lines following the code-block marker for false positives if (self.state == "code_block" and line.rstrip() and not self.directive_option_regexp.search(line)): self.state = "documentation" # test for end of documentation block if self.marker_regexp.search(line): self.state = "code_block" self._textindent = self.get_indent(line) # yield lines if self.strip: continue # do not comment blank lines preceding a code block if self.state == "code_block" and not line.rstrip(): yield line else: yield self.comment_string + line
[ "def", "documentation_handler", "(", "self", ",", "lines", ")", ":", "for", "line", "in", "lines", ":", "# test lines following the code-block marker for false positives", "if", "(", "self", ".", "state", "==", "\"code_block\"", "and", "line", ".", "rstrip", "(", ")", "and", "not", "self", ".", "directive_option_regexp", ".", "search", "(", "line", ")", ")", ":", "self", ".", "state", "=", "\"documentation\"", "# test for end of documentation block", "if", "self", ".", "marker_regexp", ".", "search", "(", "line", ")", ":", "self", ".", "state", "=", "\"code_block\"", "self", ".", "_textindent", "=", "self", ".", "get_indent", "(", "line", ")", "# yield lines", "if", "self", ".", "strip", ":", "continue", "# do not comment blank lines preceding a code block", "if", "self", ".", "state", "==", "\"code_block\"", "and", "not", "line", ".", "rstrip", "(", ")", ":", "yield", "line", "else", ":", "yield", "self", ".", "comment_string", "+", "line" ]
https://github.com/FEniCS/dolfinx/blob/3dfdf038cccdb70962865b58a63bf29c2e55ec6e/utils/pylit/pylit.py#L801-L820
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/resources.py
python
finder
(package)
return result
Return a resource finder for a package. :param package: The name of the package. :return: A :class:`ResourceFinder` instance for the package.
Return a resource finder for a package. :param package: The name of the package. :return: A :class:`ResourceFinder` instance for the package.
[ "Return", "a", "resource", "finder", "for", "a", "package", ".", ":", "param", "package", ":", "The", "name", "of", "the", "package", ".", ":", "return", ":", "A", ":", "class", ":", "ResourceFinder", "instance", "for", "the", "package", "." ]
def finder(package): """ Return a resource finder for a package. :param package: The name of the package. :return: A :class:`ResourceFinder` instance for the package. """ if package in _finder_cache: result = _finder_cache[package] else: if package not in sys.modules: __import__(package) module = sys.modules[package] path = getattr(module, '__path__', None) if path is None: raise DistlibException('You cannot get a finder for a module, ' 'only for a package') loader = getattr(module, '__loader__', None) finder_maker = _finder_registry.get(type(loader)) if finder_maker is None: raise DistlibException('Unable to locate finder for %r' % package) result = finder_maker(module) _finder_cache[package] = result return result
[ "def", "finder", "(", "package", ")", ":", "if", "package", "in", "_finder_cache", ":", "result", "=", "_finder_cache", "[", "package", "]", "else", ":", "if", "package", "not", "in", "sys", ".", "modules", ":", "__import__", "(", "package", ")", "module", "=", "sys", ".", "modules", "[", "package", "]", "path", "=", "getattr", "(", "module", ",", "'__path__'", ",", "None", ")", "if", "path", "is", "None", ":", "raise", "DistlibException", "(", "'You cannot get a finder for a module, '", "'only for a package'", ")", "loader", "=", "getattr", "(", "module", ",", "'__loader__'", ",", "None", ")", "finder_maker", "=", "_finder_registry", ".", "get", "(", "type", "(", "loader", ")", ")", "if", "finder_maker", "is", "None", ":", "raise", "DistlibException", "(", "'Unable to locate finder for %r'", "%", "package", ")", "result", "=", "finder_maker", "(", "module", ")", "_finder_cache", "[", "package", "]", "=", "result", "return", "result" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/resources.py#L282-L304
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py
python
indentedBlock
(blockStatementExpr, indentStack, indent=True)
return smExpr.setName('indented block')
Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
[]
def indentedBlock(blockStatementExpr, indentStack, indent=True): """ Helper method for defining space-delimited indentation blocks, such as those used to define block statements in Python source code. Parameters: - blockStatementExpr - expression defining syntax of statement that is repeated within the indented block - indentStack - list created by caller to manage indentation stack (multiple statementWithIndentedBlock expressions within a single grammar should share a common indentStack) - indent - boolean indicating whether block must be indented beyond the the current level; set to False for block of left-most statements (default=C{True}) A valid block must contain at least one C{blockStatement}. Example:: data = ''' def A(z): A1 B = 100 G = A2 A2 A3 B def BB(a,b,c): BB1 def BBA(): bba1 bba2 bba3 C D def spam(x,y): def eggs(z): pass ''' indentStack = [1] stmt = Forward() identifier = Word(alphas, alphanums) funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") func_body = indentedBlock(stmt, indentStack) funcDef = Group( funcDecl + func_body ) rvalue = Forward() funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") rvalue << (funcCall | identifier | Word(nums)) assignment = Group(identifier + "=" + rvalue) stmt << ( funcDef | assignment | identifier ) module_body = OneOrMore(stmt) parseTree = module_body.parseString(data) parseTree.pprint() prints:: [['def', 'A', ['(', 'z', ')'], ':', [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], 'B', ['def', 'BB', ['(', 'a', 'b', 'c', ')'], ':', [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], 'C', 'D', ['def', 'spam', ['(', 'x', 'y', ')'], ':', [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] """ def checkPeerIndent(s,l,t): if l >= len(s): return curCol = col(l,s) if curCol != indentStack[-1]: if curCol > indentStack[-1]: raise ParseFatalException(s,l,"illegal nesting") raise ParseException(s,l,"not a peer entry") def checkSubIndent(s,l,t): curCol = col(l,s) if curCol > indentStack[-1]: indentStack.append( curCol ) else: raise ParseException(s,l,"not a subentry") def checkUnindent(s,l,t): if l >= len(s): return curCol = col(l,s) if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): raise ParseException(s,l,"not an unindent") indentStack.pop() NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') PEER = Empty().setParseAction(checkPeerIndent).setName('') UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') if indent: smExpr = Group( Optional(NL) + #~ FollowedBy(blockStatementExpr) + INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) else: smExpr = Group( Optional(NL) + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) blockStatementExpr.ignore(_bslash + LineEnd()) return smExpr.setName('indented block')
[ "def", "indentedBlock", "(", "blockStatementExpr", ",", "indentStack", ",", "indent", "=", "True", ")", ":", "def", "checkPeerIndent", "(", "s", ",", "l", ",", "t", ")", ":", "if", "l", ">=", "len", "(", "s", ")", ":", "return", "curCol", "=", "col", "(", "l", ",", "s", ")", "if", "curCol", "!=", "indentStack", "[", "-", "1", "]", ":", "if", "curCol", ">", "indentStack", "[", "-", "1", "]", ":", "raise", "ParseFatalException", "(", "s", ",", "l", ",", "\"illegal nesting\"", ")", "raise", "ParseException", "(", "s", ",", "l", ",", "\"not a peer entry\"", ")", "def", "checkSubIndent", "(", "s", ",", "l", ",", "t", ")", ":", "curCol", "=", "col", "(", "l", ",", "s", ")", "if", "curCol", ">", "indentStack", "[", "-", "1", "]", ":", "indentStack", ".", "append", "(", "curCol", ")", "else", ":", "raise", "ParseException", "(", "s", ",", "l", ",", "\"not a subentry\"", ")", "def", "checkUnindent", "(", "s", ",", "l", ",", "t", ")", ":", "if", "l", ">=", "len", "(", "s", ")", ":", "return", "curCol", "=", "col", "(", "l", ",", "s", ")", "if", "not", "(", "indentStack", "and", "curCol", "<", "indentStack", "[", "-", "1", "]", "and", "curCol", "<=", "indentStack", "[", "-", "2", "]", ")", ":", "raise", "ParseException", "(", "s", ",", "l", ",", "\"not an unindent\"", ")", "indentStack", ".", "pop", "(", ")", "NL", "=", "OneOrMore", "(", "LineEnd", "(", ")", ".", "setWhitespaceChars", "(", "\"\\t \"", ")", ".", "suppress", "(", ")", ")", "INDENT", "=", "(", "Empty", "(", ")", "+", "Empty", "(", ")", ".", "setParseAction", "(", "checkSubIndent", ")", ")", ".", "setName", "(", "'INDENT'", ")", "PEER", "=", "Empty", "(", ")", ".", "setParseAction", "(", "checkPeerIndent", ")", ".", "setName", "(", "''", ")", "UNDENT", "=", "Empty", "(", ")", ".", "setParseAction", "(", "checkUnindent", ")", ".", "setName", "(", "'UNINDENT'", ")", "if", "indent", ":", "smExpr", "=", "Group", "(", "Optional", "(", "NL", ")", "+", "#~ FollowedBy(blockStatementExpr) +", "INDENT", "+", "(", "OneOrMore", "(", "PEER", "+", "Group", "(", "blockStatementExpr", ")", "+", "Optional", "(", "NL", ")", ")", ")", "+", "UNDENT", ")", "else", ":", "smExpr", "=", "Group", "(", "Optional", "(", "NL", ")", "+", "(", "OneOrMore", "(", "PEER", "+", "Group", "(", "blockStatementExpr", ")", "+", "Optional", "(", "NL", ")", ")", ")", ")", "blockStatementExpr", ".", "ignore", "(", "_bslash", "+", "LineEnd", "(", ")", ")", "return", "smExpr", ".", "setName", "(", "'indented block'", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py#L10493-L10717
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/distributed/pipeline/sync/skip/tracker.py
python
SkipTrackerThroughPotals.copy
( self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str, )
Copies the skip tensor in the corresponding portal. The given micro-batch and the portal will be tied with :class:`Fork` and :class:`Join`.
Copies the skip tensor in the corresponding portal. The given micro-batch and the portal will be tied with :class:`Fork` and :class:`Join`.
[ "Copies", "the", "skip", "tensor", "in", "the", "corresponding", "portal", ".", "The", "given", "micro", "-", "batch", "and", "the", "portal", "will", "be", "tied", "with", ":", "class", ":", "Fork", "and", ":", "class", ":", "Join", "." ]
def copy( self, batch: Batch, prev_stream: AbstractStream, next_stream: AbstractStream, ns: Namespace, name: str, ) -> None: """Copies the skip tensor in the corresponding portal. The given micro-batch and the portal will be tied with :class:`Fork` and :class:`Join`. """ assert self.skip_layout.requires_copy(ns, name) tensor_idx = batch.find_tensor_idx() batch[tensor_idx], phony = fork(batch[tensor_idx]) portal = self.portals[(ns, name)] phony = portal.copy(prev_stream, next_stream, phony) batch[tensor_idx] = join(batch[tensor_idx], phony)
[ "def", "copy", "(", "self", ",", "batch", ":", "Batch", ",", "prev_stream", ":", "AbstractStream", ",", "next_stream", ":", "AbstractStream", ",", "ns", ":", "Namespace", ",", "name", ":", "str", ",", ")", "->", "None", ":", "assert", "self", ".", "skip_layout", ".", "requires_copy", "(", "ns", ",", "name", ")", "tensor_idx", "=", "batch", ".", "find_tensor_idx", "(", ")", "batch", "[", "tensor_idx", "]", ",", "phony", "=", "fork", "(", "batch", "[", "tensor_idx", "]", ")", "portal", "=", "self", ".", "portals", "[", "(", "ns", ",", "name", ")", "]", "phony", "=", "portal", ".", "copy", "(", "prev_stream", ",", "next_stream", ",", "phony", ")", "batch", "[", "tensor_idx", "]", "=", "join", "(", "batch", "[", "tensor_idx", "]", ",", "phony", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/distributed/pipeline/sync/skip/tracker.py#L127-L142
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/util.py
python
convert_path
(pathname)
return os.path.join(*paths)
Return 'pathname' as a name that will work on the native filesystem. The path is split on '/' and put back together again using the current directory separator. Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them in the filesystem. Raises ValueError on non-Unix-ish systems if 'pathname' either starts or ends with a slash.
Return 'pathname' as a name that will work on the native filesystem.
[ "Return", "pathname", "as", "a", "name", "that", "will", "work", "on", "the", "native", "filesystem", "." ]
def convert_path(pathname): """Return 'pathname' as a name that will work on the native filesystem. The path is split on '/' and put back together again using the current directory separator. Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them in the filesystem. Raises ValueError on non-Unix-ish systems if 'pathname' either starts or ends with a slash. """ if os.sep == '/': return pathname if not pathname: return pathname if pathname[0] == '/': raise ValueError("path '%s' cannot be absolute" % pathname) if pathname[-1] == '/': raise ValueError("path '%s' cannot end with '/'" % pathname) paths = pathname.split('/') while os.curdir in paths: paths.remove(os.curdir) if not paths: return os.curdir return os.path.join(*paths)
[ "def", "convert_path", "(", "pathname", ")", ":", "if", "os", ".", "sep", "==", "'/'", ":", "return", "pathname", "if", "not", "pathname", ":", "return", "pathname", "if", "pathname", "[", "0", "]", "==", "'/'", ":", "raise", "ValueError", "(", "\"path '%s' cannot be absolute\"", "%", "pathname", ")", "if", "pathname", "[", "-", "1", "]", "==", "'/'", ":", "raise", "ValueError", "(", "\"path '%s' cannot end with '/'\"", "%", "pathname", ")", "paths", "=", "pathname", ".", "split", "(", "'/'", ")", "while", "os", ".", "curdir", "in", "paths", ":", "paths", ".", "remove", "(", "os", ".", "curdir", ")", "if", "not", "paths", ":", "return", "os", ".", "curdir", "return", "os", ".", "path", ".", "join", "(", "*", "paths", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/util.py#L213-L237
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/richtext.py
python
RichTextCtrl.BeginCharacterStyle
(*args, **kwargs)
return _richtext.RichTextCtrl_BeginCharacterStyle(*args, **kwargs)
BeginCharacterStyle(self, String characterStyle) -> bool Begin named character style
BeginCharacterStyle(self, String characterStyle) -> bool
[ "BeginCharacterStyle", "(", "self", "String", "characterStyle", ")", "-", ">", "bool" ]
def BeginCharacterStyle(*args, **kwargs): """ BeginCharacterStyle(self, String characterStyle) -> bool Begin named character style """ return _richtext.RichTextCtrl_BeginCharacterStyle(*args, **kwargs)
[ "def", "BeginCharacterStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextCtrl_BeginCharacterStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/richtext.py#L3561-L3567
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/py/editwindow.py
python
EditWindow.CanEdit
(self)
return not self.GetReadOnly()
Return True if editing should succeed.
Return True if editing should succeed.
[ "Return", "True", "if", "editing", "should", "succeed", "." ]
def CanEdit(self): """Return True if editing should succeed.""" return not self.GetReadOnly()
[ "def", "CanEdit", "(", "self", ")", ":", "return", "not", "self", ".", "GetReadOnly", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/py/editwindow.py#L232-L234
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py
python
relaxNgSchema.relaxNGNewValidCtxt
(self)
return __tmp
Create an XML RelaxNGs validation context based on the given schema
Create an XML RelaxNGs validation context based on the given schema
[ "Create", "an", "XML", "RelaxNGs", "validation", "context", "based", "on", "the", "given", "schema" ]
def relaxNGNewValidCtxt(self): """Create an XML RelaxNGs validation context based on the given schema """ ret = libxml2mod.xmlRelaxNGNewValidCtxt(self._o) if ret is None:raise treeError('xmlRelaxNGNewValidCtxt() failed') __tmp = relaxNgValidCtxt(_obj=ret) __tmp.schema = self return __tmp
[ "def", "relaxNGNewValidCtxt", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlRelaxNGNewValidCtxt", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlRelaxNGNewValidCtxt() failed'", ")", "__tmp", "=", "relaxNgValidCtxt", "(", "_obj", "=", "ret", ")", "__tmp", ".", "schema", "=", "self", "return", "__tmp" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L6264-L6271
ONLYOFFICE/core
1f976ae79a2593fc22ee78e9fdbb76090e83785c
DesktopEditor/freetype_names/freetype-2.5.3/src/tools/docmaker/content.py
python
ContentProcessor.__init__
( self )
initialize a block content processor
initialize a block content processor
[ "initialize", "a", "block", "content", "processor" ]
def __init__( self ): """initialize a block content processor""" self.reset() self.sections = {} # dictionary of documentation sections self.section = None # current documentation section self.chapters = [] # list of chapters self.headers = {}
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "reset", "(", ")", "self", ".", "sections", "=", "{", "}", "# dictionary of documentation sections", "self", ".", "section", "=", "None", "# current documentation section", "self", ".", "chapters", "=", "[", "]", "# list of chapters", "self", ".", "headers", "=", "{", "}" ]
https://github.com/ONLYOFFICE/core/blob/1f976ae79a2593fc22ee78e9fdbb76090e83785c/DesktopEditor/freetype_names/freetype-2.5.3/src/tools/docmaker/content.py#L333-L342
baoboa/pyqt5
11d5f43bc6f213d9d60272f3954a0048569cfc7c
configure.py
python
TargetConfiguration.get_win32_debug_suffix
(self)
return '_d' if self.py_debug else ''
Return the debug-dependent suffix appended to the name of Windows libraries.
Return the debug-dependent suffix appended to the name of Windows libraries.
[ "Return", "the", "debug", "-", "dependent", "suffix", "appended", "to", "the", "name", "of", "Windows", "libraries", "." ]
def get_win32_debug_suffix(self): """ Return the debug-dependent suffix appended to the name of Windows libraries. """ return '_d' if self.py_debug else ''
[ "def", "get_win32_debug_suffix", "(", "self", ")", ":", "return", "'_d'", "if", "self", ".", "py_debug", "else", "''" ]
https://github.com/baoboa/pyqt5/blob/11d5f43bc6f213d9d60272f3954a0048569cfc7c/configure.py#L728-L733
etotheipi/BitcoinArmory
2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98
armoryengine/PyBtcAddress.py
python
calcWalletIDFromRoot
(root, chain)
return binary_to_base58((ADDRBYTE + first.getAddr160()[:5])[::-1])
Helper method for computing a wallet ID
Helper method for computing a wallet ID
[ "Helper", "method", "for", "computing", "a", "wallet", "ID" ]
def calcWalletIDFromRoot(root, chain): """ Helper method for computing a wallet ID """ root = PyBtcAddress().createFromPlainKeyData(SecureBinaryData(root)) root.chaincode = SecureBinaryData(chain) first = root.extendAddressChain() return binary_to_base58((ADDRBYTE + first.getAddr160()[:5])[::-1])
[ "def", "calcWalletIDFromRoot", "(", "root", ",", "chain", ")", ":", "root", "=", "PyBtcAddress", "(", ")", ".", "createFromPlainKeyData", "(", "SecureBinaryData", "(", "root", ")", ")", "root", ".", "chaincode", "=", "SecureBinaryData", "(", "chain", ")", "first", "=", "root", ".", "extendAddressChain", "(", ")", "return", "binary_to_base58", "(", "(", "ADDRBYTE", "+", "first", ".", "getAddr160", "(", ")", "[", ":", "5", "]", ")", "[", ":", ":", "-", "1", "]", ")" ]
https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/armoryengine/PyBtcAddress.py#L24-L29
wyrover/book-code
7f4883d9030d553bc6bcfa3da685e34789839900
3rdparty/protobuf/python/google/protobuf/internal/encoder.py
python
_FixedSizer
(value_size)
return SpecificSizer
Like _SimpleSizer except for a fixed-size field. The input is the size of one value.
Like _SimpleSizer except for a fixed-size field. The input is the size of one value.
[ "Like", "_SimpleSizer", "except", "for", "a", "fixed", "-", "size", "field", ".", "The", "input", "is", "the", "size", "of", "one", "value", "." ]
def _FixedSizer(value_size): """Like _SimpleSizer except for a fixed-size field. The input is the size of one value.""" def SpecificSizer(field_number, is_repeated, is_packed): tag_size = _TagSize(field_number) if is_packed: local_VarintSize = _VarintSize def PackedFieldSize(value): result = len(value) * value_size return result + local_VarintSize(result) + tag_size return PackedFieldSize elif is_repeated: element_size = value_size + tag_size def RepeatedFieldSize(value): return len(value) * element_size return RepeatedFieldSize else: field_size = value_size + tag_size def FieldSize(value): return field_size return FieldSize return SpecificSizer
[ "def", "_FixedSizer", "(", "value_size", ")", ":", "def", "SpecificSizer", "(", "field_number", ",", "is_repeated", ",", "is_packed", ")", ":", "tag_size", "=", "_TagSize", "(", "field_number", ")", "if", "is_packed", ":", "local_VarintSize", "=", "_VarintSize", "def", "PackedFieldSize", "(", "value", ")", ":", "result", "=", "len", "(", "value", ")", "*", "value_size", "return", "result", "+", "local_VarintSize", "(", "result", ")", "+", "tag_size", "return", "PackedFieldSize", "elif", "is_repeated", ":", "element_size", "=", "value_size", "+", "tag_size", "def", "RepeatedFieldSize", "(", "value", ")", ":", "return", "len", "(", "value", ")", "*", "element_size", "return", "RepeatedFieldSize", "else", ":", "field_size", "=", "value_size", "+", "tag_size", "def", "FieldSize", "(", "value", ")", ":", "return", "field_size", "return", "FieldSize", "return", "SpecificSizer" ]
https://github.com/wyrover/book-code/blob/7f4883d9030d553bc6bcfa3da685e34789839900/3rdparty/protobuf/python/google/protobuf/internal/encoder.py#L184-L207
learnforpractice/pyeos
4f04eb982c86c1fdb413084af77c713a6fda3070
libraries/vm/vm_cpython_ss/lib/codecs.py
python
StreamWriter.reset
(self)
Flushes and resets the codec buffers used for keeping state. Calling this method should ensure that the data on the output is put into a clean state, that allows appending of new fresh data without having to rescan the whole stream to recover state.
Flushes and resets the codec buffers used for keeping state.
[ "Flushes", "and", "resets", "the", "codec", "buffers", "used", "for", "keeping", "state", "." ]
def reset(self): """ Flushes and resets the codec buffers used for keeping state. Calling this method should ensure that the data on the output is put into a clean state, that allows appending of new fresh data without having to rescan the whole stream to recover state. """ pass
[ "def", "reset", "(", "self", ")", ":", "pass" ]
https://github.com/learnforpractice/pyeos/blob/4f04eb982c86c1fdb413084af77c713a6fda3070/libraries/vm/vm_cpython_ss/lib/codecs.py#L386-L396
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py
python
Text.scan_mark
(self, x, y)
Remember the current X, Y coordinates.
Remember the current X, Y coordinates.
[ "Remember", "the", "current", "X", "Y", "coordinates", "." ]
def scan_mark(self, x, y): """Remember the current X, Y coordinates.""" self.tk.call(self._w, 'scan', 'mark', x, y)
[ "def", "scan_mark", "(", "self", ",", "x", ",", "y", ")", ":", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'scan'", ",", "'mark'", ",", "x", ",", "y", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/tkinter/__init__.py#L3312-L3314
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/runpy.py
python
_run_module_code
(code, init_globals=None, mod_name=None, mod_fname=None, mod_loader=None, pkg_name=None)
return mod_globals.copy()
Helper to run code in new namespace with sys modified
Helper to run code in new namespace with sys modified
[ "Helper", "to", "run", "code", "in", "new", "namespace", "with", "sys", "modified" ]
def _run_module_code(code, init_globals=None, mod_name=None, mod_fname=None, mod_loader=None, pkg_name=None): """Helper to run code in new namespace with sys modified""" with _TempModule(mod_name) as temp_module, _ModifiedArgv0(mod_fname): mod_globals = temp_module.module.__dict__ _run_code(code, mod_globals, init_globals, mod_name, mod_fname, mod_loader, pkg_name) # Copy the globals of the temporary module, as they # may be cleared when the temporary module goes away return mod_globals.copy()
[ "def", "_run_module_code", "(", "code", ",", "init_globals", "=", "None", ",", "mod_name", "=", "None", ",", "mod_fname", "=", "None", ",", "mod_loader", "=", "None", ",", "pkg_name", "=", "None", ")", ":", "with", "_TempModule", "(", "mod_name", ")", "as", "temp_module", ",", "_ModifiedArgv0", "(", "mod_fname", ")", ":", "mod_globals", "=", "temp_module", ".", "module", ".", "__dict__", "_run_code", "(", "code", ",", "mod_globals", ",", "init_globals", ",", "mod_name", ",", "mod_fname", ",", "mod_loader", ",", "pkg_name", ")", "# Copy the globals of the temporary module, as they", "# may be cleared when the temporary module goes away", "return", "mod_globals", ".", "copy", "(", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/runpy.py#L75-L85
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Source/ThirdParty/CEF3/cef_source/tools/patch_updater.py
python
warn
(message)
Output a warning.
Output a warning.
[ "Output", "a", "warning", "." ]
def warn(message): """ Output a warning. """ sys.stdout.write('-' * 80 + "\n") sys.stdout.write('!!!! WARNING: ' + message + "\n") sys.stdout.write('-' * 80 + "\n")
[ "def", "warn", "(", "message", ")", ":", "sys", ".", "stdout", ".", "write", "(", "'-'", "*", "80", "+", "\"\\n\"", ")", "sys", ".", "stdout", ".", "write", "(", "'!!!! WARNING: '", "+", "message", "+", "\"\\n\"", ")", "sys", ".", "stdout", ".", "write", "(", "'-'", "*", "80", "+", "\"\\n\"", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/cef_source/tools/patch_updater.py#L16-L20
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
current/deps/v8/tools/release/check_clusterfuzz.py
python
GetLatestV8InChromium
()
return commit["number"]
Returns the commit position number of the latest v8 roll in chromium.
Returns the commit position number of the latest v8 roll in chromium.
[ "Returns", "the", "commit", "position", "number", "of", "the", "latest", "v8", "roll", "in", "chromium", "." ]
def GetLatestV8InChromium(): """Returns the commit position number of the latest v8 roll in chromium.""" # Check currently rolled v8 revision. result = GetRequest(DEPS_LOG) if not result: return None # Strip security header and load json. commits = json.loads(result[5:]) git_revision = None for commit in commits["log"]: # Get latest commit that matches the v8 roll pattern. Ignore cherry-picks. match = re.match(V8_COMMIT_RE, commit["message"]) if match: git_revision = match.group(1) break else: return None # Get commit position number for v8 revision. result = GetRequest(CRREV % git_revision) if not result: return None commit = json.loads(result) assert commit["repo"] == "v8/v8" return commit["number"]
[ "def", "GetLatestV8InChromium", "(", ")", ":", "# Check currently rolled v8 revision.", "result", "=", "GetRequest", "(", "DEPS_LOG", ")", "if", "not", "result", ":", "return", "None", "# Strip security header and load json.", "commits", "=", "json", ".", "loads", "(", "result", "[", "5", ":", "]", ")", "git_revision", "=", "None", "for", "commit", "in", "commits", "[", "\"log\"", "]", ":", "# Get latest commit that matches the v8 roll pattern. Ignore cherry-picks.", "match", "=", "re", ".", "match", "(", "V8_COMMIT_RE", ",", "commit", "[", "\"message\"", "]", ")", "if", "match", ":", "git_revision", "=", "match", ".", "group", "(", "1", ")", "break", "else", ":", "return", "None", "# Get commit position number for v8 revision.", "result", "=", "GetRequest", "(", "CRREV", "%", "git_revision", ")", "if", "not", "result", ":", "return", "None", "commit", "=", "json", ".", "loads", "(", "result", ")", "assert", "commit", "[", "\"repo\"", "]", "==", "\"v8/v8\"", "return", "commit", "[", "\"number\"", "]" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/deps/v8/tools/release/check_clusterfuzz.py#L130-L158
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib2to3/pgen2/parse.py
python
Parser.addtoken
(self, type, value, context)
Add a token; return True iff this is the end of the program.
Add a token; return True iff this is the end of the program.
[ "Add", "a", "token", ";", "return", "True", "iff", "this", "is", "the", "end", "of", "the", "program", "." ]
def addtoken(self, type, value, context): """Add a token; return True iff this is the end of the program.""" # Map from token to label ilabel = self.classify(type, value, context) # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] states, first = dfa arcs = states[state] # Look for a state with this label for i, newstate in arcs: t, v = self.grammar.labels[i] if ilabel == i: # Look it up in the list of labels assert t < 256 # Shift a token; we're done with it self.shift(type, value, newstate, context) # Pop while we are in an accept-only state state = newstate while states[state] == [(0, state)]: self.pop() if not self.stack: # Done parsing! return True dfa, state, node = self.stack[-1] states, first = dfa # Done with this token return False elif t >= 256: # See if it's a symbol and if we're in its first set itsdfa = self.grammar.dfas[t] itsstates, itsfirst = itsdfa if ilabel in itsfirst: # Push a symbol self.push(t, self.grammar.dfas[t], newstate, context) break # To continue the outer while loop else: if (0, state) in arcs: # An accepting state, pop it and try something else self.pop() if not self.stack: # Done parsing, but another token is input raise ParseError("too much input", type, value, context) else: # No success finding a transition raise ParseError("bad input", type, value, context)
[ "def", "addtoken", "(", "self", ",", "type", ",", "value", ",", "context", ")", ":", "# Map from token to label", "ilabel", "=", "self", ".", "classify", "(", "type", ",", "value", ",", "context", ")", "# Loop until the token is shifted; may raise exceptions", "while", "True", ":", "dfa", ",", "state", ",", "node", "=", "self", ".", "stack", "[", "-", "1", "]", "states", ",", "first", "=", "dfa", "arcs", "=", "states", "[", "state", "]", "# Look for a state with this label", "for", "i", ",", "newstate", "in", "arcs", ":", "t", ",", "v", "=", "self", ".", "grammar", ".", "labels", "[", "i", "]", "if", "ilabel", "==", "i", ":", "# Look it up in the list of labels", "assert", "t", "<", "256", "# Shift a token; we're done with it", "self", ".", "shift", "(", "type", ",", "value", ",", "newstate", ",", "context", ")", "# Pop while we are in an accept-only state", "state", "=", "newstate", "while", "states", "[", "state", "]", "==", "[", "(", "0", ",", "state", ")", "]", ":", "self", ".", "pop", "(", ")", "if", "not", "self", ".", "stack", ":", "# Done parsing!", "return", "True", "dfa", ",", "state", ",", "node", "=", "self", ".", "stack", "[", "-", "1", "]", "states", ",", "first", "=", "dfa", "# Done with this token", "return", "False", "elif", "t", ">=", "256", ":", "# See if it's a symbol and if we're in its first set", "itsdfa", "=", "self", ".", "grammar", ".", "dfas", "[", "t", "]", "itsstates", ",", "itsfirst", "=", "itsdfa", "if", "ilabel", "in", "itsfirst", ":", "# Push a symbol", "self", ".", "push", "(", "t", ",", "self", ".", "grammar", ".", "dfas", "[", "t", "]", ",", "newstate", ",", "context", ")", "break", "# To continue the outer while loop", "else", ":", "if", "(", "0", ",", "state", ")", "in", "arcs", ":", "# An accepting state, pop it and try something else", "self", ".", "pop", "(", ")", "if", "not", "self", ".", "stack", ":", "# Done parsing, but another token is input", "raise", "ParseError", "(", "\"too much input\"", ",", "type", ",", "value", ",", "context", ")", "else", ":", "# No success finding a transition", "raise", "ParseError", "(", "\"bad input\"", ",", "type", ",", "value", ",", "context", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib2to3/pgen2/parse.py#L113-L159
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/misc.py
python
ask_password
(message)
return getpass.getpass(message)
Ask for a password interactively.
Ask for a password interactively.
[ "Ask", "for", "a", "password", "interactively", "." ]
def ask_password(message): # type: (str) -> str """Ask for a password interactively.""" _check_no_input(message) return getpass.getpass(message)
[ "def", "ask_password", "(", "message", ")", ":", "# type: (str) -> str", "_check_no_input", "(", "message", ")", "return", "getpass", ".", "getpass", "(", "message", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/utils/misc.py#L479-L487
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/dtypes/cast.py
python
maybe_convert_objects
(values, convert_dates=True, convert_numeric=True, convert_timedeltas=True, copy=True)
return values
if we have an object dtype, try to coerce dates and/or numbers
if we have an object dtype, try to coerce dates and/or numbers
[ "if", "we", "have", "an", "object", "dtype", "try", "to", "coerce", "dates", "and", "/", "or", "numbers" ]
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True, convert_timedeltas=True, copy=True): """ if we have an object dtype, try to coerce dates and/or numbers """ # if we have passed in a list or scalar if isinstance(values, (list, tuple)): values = np.array(values, dtype=np.object_) if not hasattr(values, 'dtype'): values = np.array([values], dtype=np.object_) # convert dates if convert_dates and values.dtype == np.object_: # we take an aggressive stance and convert to datetime64[ns] if convert_dates == 'coerce': new_values = maybe_cast_to_datetime( values, 'M8[ns]', errors='coerce') # if we are all nans then leave me alone if not isna(new_values).all(): values = new_values else: values = lib.maybe_convert_objects(values, convert_datetime=convert_dates) # convert timedeltas if convert_timedeltas and values.dtype == np.object_: if convert_timedeltas == 'coerce': from pandas.core.tools.timedeltas import to_timedelta new_values = to_timedelta(values, errors='coerce') # if we are all nans then leave me alone if not isna(new_values).all(): values = new_values else: values = lib.maybe_convert_objects( values, convert_timedelta=convert_timedeltas) # convert to numeric if values.dtype == np.object_: if convert_numeric: try: new_values = lib.maybe_convert_numeric(values, set(), coerce_numeric=True) # if we are all nans then leave me alone if not isna(new_values).all(): values = new_values except Exception: pass else: # soft-conversion values = lib.maybe_convert_objects(values) values = values.copy() if copy else values return values
[ "def", "maybe_convert_objects", "(", "values", ",", "convert_dates", "=", "True", ",", "convert_numeric", "=", "True", ",", "convert_timedeltas", "=", "True", ",", "copy", "=", "True", ")", ":", "# if we have passed in a list or scalar", "if", "isinstance", "(", "values", ",", "(", "list", ",", "tuple", ")", ")", ":", "values", "=", "np", ".", "array", "(", "values", ",", "dtype", "=", "np", ".", "object_", ")", "if", "not", "hasattr", "(", "values", ",", "'dtype'", ")", ":", "values", "=", "np", ".", "array", "(", "[", "values", "]", ",", "dtype", "=", "np", ".", "object_", ")", "# convert dates", "if", "convert_dates", "and", "values", ".", "dtype", "==", "np", ".", "object_", ":", "# we take an aggressive stance and convert to datetime64[ns]", "if", "convert_dates", "==", "'coerce'", ":", "new_values", "=", "maybe_cast_to_datetime", "(", "values", ",", "'M8[ns]'", ",", "errors", "=", "'coerce'", ")", "# if we are all nans then leave me alone", "if", "not", "isna", "(", "new_values", ")", ".", "all", "(", ")", ":", "values", "=", "new_values", "else", ":", "values", "=", "lib", ".", "maybe_convert_objects", "(", "values", ",", "convert_datetime", "=", "convert_dates", ")", "# convert timedeltas", "if", "convert_timedeltas", "and", "values", ".", "dtype", "==", "np", ".", "object_", ":", "if", "convert_timedeltas", "==", "'coerce'", ":", "from", "pandas", ".", "core", ".", "tools", ".", "timedeltas", "import", "to_timedelta", "new_values", "=", "to_timedelta", "(", "values", ",", "errors", "=", "'coerce'", ")", "# if we are all nans then leave me alone", "if", "not", "isna", "(", "new_values", ")", ".", "all", "(", ")", ":", "values", "=", "new_values", "else", ":", "values", "=", "lib", ".", "maybe_convert_objects", "(", "values", ",", "convert_timedelta", "=", "convert_timedeltas", ")", "# convert to numeric", "if", "values", ".", "dtype", "==", "np", ".", "object_", ":", "if", "convert_numeric", ":", "try", ":", "new_values", "=", "lib", ".", "maybe_convert_numeric", "(", "values", ",", "set", "(", ")", ",", "coerce_numeric", "=", "True", ")", "# if we are all nans then leave me alone", "if", "not", "isna", "(", "new_values", ")", ".", "all", "(", ")", ":", "values", "=", "new_values", "except", "Exception", ":", "pass", "else", ":", "# soft-conversion", "values", "=", "lib", ".", "maybe_convert_objects", "(", "values", ")", "values", "=", "values", ".", "copy", "(", ")", "if", "copy", "else", "values", "return", "values" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/dtypes/cast.py#L707-L767
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/series.py
python
Series.view
(self, dtype=None)
return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self)
Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8
Create a new view of the Series.
[ "Create", "a", "new", "view", "of", "the", "Series", "." ]
def view(self, dtype=None): """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self)
[ "def", "view", "(", "self", ",", "dtype", "=", "None", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "_values", ".", "view", "(", "dtype", ")", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/series.py#L597-L664
GoSSIP-SJTU/Armariris
ad5d868482956b2194a77b39c8d543c7c2318200
tools/clang/docs/tools/dump_ast_matchers.py
python
act_on_decl
(declaration, comment, allowed_types)
Parse the matcher out of the given declaration and comment. If 'allowed_types' is set, it contains a list of node types the matcher can match on, as extracted from the static type asserts in the matcher definition.
Parse the matcher out of the given declaration and comment.
[ "Parse", "the", "matcher", "out", "of", "the", "given", "declaration", "and", "comment", "." ]
def act_on_decl(declaration, comment, allowed_types): """Parse the matcher out of the given declaration and comment. If 'allowed_types' is set, it contains a list of node types the matcher can match on, as extracted from the static type asserts in the matcher definition. """ if declaration.strip(): # Node matchers are defined by writing: # VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name; m = re.match(r""".*Variadic(?:DynCast)?AllOfMatcher\s*< \s*([^\s,]+)\s*(?:, \s*([^\s>]+)\s*)?> \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X) if m: result, inner, name = m.groups() if not inner: inner = result add_matcher(result, name, 'Matcher<%s>...' % inner, comment, is_dyncast=True) return # Parse the various matcher definition macros. m = re.match(""".*AST_TYPE_MATCHER\( \s*([^\s,]+\s*), \s*([^\s,]+\s*) \)\s*;\s*$""", declaration, flags=re.X) if m: inner, name = m.groups() add_matcher('Type', name, 'Matcher<%s>...' % inner, comment, is_dyncast=True) # FIXME: re-enable once we have implemented casting on the TypeLoc # hierarchy. # add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner, # comment, is_dyncast=True) return m = re.match(""".*AST_TYPE(LOC)?_TRAVERSE_MATCHER\( \s*([^\s,]+\s*), \s*(?:[^\s,]+\s*), \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\) \)\s*;\s*$""", declaration, flags=re.X) if m: loc, name, results = m.groups()[0:3] result_types = [r.strip() for r in results.split(',')] comment_result_types = extract_result_types(comment) if (comment_result_types and sorted(result_types) != sorted(comment_result_types)): raise Exception('Inconsistent documentation for: %s' % name) for result_type in result_types: add_matcher(result_type, name, 'Matcher<Type>', comment) if loc: add_matcher('%sLoc' % result_type, '%sLoc' % name, 'Matcher<TypeLoc>', comment) return m = re.match(r"""^\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\( \s*([^\s,]+)\s*, \s*AST_POLYMORPHIC_SUPPORTED_TYPES\(([^)]*)\) (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*\d+\s*)? \)\s*{\s*$""", declaration, flags=re.X) if m: p, n, name, results = m.groups()[0:4] args = m.groups()[4:] result_types = [r.strip() for r in results.split(',')] if allowed_types and allowed_types != result_types: raise Exception('Inconsistent documentation for: %s' % name) if n not in ['', '2']: raise Exception('Cannot parse "%s"' % declaration) args = ', '.join('%s %s' % (args[i], args[i+1]) for i in range(0, len(args), 2) if args[i]) for result_type in result_types: add_matcher(result_type, name, args, comment) return m = re.match(r"""^\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\( (?:\s*([^\s,]+)\s*,)? \s*([^\s,]+)\s* (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*\d+\s*)? \)\s*{\s*$""", declaration, flags=re.X) if m: p, n, result, name = m.groups()[0:4] args = m.groups()[4:] if n not in ['', '2']: raise Exception('Cannot parse "%s"' % declaration) args = ', '.join('%s %s' % (args[i], args[i+1]) for i in range(0, len(args), 2) if args[i]) add_matcher(result, name, args, comment) return m = re.match(r"""^\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\( (?:\s*([^\s,]+)\s*,)? \s*([^\s,]+)\s* (?:,\s*([^,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*([^\s,]+)\s* ,\s*([^\s,]+)\s*)? (?:,\s*\d+\s*)? \)\s*{\s*$""", declaration, flags=re.X) if m: p, n, result, name = m.groups()[0:4] args = m.groups()[4:] if not result: if not allowed_types: raise Exception('Did not find allowed result types for: %s' % name) result_types = allowed_types else: result_types = [result] if n not in ['', '2']: raise Exception('Cannot parse "%s"' % declaration) args = ', '.join('%s %s' % (args[i], args[i+1]) for i in range(0, len(args), 2) if args[i]) for result_type in result_types: add_matcher(result_type, name, args, comment) return # Parse ArgumentAdapting matchers. m = re.match( r"""^.*ArgumentAdaptingMatcherFunc<.*>\s*(?:LLVM_ATTRIBUTE_UNUSED\s*) ([a-zA-Z]*)\s*=\s*{};$""", declaration, flags=re.X) if m: name = m.groups()[0] add_matcher('*', name, 'Matcher<*>', comment) return # Parse Variadic functions. m = re.match( r"""^.*internal::VariadicFunction\s*<\s*([^,]+),\s*([^,]+),\s*[^>]+>\s* ([a-zA-Z]*)\s*=\s*{.*};$""", declaration, flags=re.X) if m: result, arg, name = m.groups()[:3] add_matcher(result, name, '%s, ..., %s' % (arg, arg), comment) return # Parse Variadic operator matchers. m = re.match( r"""^.*VariadicOperatorMatcherFunc\s*<\s*([^,]+),\s*([^\s>]+)\s*>\s* ([a-zA-Z]*)\s*=\s*{.*};$""", declaration, flags=re.X) if m: min_args, max_args, name = m.groups()[:3] if max_args == '1': add_matcher('*', name, 'Matcher<*>', comment) return elif max_args == 'UINT_MAX': add_matcher('*', name, 'Matcher<*>, ..., Matcher<*>', comment) return # Parse free standing matcher functions, like: # Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) { m = re.match(r"""^\s*(.*)\s+ ([^\s\(]+)\s*\( (.*) \)\s*{""", declaration, re.X) if m: result, name, args = m.groups() args = ', '.join(p.strip() for p in args.split(',')) m = re.match(r'.*\s+internal::(Bindable)?Matcher<([^>]+)>$', result) if m: result_types = [m.group(2)] else: result_types = extract_result_types(comment) if not result_types: if not comment: # Only overloads don't have their own doxygen comments; ignore those. print 'Ignoring "%s"' % name else: print 'Cannot determine result type for "%s"' % name else: for result_type in result_types: add_matcher(result_type, name, args, comment) else: print '*** Unparsable: "' + declaration + '" ***'
[ "def", "act_on_decl", "(", "declaration", ",", "comment", ",", "allowed_types", ")", ":", "if", "declaration", ".", "strip", "(", ")", ":", "# Node matchers are defined by writing:", "# VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;", "m", "=", "re", ".", "match", "(", "r\"\"\".*Variadic(?:DynCast)?AllOfMatcher\\s*<\n \\s*([^\\s,]+)\\s*(?:,\n \\s*([^\\s>]+)\\s*)?>\n \\s*([^\\s;]+)\\s*;\\s*$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "result", ",", "inner", ",", "name", "=", "m", ".", "groups", "(", ")", "if", "not", "inner", ":", "inner", "=", "result", "add_matcher", "(", "result", ",", "name", ",", "'Matcher<%s>...'", "%", "inner", ",", "comment", ",", "is_dyncast", "=", "True", ")", "return", "# Parse the various matcher definition macros.", "m", "=", "re", ".", "match", "(", "\"\"\".*AST_TYPE_MATCHER\\(\n \\s*([^\\s,]+\\s*),\n \\s*([^\\s,]+\\s*)\n \\)\\s*;\\s*$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "inner", ",", "name", "=", "m", ".", "groups", "(", ")", "add_matcher", "(", "'Type'", ",", "name", ",", "'Matcher<%s>...'", "%", "inner", ",", "comment", ",", "is_dyncast", "=", "True", ")", "# FIXME: re-enable once we have implemented casting on the TypeLoc", "# hierarchy.", "# add_matcher('TypeLoc', '%sLoc' % name, 'Matcher<%sLoc>...' % inner,", "# comment, is_dyncast=True)", "return", "m", "=", "re", ".", "match", "(", "\"\"\".*AST_TYPE(LOC)?_TRAVERSE_MATCHER\\(\n \\s*([^\\s,]+\\s*),\n \\s*(?:[^\\s,]+\\s*),\n \\s*AST_POLYMORPHIC_SUPPORTED_TYPES\\(([^)]*)\\)\n \\)\\s*;\\s*$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "loc", ",", "name", ",", "results", "=", "m", ".", "groups", "(", ")", "[", "0", ":", "3", "]", "result_types", "=", "[", "r", ".", "strip", "(", ")", "for", "r", "in", "results", ".", "split", "(", "','", ")", "]", "comment_result_types", "=", "extract_result_types", "(", "comment", ")", "if", "(", "comment_result_types", "and", "sorted", "(", "result_types", ")", "!=", "sorted", "(", "comment_result_types", ")", ")", ":", "raise", "Exception", "(", "'Inconsistent documentation for: %s'", "%", "name", ")", "for", "result_type", "in", "result_types", ":", "add_matcher", "(", "result_type", ",", "name", ",", "'Matcher<Type>'", ",", "comment", ")", "if", "loc", ":", "add_matcher", "(", "'%sLoc'", "%", "result_type", ",", "'%sLoc'", "%", "name", ",", "'Matcher<TypeLoc>'", ",", "comment", ")", "return", "m", "=", "re", ".", "match", "(", "r\"\"\"^\\s*AST_POLYMORPHIC_MATCHER(_P)?(.?)(?:_OVERLOAD)?\\(\n \\s*([^\\s,]+)\\s*,\n \\s*AST_POLYMORPHIC_SUPPORTED_TYPES\\(([^)]*)\\)\n (?:,\\s*([^\\s,]+)\\s*\n ,\\s*([^\\s,]+)\\s*)?\n (?:,\\s*([^\\s,]+)\\s*\n ,\\s*([^\\s,]+)\\s*)?\n (?:,\\s*\\d+\\s*)?\n \\)\\s*{\\s*$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "p", ",", "n", ",", "name", ",", "results", "=", "m", ".", "groups", "(", ")", "[", "0", ":", "4", "]", "args", "=", "m", ".", "groups", "(", ")", "[", "4", ":", "]", "result_types", "=", "[", "r", ".", "strip", "(", ")", "for", "r", "in", "results", ".", "split", "(", "','", ")", "]", "if", "allowed_types", "and", "allowed_types", "!=", "result_types", ":", "raise", "Exception", "(", "'Inconsistent documentation for: %s'", "%", "name", ")", "if", "n", "not", "in", "[", "''", ",", "'2'", "]", ":", "raise", "Exception", "(", "'Cannot parse \"%s\"'", "%", "declaration", ")", "args", "=", "', '", ".", "join", "(", "'%s %s'", "%", "(", "args", "[", "i", "]", ",", "args", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "args", ")", ",", "2", ")", "if", "args", "[", "i", "]", ")", "for", "result_type", "in", "result_types", ":", "add_matcher", "(", "result_type", ",", "name", ",", "args", ",", "comment", ")", "return", "m", "=", "re", ".", "match", "(", "r\"\"\"^\\s*AST_MATCHER_FUNCTION(_P)?(.?)(?:_OVERLOAD)?\\(\n (?:\\s*([^\\s,]+)\\s*,)?\n \\s*([^\\s,]+)\\s*\n (?:,\\s*([^\\s,]+)\\s*\n ,\\s*([^\\s,]+)\\s*)?\n (?:,\\s*([^\\s,]+)\\s*\n ,\\s*([^\\s,]+)\\s*)?\n (?:,\\s*\\d+\\s*)?\n \\)\\s*{\\s*$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "p", ",", "n", ",", "result", ",", "name", "=", "m", ".", "groups", "(", ")", "[", "0", ":", "4", "]", "args", "=", "m", ".", "groups", "(", ")", "[", "4", ":", "]", "if", "n", "not", "in", "[", "''", ",", "'2'", "]", ":", "raise", "Exception", "(", "'Cannot parse \"%s\"'", "%", "declaration", ")", "args", "=", "', '", ".", "join", "(", "'%s %s'", "%", "(", "args", "[", "i", "]", ",", "args", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "args", ")", ",", "2", ")", "if", "args", "[", "i", "]", ")", "add_matcher", "(", "result", ",", "name", ",", "args", ",", "comment", ")", "return", "m", "=", "re", ".", "match", "(", "r\"\"\"^\\s*AST_MATCHER(_P)?(.?)(?:_OVERLOAD)?\\(\n (?:\\s*([^\\s,]+)\\s*,)?\n \\s*([^\\s,]+)\\s*\n (?:,\\s*([^,]+)\\s*\n ,\\s*([^\\s,]+)\\s*)?\n (?:,\\s*([^\\s,]+)\\s*\n ,\\s*([^\\s,]+)\\s*)?\n (?:,\\s*\\d+\\s*)?\n \\)\\s*{\\s*$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "p", ",", "n", ",", "result", ",", "name", "=", "m", ".", "groups", "(", ")", "[", "0", ":", "4", "]", "args", "=", "m", ".", "groups", "(", ")", "[", "4", ":", "]", "if", "not", "result", ":", "if", "not", "allowed_types", ":", "raise", "Exception", "(", "'Did not find allowed result types for: %s'", "%", "name", ")", "result_types", "=", "allowed_types", "else", ":", "result_types", "=", "[", "result", "]", "if", "n", "not", "in", "[", "''", ",", "'2'", "]", ":", "raise", "Exception", "(", "'Cannot parse \"%s\"'", "%", "declaration", ")", "args", "=", "', '", ".", "join", "(", "'%s %s'", "%", "(", "args", "[", "i", "]", ",", "args", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "args", ")", ",", "2", ")", "if", "args", "[", "i", "]", ")", "for", "result_type", "in", "result_types", ":", "add_matcher", "(", "result_type", ",", "name", ",", "args", ",", "comment", ")", "return", "# Parse ArgumentAdapting matchers.", "m", "=", "re", ".", "match", "(", "r\"\"\"^.*ArgumentAdaptingMatcherFunc<.*>\\s*(?:LLVM_ATTRIBUTE_UNUSED\\s*)\n ([a-zA-Z]*)\\s*=\\s*{};$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "name", "=", "m", ".", "groups", "(", ")", "[", "0", "]", "add_matcher", "(", "'*'", ",", "name", ",", "'Matcher<*>'", ",", "comment", ")", "return", "# Parse Variadic functions.", "m", "=", "re", ".", "match", "(", "r\"\"\"^.*internal::VariadicFunction\\s*<\\s*([^,]+),\\s*([^,]+),\\s*[^>]+>\\s*\n ([a-zA-Z]*)\\s*=\\s*{.*};$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "result", ",", "arg", ",", "name", "=", "m", ".", "groups", "(", ")", "[", ":", "3", "]", "add_matcher", "(", "result", ",", "name", ",", "'%s, ..., %s'", "%", "(", "arg", ",", "arg", ")", ",", "comment", ")", "return", "# Parse Variadic operator matchers.", "m", "=", "re", ".", "match", "(", "r\"\"\"^.*VariadicOperatorMatcherFunc\\s*<\\s*([^,]+),\\s*([^\\s>]+)\\s*>\\s*\n ([a-zA-Z]*)\\s*=\\s*{.*};$\"\"\"", ",", "declaration", ",", "flags", "=", "re", ".", "X", ")", "if", "m", ":", "min_args", ",", "max_args", ",", "name", "=", "m", ".", "groups", "(", ")", "[", ":", "3", "]", "if", "max_args", "==", "'1'", ":", "add_matcher", "(", "'*'", ",", "name", ",", "'Matcher<*>'", ",", "comment", ")", "return", "elif", "max_args", "==", "'UINT_MAX'", ":", "add_matcher", "(", "'*'", ",", "name", ",", "'Matcher<*>, ..., Matcher<*>'", ",", "comment", ")", "return", "# Parse free standing matcher functions, like:", "# Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {", "m", "=", "re", ".", "match", "(", "r\"\"\"^\\s*(.*)\\s+\n ([^\\s\\(]+)\\s*\\(\n (.*)\n \\)\\s*{\"\"\"", ",", "declaration", ",", "re", ".", "X", ")", "if", "m", ":", "result", ",", "name", ",", "args", "=", "m", ".", "groups", "(", ")", "args", "=", "', '", ".", "join", "(", "p", ".", "strip", "(", ")", "for", "p", "in", "args", ".", "split", "(", "','", ")", ")", "m", "=", "re", ".", "match", "(", "r'.*\\s+internal::(Bindable)?Matcher<([^>]+)>$'", ",", "result", ")", "if", "m", ":", "result_types", "=", "[", "m", ".", "group", "(", "2", ")", "]", "else", ":", "result_types", "=", "extract_result_types", "(", "comment", ")", "if", "not", "result_types", ":", "if", "not", "comment", ":", "# Only overloads don't have their own doxygen comments; ignore those.", "print", "'Ignoring \"%s\"'", "%", "name", "else", ":", "print", "'Cannot determine result type for \"%s\"'", "%", "name", "else", ":", "for", "result_type", "in", "result_types", ":", "add_matcher", "(", "result_type", ",", "name", ",", "args", ",", "comment", ")", "else", ":", "print", "'*** Unparsable: \"'", "+", "declaration", "+", "'\" ***'" ]
https://github.com/GoSSIP-SJTU/Armariris/blob/ad5d868482956b2194a77b39c8d543c7c2318200/tools/clang/docs/tools/dump_ast_matchers.py#L131-L316
cyberbotics/webots
af7fa7d68dcf7b4550f1f2e132092b41e83698fc
projects/humans/pedestrian/controllers/pedestrian/pedestrian.py
python
Pedestrian.run
(self)
Set the Pedestrian pose and position.
Set the Pedestrian pose and position.
[ "Set", "the", "Pedestrian", "pose", "and", "position", "." ]
def run(self): """Set the Pedestrian pose and position.""" opt_parser = optparse.OptionParser() opt_parser.add_option("--trajectory", default="", help="Specify the trajectory in the format [x1 y1, x2 y2, ...]") opt_parser.add_option("--speed", type=float, default=0.5, help="Specify walking speed in [m/s]") opt_parser.add_option("--step", type=int, help="Specify time step (otherwise world time step is used)") options, args = opt_parser.parse_args() if not options.trajectory or len(options.trajectory.split(',')) < 2: print("You should specify the trajectory using the '--trajectory' option.") print("The trajectory should have at least 2 points.") return if options.speed and options.speed > 0: self.speed = options.speed if options.step and options.step > 0: self.time_step = options.step else: self.time_step = int(self.getBasicTimeStep()) point_list = options.trajectory.split(',') self.number_of_waypoints = len(point_list) self.waypoints = [] for i in range(0, self.number_of_waypoints): self.waypoints.append([]) self.waypoints[i].append(float(point_list[i].split()[0])) self.waypoints[i].append(float(point_list[i].split()[1])) self.root_node_ref = self.getSelf() self.root_translation_field = self.root_node_ref.getField("translation") self.root_rotation_field = self.root_node_ref.getField("rotation") for i in range(0, self.BODY_PARTS_NUMBER): self.joints_position_field.append(self.root_node_ref.getField(self.joint_names[i])) # compute waypoints distance self.waypoints_distance = [] for i in range(0, self.number_of_waypoints): x = self.waypoints[i][0] - self.waypoints[(i + 1) % self.number_of_waypoints][0] y = self.waypoints[i][1] - self.waypoints[(i + 1) % self.number_of_waypoints][1] if i == 0: self.waypoints_distance.append(math.sqrt(x * x + y * y)) else: self.waypoints_distance.append(self.waypoints_distance[i - 1] + math.sqrt(x * x + y * y)) while not self.step(self.time_step) == -1: time = self.getTime() current_sequence = int(((time * self.speed) / self.CYCLE_TO_DISTANCE_RATIO) % self.WALK_SEQUENCES_NUMBER) # compute the ratio 'distance already covered between way-point(X) and way-point(X+1)' # / 'total distance between way-point(X) and way-point(X+1)' ratio = (time * self.speed) / self.CYCLE_TO_DISTANCE_RATIO - \ int(((time * self.speed) / self.CYCLE_TO_DISTANCE_RATIO)) for i in range(0, self.BODY_PARTS_NUMBER): current_angle = self.angles[i][current_sequence] * (1 - ratio) + \ self.angles[i][(current_sequence + 1) % self.WALK_SEQUENCES_NUMBER] * ratio self.joints_position_field[i].setSFFloat(current_angle) # adjust height self.current_height_offset = self.height_offsets[current_sequence] * (1 - ratio) + \ self.height_offsets[(current_sequence + 1) % self.WALK_SEQUENCES_NUMBER] * ratio # move everything distance = time * self.speed relative_distance = distance - int(distance / self.waypoints_distance[self.number_of_waypoints - 1]) * \ self.waypoints_distance[self.number_of_waypoints - 1] for i in range(0, self.number_of_waypoints): if self.waypoints_distance[i] > relative_distance: break distance_ratio = 0 if i == 0: distance_ratio = relative_distance / self.waypoints_distance[0] else: distance_ratio = (relative_distance - self.waypoints_distance[i - 1]) / \ (self.waypoints_distance[i] - self.waypoints_distance[i - 1]) x = distance_ratio * self.waypoints[(i + 1) % self.number_of_waypoints][0] + \ (1 - distance_ratio) * self.waypoints[i][0] y = distance_ratio * self.waypoints[(i + 1) % self.number_of_waypoints][1] + \ (1 - distance_ratio) * self.waypoints[i][1] root_translation = [x, y, self.ROOT_HEIGHT + self.current_height_offset] angle = math.atan2(self.waypoints[(i + 1) % self.number_of_waypoints][1] - self.waypoints[i][1], self.waypoints[(i + 1) % self.number_of_waypoints][0] - self.waypoints[i][0]) rotation = [0, 0, 1, angle] self.root_translation_field.setSFVec3f(root_translation) self.root_rotation_field.setSFRotation(rotation)
[ "def", "run", "(", "self", ")", ":", "opt_parser", "=", "optparse", ".", "OptionParser", "(", ")", "opt_parser", ".", "add_option", "(", "\"--trajectory\"", ",", "default", "=", "\"\"", ",", "help", "=", "\"Specify the trajectory in the format [x1 y1, x2 y2, ...]\"", ")", "opt_parser", ".", "add_option", "(", "\"--speed\"", ",", "type", "=", "float", ",", "default", "=", "0.5", ",", "help", "=", "\"Specify walking speed in [m/s]\"", ")", "opt_parser", ".", "add_option", "(", "\"--step\"", ",", "type", "=", "int", ",", "help", "=", "\"Specify time step (otherwise world time step is used)\"", ")", "options", ",", "args", "=", "opt_parser", ".", "parse_args", "(", ")", "if", "not", "options", ".", "trajectory", "or", "len", "(", "options", ".", "trajectory", ".", "split", "(", "','", ")", ")", "<", "2", ":", "print", "(", "\"You should specify the trajectory using the '--trajectory' option.\"", ")", "print", "(", "\"The trajectory should have at least 2 points.\"", ")", "return", "if", "options", ".", "speed", "and", "options", ".", "speed", ">", "0", ":", "self", ".", "speed", "=", "options", ".", "speed", "if", "options", ".", "step", "and", "options", ".", "step", ">", "0", ":", "self", ".", "time_step", "=", "options", ".", "step", "else", ":", "self", ".", "time_step", "=", "int", "(", "self", ".", "getBasicTimeStep", "(", ")", ")", "point_list", "=", "options", ".", "trajectory", ".", "split", "(", "','", ")", "self", ".", "number_of_waypoints", "=", "len", "(", "point_list", ")", "self", ".", "waypoints", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "number_of_waypoints", ")", ":", "self", ".", "waypoints", ".", "append", "(", "[", "]", ")", "self", ".", "waypoints", "[", "i", "]", ".", "append", "(", "float", "(", "point_list", "[", "i", "]", ".", "split", "(", ")", "[", "0", "]", ")", ")", "self", ".", "waypoints", "[", "i", "]", ".", "append", "(", "float", "(", "point_list", "[", "i", "]", ".", "split", "(", ")", "[", "1", "]", ")", ")", "self", ".", "root_node_ref", "=", "self", ".", "getSelf", "(", ")", "self", ".", "root_translation_field", "=", "self", ".", "root_node_ref", ".", "getField", "(", "\"translation\"", ")", "self", ".", "root_rotation_field", "=", "self", ".", "root_node_ref", ".", "getField", "(", "\"rotation\"", ")", "for", "i", "in", "range", "(", "0", ",", "self", ".", "BODY_PARTS_NUMBER", ")", ":", "self", ".", "joints_position_field", ".", "append", "(", "self", ".", "root_node_ref", ".", "getField", "(", "self", ".", "joint_names", "[", "i", "]", ")", ")", "# compute waypoints distance", "self", ".", "waypoints_distance", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "number_of_waypoints", ")", ":", "x", "=", "self", ".", "waypoints", "[", "i", "]", "[", "0", "]", "-", "self", ".", "waypoints", "[", "(", "i", "+", "1", ")", "%", "self", ".", "number_of_waypoints", "]", "[", "0", "]", "y", "=", "self", ".", "waypoints", "[", "i", "]", "[", "1", "]", "-", "self", ".", "waypoints", "[", "(", "i", "+", "1", ")", "%", "self", ".", "number_of_waypoints", "]", "[", "1", "]", "if", "i", "==", "0", ":", "self", ".", "waypoints_distance", ".", "append", "(", "math", ".", "sqrt", "(", "x", "*", "x", "+", "y", "*", "y", ")", ")", "else", ":", "self", ".", "waypoints_distance", ".", "append", "(", "self", ".", "waypoints_distance", "[", "i", "-", "1", "]", "+", "math", ".", "sqrt", "(", "x", "*", "x", "+", "y", "*", "y", ")", ")", "while", "not", "self", ".", "step", "(", "self", ".", "time_step", ")", "==", "-", "1", ":", "time", "=", "self", ".", "getTime", "(", ")", "current_sequence", "=", "int", "(", "(", "(", "time", "*", "self", ".", "speed", ")", "/", "self", ".", "CYCLE_TO_DISTANCE_RATIO", ")", "%", "self", ".", "WALK_SEQUENCES_NUMBER", ")", "# compute the ratio 'distance already covered between way-point(X) and way-point(X+1)'", "# / 'total distance between way-point(X) and way-point(X+1)'", "ratio", "=", "(", "time", "*", "self", ".", "speed", ")", "/", "self", ".", "CYCLE_TO_DISTANCE_RATIO", "-", "int", "(", "(", "(", "time", "*", "self", ".", "speed", ")", "/", "self", ".", "CYCLE_TO_DISTANCE_RATIO", ")", ")", "for", "i", "in", "range", "(", "0", ",", "self", ".", "BODY_PARTS_NUMBER", ")", ":", "current_angle", "=", "self", ".", "angles", "[", "i", "]", "[", "current_sequence", "]", "*", "(", "1", "-", "ratio", ")", "+", "self", ".", "angles", "[", "i", "]", "[", "(", "current_sequence", "+", "1", ")", "%", "self", ".", "WALK_SEQUENCES_NUMBER", "]", "*", "ratio", "self", ".", "joints_position_field", "[", "i", "]", ".", "setSFFloat", "(", "current_angle", ")", "# adjust height", "self", ".", "current_height_offset", "=", "self", ".", "height_offsets", "[", "current_sequence", "]", "*", "(", "1", "-", "ratio", ")", "+", "self", ".", "height_offsets", "[", "(", "current_sequence", "+", "1", ")", "%", "self", ".", "WALK_SEQUENCES_NUMBER", "]", "*", "ratio", "# move everything", "distance", "=", "time", "*", "self", ".", "speed", "relative_distance", "=", "distance", "-", "int", "(", "distance", "/", "self", ".", "waypoints_distance", "[", "self", ".", "number_of_waypoints", "-", "1", "]", ")", "*", "self", ".", "waypoints_distance", "[", "self", ".", "number_of_waypoints", "-", "1", "]", "for", "i", "in", "range", "(", "0", ",", "self", ".", "number_of_waypoints", ")", ":", "if", "self", ".", "waypoints_distance", "[", "i", "]", ">", "relative_distance", ":", "break", "distance_ratio", "=", "0", "if", "i", "==", "0", ":", "distance_ratio", "=", "relative_distance", "/", "self", ".", "waypoints_distance", "[", "0", "]", "else", ":", "distance_ratio", "=", "(", "relative_distance", "-", "self", ".", "waypoints_distance", "[", "i", "-", "1", "]", ")", "/", "(", "self", ".", "waypoints_distance", "[", "i", "]", "-", "self", ".", "waypoints_distance", "[", "i", "-", "1", "]", ")", "x", "=", "distance_ratio", "*", "self", ".", "waypoints", "[", "(", "i", "+", "1", ")", "%", "self", ".", "number_of_waypoints", "]", "[", "0", "]", "+", "(", "1", "-", "distance_ratio", ")", "*", "self", ".", "waypoints", "[", "i", "]", "[", "0", "]", "y", "=", "distance_ratio", "*", "self", ".", "waypoints", "[", "(", "i", "+", "1", ")", "%", "self", ".", "number_of_waypoints", "]", "[", "1", "]", "+", "(", "1", "-", "distance_ratio", ")", "*", "self", ".", "waypoints", "[", "i", "]", "[", "1", "]", "root_translation", "=", "[", "x", ",", "y", ",", "self", ".", "ROOT_HEIGHT", "+", "self", ".", "current_height_offset", "]", "angle", "=", "math", ".", "atan2", "(", "self", ".", "waypoints", "[", "(", "i", "+", "1", ")", "%", "self", ".", "number_of_waypoints", "]", "[", "1", "]", "-", "self", ".", "waypoints", "[", "i", "]", "[", "1", "]", ",", "self", ".", "waypoints", "[", "(", "i", "+", "1", ")", "%", "self", ".", "number_of_waypoints", "]", "[", "0", "]", "-", "self", ".", "waypoints", "[", "i", "]", "[", "0", "]", ")", "rotation", "=", "[", "0", ",", "0", ",", "1", ",", "angle", "]", "self", ".", "root_translation_field", ".", "setSFVec3f", "(", "root_translation", ")", "self", ".", "root_rotation_field", ".", "setSFRotation", "(", "rotation", ")" ]
https://github.com/cyberbotics/webots/blob/af7fa7d68dcf7b4550f1f2e132092b41e83698fc/projects/humans/pedestrian/controllers/pedestrian/pedestrian.py#L61-L143
RapidsAtHKUST/CommunityDetectionCodes
23dbafd2e57ab0f5f0528b1322c4a409f21e5892
Prensentation/algorithms/link_partition/visualization/dendrogram/radial_support.py
python
d_to_polar
(D)
Distance matrix to (theta, radius).
Distance matrix to (theta, radius).
[ "Distance", "matrix", "to", "(", "theta", "radius", ")", "." ]
def d_to_polar(D): """Distance matrix to (theta, radius).""" # this functionality is to adopt for more general situations # intended functionality: # - embedd distance matrix to 2D # - return that embedding in polar coordinates pass
[ "def", "d_to_polar", "(", "D", ")", ":", "# this functionality is to adopt for more general situations", "# intended functionality:", "# - embedd distance matrix to 2D", "# - return that embedding in polar coordinates", "pass" ]
https://github.com/RapidsAtHKUST/CommunityDetectionCodes/blob/23dbafd2e57ab0f5f0528b1322c4a409f21e5892/Prensentation/algorithms/link_partition/visualization/dendrogram/radial_support.py#L17-L23
Harick1/caffe-yolo
eea92bf3ddfe4d0ff6b0b3ba9b15c029a83ed9a3
scripts/cpp_lint.py
python
_CppLintState.ResetErrorCounts
(self)
Sets the module's error statistic back to zero.
Sets the module's error statistic back to zero.
[ "Sets", "the", "module", "s", "error", "statistic", "back", "to", "zero", "." ]
def ResetErrorCounts(self): """Sets the module's error statistic back to zero.""" self.error_count = 0 self.errors_by_category = {}
[ "def", "ResetErrorCounts", "(", "self", ")", ":", "self", ".", "error_count", "=", "0", "self", ".", "errors_by_category", "=", "{", "}" ]
https://github.com/Harick1/caffe-yolo/blob/eea92bf3ddfe4d0ff6b0b3ba9b15c029a83ed9a3/scripts/cpp_lint.py#L742-L745
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
lldb/examples/python/file_extract.py
python
FileExtract.get_sint8
(self, fail_value=0)
Extract a single int8_t from the binary file at the current file position, returns a single integer
Extract a single int8_t from the binary file at the current file position, returns a single integer
[ "Extract", "a", "single", "int8_t", "from", "the", "binary", "file", "at", "the", "current", "file", "position", "returns", "a", "single", "integer" ]
def get_sint8(self, fail_value=0): '''Extract a single int8_t from the binary file at the current file position, returns a single integer''' s = self.read_size(1) if s: v, = struct.unpack(self.byte_order + 'b', s) return v else: return fail_value
[ "def", "get_sint8", "(", "self", ",", "fail_value", "=", "0", ")", ":", "s", "=", "self", ".", "read_size", "(", "1", ")", "if", "s", ":", "v", ",", "=", "struct", ".", "unpack", "(", "self", ".", "byte_order", "+", "'b'", ",", "s", ")", "return", "v", "else", ":", "return", "fail_value" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/examples/python/file_extract.py#L63-L70
infinit/elle
a8154593c42743f45b9df09daf62b44630c24a02
drake/src/drake/__init__.py
python
BaseNode.name_absolute
(self)
return self.__name
Node name, relative to the root of the source directory.
Node name, relative to the root of the source directory.
[ "Node", "name", "relative", "to", "the", "root", "of", "the", "source", "directory", "." ]
def name_absolute(self): """Node name, relative to the root of the source directory.""" return self.__name
[ "def", "name_absolute", "(", "self", ")", ":", "return", "self", ".", "__name" ]
https://github.com/infinit/elle/blob/a8154593c42743f45b9df09daf62b44630c24a02/drake/src/drake/__init__.py#L1406-L1408
InsightSoftwareConsortium/ITK
87acfce9a93d928311c38bc371b666b515b9f19d
Wrapping/Generators/Doc/doxy2swig.py
python
Doxy2SWIG.generic_parse
(self, node, pad=0)
A Generic parser for arbitrary tags in a node. Parameters: - node: A node in the DOM. - pad: `int` (default: 0) If 0 the node data is not padded with newlines. If 1 it appends a newline after parsing the childNodes. If 2 it pads before and after the nodes are processed. Defaults to 0.
A Generic parser for arbitrary tags in a node.
[ "A", "Generic", "parser", "for", "arbitrary", "tags", "in", "a", "node", "." ]
def generic_parse(self, node, pad=0): """A Generic parser for arbitrary tags in a node. Parameters: - node: A node in the DOM. - pad: `int` (default: 0) If 0 the node data is not padded with newlines. If 1 it appends a newline after parsing the childNodes. If 2 it pads before and after the nodes are processed. Defaults to 0. """ npiece = 0 if pad: npiece = len(self.pieces) if pad == 2: self.add_text("\n") for n in node.childNodes: self.parse(n) if pad: if len(self.pieces) > npiece: self.add_text("\n")
[ "def", "generic_parse", "(", "self", ",", "node", ",", "pad", "=", "0", ")", ":", "npiece", "=", "0", "if", "pad", ":", "npiece", "=", "len", "(", "self", ".", "pieces", ")", "if", "pad", "==", "2", ":", "self", ".", "add_text", "(", "\"\\n\"", ")", "for", "n", "in", "node", ".", "childNodes", ":", "self", ".", "parse", "(", "n", ")", "if", "pad", ":", "if", "len", "(", "self", ".", "pieces", ")", ">", "npiece", ":", "self", ".", "add_text", "(", "\"\\n\"", ")" ]
https://github.com/InsightSoftwareConsortium/ITK/blob/87acfce9a93d928311c38bc371b666b515b9f19d/Wrapping/Generators/Doc/doxy2swig.py#L165-L188
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/fftpack/basic.py
python
_is_safe_size
(n)
return not n & (n-1)
Is the size of FFT such that FFTPACK can handle it in single precision with sufficient accuracy? Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those
Is the size of FFT such that FFTPACK can handle it in single precision with sufficient accuracy?
[ "Is", "the", "size", "of", "FFT", "such", "that", "FFTPACK", "can", "handle", "it", "in", "single", "precision", "with", "sufficient", "accuracy?" ]
def _is_safe_size(n): """ Is the size of FFT such that FFTPACK can handle it in single precision with sufficient accuracy? Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those """ n = int(n) if n == 0: return True # Divide by 3 until you can't, then by 5 until you can't for c in (3, 5): while n % c == 0: n //= c # Return True if the remainder is a power of 2 return not n & (n-1)
[ "def", "_is_safe_size", "(", "n", ")", ":", "n", "=", "int", "(", "n", ")", "if", "n", "==", "0", ":", "return", "True", "# Divide by 3 until you can't, then by 5 until you can't", "for", "c", "in", "(", "3", ",", "5", ")", ":", "while", "n", "%", "c", "==", "0", ":", "n", "//=", "c", "# Return True if the remainder is a power of 2", "return", "not", "n", "&", "(", "n", "-", "1", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/fftpack/basic.py#L51-L69
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/losses/python/losses/loss_ops.py
python
_num_present
(losses, weights, per_batch=False)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
Computes the number of elements in the loss function induced by `weights`. A given weights tensor induces different numbers of usable elements in the `losses` tensor. The `weights` tensor is broadcast across `losses` for all possible dimensions. For example, if `losses` is a tensor of dimension [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in effect, tiled to match the size of `losses`. Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: A tensor of size [batch_size, d1, ... dN]. weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If `per_batch` is True, the value is returned as a tensor of size [batch_size]. Otherwise, a single scalar tensor is returned.
Computes the number of elements in the loss function induced by `weights`.
[ "Computes", "the", "number", "of", "elements", "in", "the", "loss", "function", "induced", "by", "weights", "." ]
def _num_present(losses, weights, per_batch=False): """Computes the number of elements in the loss function induced by `weights`. A given weights tensor induces different numbers of usable elements in the `losses` tensor. The `weights` tensor is broadcast across `losses` for all possible dimensions. For example, if `losses` is a tensor of dimension [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in effect, tiled to match the size of `losses`. Following this effective tile, the total number of present elements is the number of non-zero weights. Args: losses: A tensor of size [batch_size, d1, ... dN]. weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N. per_batch: Whether to return the number of elements per batch or as a sum total. Returns: The number of present (non-zero) elements in the losses tensor. If `per_batch` is True, the value is returned as a tensor of size [batch_size]. Otherwise, a single scalar tensor is returned. """ # If weights is a scalar, its easy to compute: if weights.get_shape().ndims == 0: batch_size = array_ops.reshape( array_ops.slice(array_ops.shape(losses), [0], [1]), []) num_per_batch = math_ops.div( math_ops.cast(array_ops.size(losses), dtypes.float32), math_ops.cast(batch_size, dtypes.float32)) num_per_batch = array_ops.where( math_ops.equal(weights, 0), 0.0, num_per_batch) num_per_batch = math_ops.multiply( array_ops.ones(array_ops.reshape(batch_size, [1])), num_per_batch) return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch) # First, count the number of nonzero weights: if weights.get_shape().ndims >= 1: axis = list(range(1, weights.get_shape().ndims)) num_nonzero_per_batch = math_ops.reduce_sum( math_ops.cast(math_ops.not_equal(weights, 0), dtypes.float32), axis=axis) # Next, determine the number of elements that weights would broadcast to: broadcast_dims = array_ops.slice( array_ops.shape(losses), [weights.get_shape().ndims], [-1]) num_to_broadcast = math_ops.cast(math_ops.reduce_prod(broadcast_dims), dtypes.float32) num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast) return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
[ "def", "_num_present", "(", "losses", ",", "weights", ",", "per_batch", "=", "False", ")", ":", "# If weights is a scalar, its easy to compute:", "if", "weights", ".", "get_shape", "(", ")", ".", "ndims", "==", "0", ":", "batch_size", "=", "array_ops", ".", "reshape", "(", "array_ops", ".", "slice", "(", "array_ops", ".", "shape", "(", "losses", ")", ",", "[", "0", "]", ",", "[", "1", "]", ")", ",", "[", "]", ")", "num_per_batch", "=", "math_ops", ".", "div", "(", "math_ops", ".", "cast", "(", "array_ops", ".", "size", "(", "losses", ")", ",", "dtypes", ".", "float32", ")", ",", "math_ops", ".", "cast", "(", "batch_size", ",", "dtypes", ".", "float32", ")", ")", "num_per_batch", "=", "array_ops", ".", "where", "(", "math_ops", ".", "equal", "(", "weights", ",", "0", ")", ",", "0.0", ",", "num_per_batch", ")", "num_per_batch", "=", "math_ops", ".", "multiply", "(", "array_ops", ".", "ones", "(", "array_ops", ".", "reshape", "(", "batch_size", ",", "[", "1", "]", ")", ")", ",", "num_per_batch", ")", "return", "num_per_batch", "if", "per_batch", "else", "math_ops", ".", "reduce_sum", "(", "num_per_batch", ")", "# First, count the number of nonzero weights:", "if", "weights", ".", "get_shape", "(", ")", ".", "ndims", ">=", "1", ":", "axis", "=", "list", "(", "range", "(", "1", ",", "weights", ".", "get_shape", "(", ")", ".", "ndims", ")", ")", "num_nonzero_per_batch", "=", "math_ops", ".", "reduce_sum", "(", "math_ops", ".", "cast", "(", "math_ops", ".", "not_equal", "(", "weights", ",", "0", ")", ",", "dtypes", ".", "float32", ")", ",", "axis", "=", "axis", ")", "# Next, determine the number of elements that weights would broadcast to:", "broadcast_dims", "=", "array_ops", ".", "slice", "(", "array_ops", ".", "shape", "(", "losses", ")", ",", "[", "weights", ".", "get_shape", "(", ")", ".", "ndims", "]", ",", "[", "-", "1", "]", ")", "num_to_broadcast", "=", "math_ops", ".", "cast", "(", "math_ops", ".", "reduce_prod", "(", "broadcast_dims", ")", ",", "dtypes", ".", "float32", ")", "num_per_batch", "=", "math_ops", ".", "multiply", "(", "num_nonzero_per_batch", ",", "num_to_broadcast", ")", "return", "num_per_batch", "if", "per_batch", "else", "math_ops", ".", "reduce_sum", "(", "num_per_batch", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/losses/python/losses/loss_ops.py#L125-L173
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/ssl.py
python
SSLObject.server_hostname
(self)
return self._sslobj.server_hostname
The currently set server hostname (for SNI), or ``None`` if no server hostame is set.
The currently set server hostname (for SNI), or ``None`` if no server hostame is set.
[ "The", "currently", "set", "server", "hostname", "(", "for", "SNI", ")", "or", "None", "if", "no", "server", "hostame", "is", "set", "." ]
def server_hostname(self): """The currently set server hostname (for SNI), or ``None`` if no server hostame is set.""" return self._sslobj.server_hostname
[ "def", "server_hostname", "(", "self", ")", ":", "return", "self", ".", "_sslobj", ".", "server_hostname" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/ssl.py#L704-L707
etotheipi/BitcoinArmory
2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98
armoryengine/ArmoryUtils.py
python
base58_to_binary
(addr)
return '\x00'*padding + binOut
This method applies the Bitcoin-specific conversion from Base58 to binary which may includes some extra "zero" bytes, such as is the case with the main-network addresses. This method is labeled as inputting an "addrStr", but it's really this special kind of Base58 converter, which makes it usable for encoding other data, such as ECDSA keys or scripts.
This method applies the Bitcoin-specific conversion from Base58 to binary which may includes some extra "zero" bytes, such as is the case with the main-network addresses.
[ "This", "method", "applies", "the", "Bitcoin", "-", "specific", "conversion", "from", "Base58", "to", "binary", "which", "may", "includes", "some", "extra", "zero", "bytes", "such", "as", "is", "the", "case", "with", "the", "main", "-", "network", "addresses", "." ]
def base58_to_binary(addr): """ This method applies the Bitcoin-specific conversion from Base58 to binary which may includes some extra "zero" bytes, such as is the case with the main-network addresses. This method is labeled as inputting an "addrStr", but it's really this special kind of Base58 converter, which makes it usable for encoding other data, such as ECDSA keys or scripts. """ # Count the zeros ('1' characters) at the beginning padding = 0; for c in addr: if c=='1': padding+=1 else: break n = 0 for ch in addr: n *= 58 if ch in BASE58CHARS: n += BASE58CHARS.index(ch) else: raise NonBase58CharacterError("Unrecognized Base 58 Character: %s" % ch) binOut = '' while n>0: d,m = divmod(n,256) binOut = chr(m) + binOut n = d return '\x00'*padding + binOut
[ "def", "base58_to_binary", "(", "addr", ")", ":", "# Count the zeros ('1' characters) at the beginning", "padding", "=", "0", "for", "c", "in", "addr", ":", "if", "c", "==", "'1'", ":", "padding", "+=", "1", "else", ":", "break", "n", "=", "0", "for", "ch", "in", "addr", ":", "n", "*=", "58", "if", "ch", "in", "BASE58CHARS", ":", "n", "+=", "BASE58CHARS", ".", "index", "(", "ch", ")", "else", ":", "raise", "NonBase58CharacterError", "(", "\"Unrecognized Base 58 Character: %s\"", "%", "ch", ")", "binOut", "=", "''", "while", "n", ">", "0", ":", "d", ",", "m", "=", "divmod", "(", "n", ",", "256", ")", "binOut", "=", "chr", "(", "m", ")", "+", "binOut", "n", "=", "d", "return", "'\\x00'", "*", "padding", "+", "binOut" ]
https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/armoryengine/ArmoryUtils.py#L2027-L2058
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
third_party/closure_linter/closure_linter/common/tokenizer.py
python
Tokenizer.TokenizeFile
(self, file)
return self.__first_token
Tokenizes the given file. Args: file: An iterable that yields one line of the file at a time. Returns: The first token in the file
Tokenizes the given file.
[ "Tokenizes", "the", "given", "file", "." ]
def TokenizeFile(self, file): """Tokenizes the given file. Args: file: An iterable that yields one line of the file at a time. Returns: The first token in the file """ # The current mode. self.mode = self.__starting_mode # The first token in the stream. self.__first_token = None # The last token added to the token stream. self.__last_token = None # The current line number. self.__line_number = 0 for line in file: self.__line_number += 1 self.__TokenizeLine(line) return self.__first_token
[ "def", "TokenizeFile", "(", "self", ",", "file", ")", ":", "# The current mode.", "self", ".", "mode", "=", "self", ".", "__starting_mode", "# The first token in the stream.", "self", ".", "__first_token", "=", "None", "# The last token added to the token stream.", "self", ".", "__last_token", "=", "None", "# The current line number.", "self", ".", "__line_number", "=", "0", "for", "line", "in", "file", ":", "self", ".", "__line_number", "+=", "1", "self", ".", "__TokenizeLine", "(", "line", ")", "return", "self", ".", "__first_token" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/closure_linter/closure_linter/common/tokenizer.py#L54-L76
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/Queue.py
python
Queue.qsize
(self)
return n
Return the approximate size of the queue (not reliable!).
Return the approximate size of the queue (not reliable!).
[ "Return", "the", "approximate", "size", "of", "the", "queue", "(", "not", "reliable!", ")", "." ]
def qsize(self): """Return the approximate size of the queue (not reliable!).""" self.mutex.acquire() n = self._qsize() self.mutex.release() return n
[ "def", "qsize", "(", "self", ")", ":", "self", ".", "mutex", ".", "acquire", "(", ")", "n", "=", "self", ".", "_qsize", "(", ")", "self", ".", "mutex", ".", "release", "(", ")", "return", "n" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/Queue.py#L86-L91
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
PyApp.SetMacExitMenuItemId
(*args, **kwargs)
return _core_.PyApp_SetMacExitMenuItemId(*args, **kwargs)
SetMacExitMenuItemId(long val)
SetMacExitMenuItemId(long val)
[ "SetMacExitMenuItemId", "(", "long", "val", ")" ]
def SetMacExitMenuItemId(*args, **kwargs): """SetMacExitMenuItemId(long val)""" return _core_.PyApp_SetMacExitMenuItemId(*args, **kwargs)
[ "def", "SetMacExitMenuItemId", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "PyApp_SetMacExitMenuItemId", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L8180-L8182
MTG/gaia
0f7214dbdec6f9b651ca34211824841ffba0bc77
src/doc/doxy2swig.py
python
Doxy2SWIG.__init__
(self, src, with_function_signature = False, with_type_info = False, with_constructor_list = False, with_attribute_list = False, with_overloaded_functions = False, textwidth = 80, quiet = False)
Initialize the instance given a source object. `src` can be a file or filename. If you do not want to include function definitions from doxygen then set `include_function_definition` to `False`. This is handy since this allows you to use the swig generated function definition using %feature("autodoc", [0,1]).
Initialize the instance given a source object. `src` can be a file or filename. If you do not want to include function definitions from doxygen then set `include_function_definition` to `False`. This is handy since this allows you to use the swig generated function definition using %feature("autodoc", [0,1]).
[ "Initialize", "the", "instance", "given", "a", "source", "object", ".", "src", "can", "be", "a", "file", "or", "filename", ".", "If", "you", "do", "not", "want", "to", "include", "function", "definitions", "from", "doxygen", "then", "set", "include_function_definition", "to", "False", ".", "This", "is", "handy", "since", "this", "allows", "you", "to", "use", "the", "swig", "generated", "function", "definition", "using", "%feature", "(", "autodoc", "[", "0", "1", "]", ")", "." ]
def __init__(self, src, with_function_signature = False, with_type_info = False, with_constructor_list = False, with_attribute_list = False, with_overloaded_functions = False, textwidth = 80, quiet = False): """Initialize the instance given a source object. `src` can be a file or filename. If you do not want to include function definitions from doxygen then set `include_function_definition` to `False`. This is handy since this allows you to use the swig generated function definition using %feature("autodoc", [0,1]). """ # options: self.with_function_signature = with_function_signature self.with_type_info = with_type_info self.with_constructor_list = with_constructor_list self.with_attribute_list = with_attribute_list self.with_overloaded_functions = with_overloaded_functions self.textwidth = textwidth self.quiet = quiet # state: self.indent = 0 self.listitem = '' self.pieces = [] f = my_open_read(src) self.my_dir = os.path.dirname(f.name) self.xmldoc = minidom.parse(f).documentElement f.close() self.pieces.append('\n// File: %s\n' % os.path.basename(f.name)) self.space_re = re.compile(r'\s+') self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)') self.multi = 0 self.ignores = ['inheritancegraph', 'param', 'listofallmembers', 'innerclass', 'name', 'declname', 'incdepgraph', 'invincdepgraph', 'programlisting', 'type', 'references', 'referencedby', 'location', 'collaborationgraph', 'reimplements', 'reimplementedby', 'derivedcompoundref', 'basecompoundref', 'argsstring', 'definition', 'exceptions']
[ "def", "__init__", "(", "self", ",", "src", ",", "with_function_signature", "=", "False", ",", "with_type_info", "=", "False", ",", "with_constructor_list", "=", "False", ",", "with_attribute_list", "=", "False", ",", "with_overloaded_functions", "=", "False", ",", "textwidth", "=", "80", ",", "quiet", "=", "False", ")", ":", "# options:", "self", ".", "with_function_signature", "=", "with_function_signature", "self", ".", "with_type_info", "=", "with_type_info", "self", ".", "with_constructor_list", "=", "with_constructor_list", "self", ".", "with_attribute_list", "=", "with_attribute_list", "self", ".", "with_overloaded_functions", "=", "with_overloaded_functions", "self", ".", "textwidth", "=", "textwidth", "self", ".", "quiet", "=", "quiet", "# state:", "self", ".", "indent", "=", "0", "self", ".", "listitem", "=", "''", "self", ".", "pieces", "=", "[", "]", "f", "=", "my_open_read", "(", "src", ")", "self", ".", "my_dir", "=", "os", ".", "path", ".", "dirname", "(", "f", ".", "name", ")", "self", ".", "xmldoc", "=", "minidom", ".", "parse", "(", "f", ")", ".", "documentElement", "f", ".", "close", "(", ")", "self", ".", "pieces", ".", "append", "(", "'\\n// File: %s\\n'", "%", "os", ".", "path", ".", "basename", "(", "f", ".", "name", ")", ")", "self", ".", "space_re", "=", "re", ".", "compile", "(", "r'\\s+'", ")", "self", ".", "lead_spc", "=", "re", ".", "compile", "(", "r'^(%feature\\S+\\s+\\S+\\s*?)\"\\s+(\\S)'", ")", "self", ".", "multi", "=", "0", "self", ".", "ignores", "=", "[", "'inheritancegraph'", ",", "'param'", ",", "'listofallmembers'", ",", "'innerclass'", ",", "'name'", ",", "'declname'", ",", "'incdepgraph'", ",", "'invincdepgraph'", ",", "'programlisting'", ",", "'type'", ",", "'references'", ",", "'referencedby'", ",", "'location'", ",", "'collaborationgraph'", ",", "'reimplements'", ",", "'reimplementedby'", ",", "'derivedcompoundref'", ",", "'basecompoundref'", ",", "'argsstring'", ",", "'definition'", ",", "'exceptions'", "]" ]
https://github.com/MTG/gaia/blob/0f7214dbdec6f9b651ca34211824841ffba0bc77/src/doc/doxy2swig.py#L107-L155
microsoft/CNTK
e9396480025b9ca457d26b6f33dd07c474c6aa04
bindings/python/cntk/ops/__init__.py
python
_input_spec
(shape, dtype=default_override_or(np.float32), needs_gradient=False, is_sparse=False, dynamic_axes=[Axis.default_batch_axis()], name='')
We need _input_spec because input is python built-in and because of typemap, must remain in sync with input. TODO: Investigate to remove it.
We need _input_spec because input is python built-in and because of typemap, must remain in sync with input. TODO: Investigate to remove it.
[ "We", "need", "_input_spec", "because", "input", "is", "python", "built", "-", "in", "and", "because", "of", "typemap", "must", "remain", "in", "sync", "with", "input", ".", "TODO", ":", "Investigate", "to", "remove", "it", "." ]
def _input_spec(shape, dtype=default_override_or(np.float32), needs_gradient=False, is_sparse=False, dynamic_axes=[Axis.default_batch_axis()], name=''): ''' We need _input_spec because input is python built-in and because of typemap, must remain in sync with input. TODO: Investigate to remove it. ''' pass
[ "def", "_input_spec", "(", "shape", ",", "dtype", "=", "default_override_or", "(", "np", ".", "float32", ")", ",", "needs_gradient", "=", "False", ",", "is_sparse", "=", "False", ",", "dynamic_axes", "=", "[", "Axis", ".", "default_batch_axis", "(", ")", "]", ",", "name", "=", "''", ")", ":", "pass" ]
https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/ops/__init__.py#L3615-L3622
neoml-lib/neoml
a0d370fba05269a1b2258cef126f77bbd2054a3e
NeoML/Python/neoml/Dnn/Qrnn.py
python
Qrnn.dropout
(self)
return self._internal.get_dropout()
Gets the dropout probability for the forget gate.
Gets the dropout probability for the forget gate.
[ "Gets", "the", "dropout", "probability", "for", "the", "forget", "gate", "." ]
def dropout(self): """Gets the dropout probability for the forget gate. """ return self._internal.get_dropout()
[ "def", "dropout", "(", "self", ")", ":", "return", "self", ".", "_internal", ".", "get_dropout", "(", ")" ]
https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/Qrnn.py#L230-L233
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/code.py
python
InteractiveInterpreter.runcode
(self, code)
Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it.
Execute a code object.
[ "Execute", "a", "code", "object", "." ]
def runcode(self, code): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it. """ try: exec(code, self.locals) except SystemExit: raise except: self.showtraceback()
[ "def", "runcode", "(", "self", ",", "code", ")", ":", "try", ":", "exec", "(", "code", ",", "self", ".", "locals", ")", "except", "SystemExit", ":", "raise", "except", ":", "self", ".", "showtraceback", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/code.py#L77-L94
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/ros_comm/rosmaster/src/rosmaster/master_api.py
python
ROSMasterHandler.__init__
(self, num_workers=NUM_WORKERS)
ctor.
ctor.
[ "ctor", "." ]
def __init__(self, num_workers=NUM_WORKERS): """ctor.""" self.uri = None self.done = False self.thread_pool = rosmaster.threadpool.MarkedThreadPool(num_workers) # pub/sub/providers: dict { topicName : [publishers/subscribers names] } self.ps_lock = threading.Condition(threading.Lock()) self.reg_manager = RegistrationManager(self.thread_pool) # maintain refs to reg_manager fields self.publishers = self.reg_manager.publishers self.subscribers = self.reg_manager.subscribers self.services = self.reg_manager.services self.param_subscribers = self.reg_manager.param_subscribers self.topics_types = {} #dict { topicName : type } # parameter server dictionary self.param_server = rosmaster.paramserver.ParamDictionary(self.reg_manager)
[ "def", "__init__", "(", "self", ",", "num_workers", "=", "NUM_WORKERS", ")", ":", "self", ".", "uri", "=", "None", "self", ".", "done", "=", "False", "self", ".", "thread_pool", "=", "rosmaster", ".", "threadpool", ".", "MarkedThreadPool", "(", "num_workers", ")", "# pub/sub/providers: dict { topicName : [publishers/subscribers names] }", "self", ".", "ps_lock", "=", "threading", ".", "Condition", "(", "threading", ".", "Lock", "(", ")", ")", "self", ".", "reg_manager", "=", "RegistrationManager", "(", "self", ".", "thread_pool", ")", "# maintain refs to reg_manager fields", "self", ".", "publishers", "=", "self", ".", "reg_manager", ".", "publishers", "self", ".", "subscribers", "=", "self", ".", "reg_manager", ".", "subscribers", "self", ".", "services", "=", "self", ".", "reg_manager", ".", "services", "self", ".", "param_subscribers", "=", "self", ".", "reg_manager", ".", "param_subscribers", "self", ".", "topics_types", "=", "{", "}", "#dict { topicName : type }", "# parameter server dictionary", "self", ".", "param_server", "=", "rosmaster", ".", "paramserver", ".", "ParamDictionary", "(", "self", ".", "reg_manager", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros_comm/rosmaster/src/rosmaster/master_api.py#L239-L260
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/io/matlab/mio5.py
python
to_writeable
(source)
return narr
Convert input object ``source`` to something we can write Parameters ---------- source : object Returns ------- arr : None or ndarray or EmptyStructMarker If `source` cannot be converted to something we can write to a matfile, return None. If `source` is equivalent to an empty dictionary, return ``EmptyStructMarker``. Otherwise return `source` converted to an ndarray with contents for writing to matfile.
Convert input object ``source`` to something we can write
[ "Convert", "input", "object", "source", "to", "something", "we", "can", "write" ]
def to_writeable(source): ''' Convert input object ``source`` to something we can write Parameters ---------- source : object Returns ------- arr : None or ndarray or EmptyStructMarker If `source` cannot be converted to something we can write to a matfile, return None. If `source` is equivalent to an empty dictionary, return ``EmptyStructMarker``. Otherwise return `source` converted to an ndarray with contents for writing to matfile. ''' if isinstance(source, np.ndarray): return source if source is None: return None # Objects that implement mappings is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and hasattr(source, 'items')) # Objects that don't implement mappings, but do have dicts if not is_mapping and hasattr(source, '__dict__'): source = dict((key, value) for key, value in source.__dict__.items() if not key.startswith('_')) is_mapping = True if is_mapping: dtype = [] values = [] for field, value in source.items(): if (isinstance(field, string_types) and field[0] not in '_0123456789'): dtype.append((field, object)) values.append(value) if dtype: return np.array([tuple(values)], dtype) else: return EmptyStructMarker # Next try and convert to an array narr = np.asanyarray(source) if narr.dtype.type in (object, np.object_) and \ narr.shape == () and narr == source: # No interesting conversion possible return None return narr
[ "def", "to_writeable", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "np", ".", "ndarray", ")", ":", "return", "source", "if", "source", "is", "None", ":", "return", "None", "# Objects that implement mappings", "is_mapping", "=", "(", "hasattr", "(", "source", ",", "'keys'", ")", "and", "hasattr", "(", "source", ",", "'values'", ")", "and", "hasattr", "(", "source", ",", "'items'", ")", ")", "# Objects that don't implement mappings, but do have dicts", "if", "not", "is_mapping", "and", "hasattr", "(", "source", ",", "'__dict__'", ")", ":", "source", "=", "dict", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "source", ".", "__dict__", ".", "items", "(", ")", "if", "not", "key", ".", "startswith", "(", "'_'", ")", ")", "is_mapping", "=", "True", "if", "is_mapping", ":", "dtype", "=", "[", "]", "values", "=", "[", "]", "for", "field", ",", "value", "in", "source", ".", "items", "(", ")", ":", "if", "(", "isinstance", "(", "field", ",", "string_types", ")", "and", "field", "[", "0", "]", "not", "in", "'_0123456789'", ")", ":", "dtype", ".", "append", "(", "(", "field", ",", "object", ")", ")", "values", ".", "append", "(", "value", ")", "if", "dtype", ":", "return", "np", ".", "array", "(", "[", "tuple", "(", "values", ")", "]", ",", "dtype", ")", "else", ":", "return", "EmptyStructMarker", "# Next try and convert to an array", "narr", "=", "np", ".", "asanyarray", "(", "source", ")", "if", "narr", ".", "dtype", ".", "type", "in", "(", "object", ",", "np", ".", "object_", ")", "and", "narr", ".", "shape", "==", "(", ")", "and", "narr", "==", "source", ":", "# No interesting conversion possible", "return", "None", "return", "narr" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/io/matlab/mio5.py#L407-L452
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/splitter.py
python
MultiSplitterWindow.AppendWindow
(self, window, sashPos=-1)
Add a new window to the splitter at the right side or bottom of the window stack. If sashPos is given then it is used to size the new window.
Add a new window to the splitter at the right side or bottom of the window stack. If sashPos is given then it is used to size the new window.
[ "Add", "a", "new", "window", "to", "the", "splitter", "at", "the", "right", "side", "or", "bottom", "of", "the", "window", "stack", ".", "If", "sashPos", "is", "given", "then", "it", "is", "used", "to", "size", "the", "new", "window", "." ]
def AppendWindow(self, window, sashPos=-1): """ Add a new window to the splitter at the right side or bottom of the window stack. If sashPos is given then it is used to size the new window. """ self.InsertWindow(len(self._windows), window, sashPos)
[ "def", "AppendWindow", "(", "self", ",", "window", ",", "sashPos", "=", "-", "1", ")", ":", "self", ".", "InsertWindow", "(", "len", "(", "self", ".", "_windows", ")", ",", "window", ",", "sashPos", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/splitter.py#L136-L142
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/ntpath.py
python
normpath
(path)
return prefix + sep.join(comps)
Normalize path, eliminating double slashes, etc.
Normalize path, eliminating double slashes, etc.
[ "Normalize", "path", "eliminating", "double", "slashes", "etc", "." ]
def normpath(path): """Normalize path, eliminating double slashes, etc.""" path = os.fspath(path) if isinstance(path, bytes): sep = b'\\' altsep = b'/' curdir = b'.' pardir = b'..' special_prefixes = (b'\\\\.\\', b'\\\\?\\') else: sep = '\\' altsep = '/' curdir = '.' pardir = '..' special_prefixes = ('\\\\.\\', '\\\\?\\') if path.startswith(special_prefixes): # in the case of paths with these prefixes: # \\.\ -> device names # \\?\ -> literal paths # do not do any normalization, but return the path # unchanged apart from the call to os.fspath() return path path = path.replace(altsep, sep) prefix, path = splitdrive(path) # collapse initial backslashes if path.startswith(sep): prefix += sep path = path.lstrip(sep) comps = path.split(sep) i = 0 while i < len(comps): if not comps[i] or comps[i] == curdir: del comps[i] elif comps[i] == pardir: if i > 0 and comps[i-1] != pardir: del comps[i-1:i+1] i -= 1 elif i == 0 and prefix.endswith(sep): del comps[i] else: i += 1 else: i += 1 # If the path is now empty, substitute '.' if not prefix and not comps: comps.append(curdir) return prefix + sep.join(comps)
[ "def", "normpath", "(", "path", ")", ":", "path", "=", "os", ".", "fspath", "(", "path", ")", "if", "isinstance", "(", "path", ",", "bytes", ")", ":", "sep", "=", "b'\\\\'", "altsep", "=", "b'/'", "curdir", "=", "b'.'", "pardir", "=", "b'..'", "special_prefixes", "=", "(", "b'\\\\\\\\.\\\\'", ",", "b'\\\\\\\\?\\\\'", ")", "else", ":", "sep", "=", "'\\\\'", "altsep", "=", "'/'", "curdir", "=", "'.'", "pardir", "=", "'..'", "special_prefixes", "=", "(", "'\\\\\\\\.\\\\'", ",", "'\\\\\\\\?\\\\'", ")", "if", "path", ".", "startswith", "(", "special_prefixes", ")", ":", "# in the case of paths with these prefixes:", "# \\\\.\\ -> device names", "# \\\\?\\ -> literal paths", "# do not do any normalization, but return the path", "# unchanged apart from the call to os.fspath()", "return", "path", "path", "=", "path", ".", "replace", "(", "altsep", ",", "sep", ")", "prefix", ",", "path", "=", "splitdrive", "(", "path", ")", "# collapse initial backslashes", "if", "path", ".", "startswith", "(", "sep", ")", ":", "prefix", "+=", "sep", "path", "=", "path", ".", "lstrip", "(", "sep", ")", "comps", "=", "path", ".", "split", "(", "sep", ")", "i", "=", "0", "while", "i", "<", "len", "(", "comps", ")", ":", "if", "not", "comps", "[", "i", "]", "or", "comps", "[", "i", "]", "==", "curdir", ":", "del", "comps", "[", "i", "]", "elif", "comps", "[", "i", "]", "==", "pardir", ":", "if", "i", ">", "0", "and", "comps", "[", "i", "-", "1", "]", "!=", "pardir", ":", "del", "comps", "[", "i", "-", "1", ":", "i", "+", "1", "]", "i", "-=", "1", "elif", "i", "==", "0", "and", "prefix", ".", "endswith", "(", "sep", ")", ":", "del", "comps", "[", "i", "]", "else", ":", "i", "+=", "1", "else", ":", "i", "+=", "1", "# If the path is now empty, substitute '.'", "if", "not", "prefix", "and", "not", "comps", ":", "comps", ".", "append", "(", "curdir", ")", "return", "prefix", "+", "sep", ".", "join", "(", "comps", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/ntpath.py#L450-L498
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
third_party/closure_linter/closure_linter/common/tokenizer.py
python
Tokenizer.__AddToken
(self, token)
Add the given token to the token stream. Args: token: The token to add.
Add the given token to the token stream.
[ "Add", "the", "given", "token", "to", "the", "token", "stream", "." ]
def __AddToken(self, token): """Add the given token to the token stream. Args: token: The token to add. """ # Store the first token, or point the previous token to this one. if not self.__first_token: self.__first_token = token else: self.__last_token.next = token # Establish the doubly linked list token.previous = self.__last_token self.__last_token = token # Compute the character indices token.start_index = self.__start_index self.__start_index += token.length
[ "def", "__AddToken", "(", "self", ",", "token", ")", ":", "# Store the first token, or point the previous token to this one.", "if", "not", "self", ".", "__first_token", ":", "self", ".", "__first_token", "=", "token", "else", ":", "self", ".", "__last_token", ".", "next", "=", "token", "# Establish the doubly linked list", "token", ".", "previous", "=", "self", ".", "__last_token", "self", ".", "__last_token", "=", "token", "# Compute the character indices", "token", ".", "start_index", "=", "self", ".", "__start_index", "self", ".", "__start_index", "+=", "token", ".", "length" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/closure_linter/closure_linter/common/tokenizer.py#L166-L184
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/meta_graph_transform/meta_graph_transform.py
python
_find_all_mandatory_retain_ops
(base_meta_graph_def)
return initializer_names
Identify all infrastructural Ops, to ensure that they are retained. We need to retain infrastructural Ops (init and saver stuff), in addition to the desired outputs. For now we retain *all* save and restore ops, variable initializers, table initializers, and main init ops. This means that strip_unused_nodes will not remove unused variables. Args: base_meta_graph_def: a GraphDef proto in which to identify nodes to retain. Returns: A dictionary corresponding to the nodes associated with each collection that are to be retained.
Identify all infrastructural Ops, to ensure that they are retained.
[ "Identify", "all", "infrastructural", "Ops", "to", "ensure", "that", "they", "are", "retained", "." ]
def _find_all_mandatory_retain_ops(base_meta_graph_def): """Identify all infrastructural Ops, to ensure that they are retained. We need to retain infrastructural Ops (init and saver stuff), in addition to the desired outputs. For now we retain *all* save and restore ops, variable initializers, table initializers, and main init ops. This means that strip_unused_nodes will not remove unused variables. Args: base_meta_graph_def: a GraphDef proto in which to identify nodes to retain. Returns: A dictionary corresponding to the nodes associated with each collection that are to be retained. """ # TODO(b/63447631): implement variable stripping. initializer_names = {} # Primary SaverDef and SAVERS collection saver_defs = [] if base_meta_graph_def.HasField('saver_def'): saver_defs.append(base_meta_graph_def.saver_def) saver_defs.extend(_get_all_protos_from_collection( base_meta_graph_def, _ops.GraphKeys.SAVERS)) for saver_def in saver_defs: savers = initializer_names.get(_ops.GraphKeys.SAVERS, []) savers.extend([ saver_def.filename_tensor_name, saver_def.save_tensor_name, saver_def.restore_op_name ]) initializer_names[_ops.GraphKeys.SAVERS] = savers # Variable initializers variable_collections = [ _ops.GraphKeys.GLOBAL_VARIABLES, _ops.GraphKeys.TRAINABLE_VARIABLES, _ops.GraphKeys.MOVING_AVERAGE_VARIABLES, _ops.GraphKeys.LOCAL_VARIABLES, _ops.GraphKeys.MODEL_VARIABLES] for var_coll in variable_collections: variables = _get_all_protos_from_collection(base_meta_graph_def, var_coll) var_init_names = [v.initializer_name for v in variables] if var_init_names: # Sanity check to ensure we don't overwrite dictionary entries. assert var_coll not in initializer_names initializer_names[var_coll] = var_init_names # Table initializers op_names = _get_all_node_names_from_collection( base_meta_graph_def, _ops.GraphKeys.TABLE_INITIALIZERS) if op_names: # Sanity check to ensure we don't overwrite dictionary entries. assert _ops.GraphKeys.TABLE_INITIALIZERS not in initializer_names table_initializers = [t for t in op_names] initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = table_initializers # Various init ops various_init_op_collections = [_saved_model_constants.LEGACY_INIT_OP_KEY, _saved_model_constants.MAIN_OP_KEY, _ops.GraphKeys.INIT_OP, _ops.GraphKeys.LOCAL_INIT_OP, _ops.GraphKeys.READY_OP, _ops.GraphKeys.READY_FOR_LOCAL_INIT_OP] for op_coll in various_init_op_collections: op_name = _get_single_node_name_from_collection( base_meta_graph_def, op_coll) if op_name: # Sanity check to ensure we don't overwrite dictionary entries. assert op_coll not in initializer_names initializer_names[op_coll] = [op_name] return initializer_names
[ "def", "_find_all_mandatory_retain_ops", "(", "base_meta_graph_def", ")", ":", "# TODO(b/63447631): implement variable stripping.", "initializer_names", "=", "{", "}", "# Primary SaverDef and SAVERS collection", "saver_defs", "=", "[", "]", "if", "base_meta_graph_def", ".", "HasField", "(", "'saver_def'", ")", ":", "saver_defs", ".", "append", "(", "base_meta_graph_def", ".", "saver_def", ")", "saver_defs", ".", "extend", "(", "_get_all_protos_from_collection", "(", "base_meta_graph_def", ",", "_ops", ".", "GraphKeys", ".", "SAVERS", ")", ")", "for", "saver_def", "in", "saver_defs", ":", "savers", "=", "initializer_names", ".", "get", "(", "_ops", ".", "GraphKeys", ".", "SAVERS", ",", "[", "]", ")", "savers", ".", "extend", "(", "[", "saver_def", ".", "filename_tensor_name", ",", "saver_def", ".", "save_tensor_name", ",", "saver_def", ".", "restore_op_name", "]", ")", "initializer_names", "[", "_ops", ".", "GraphKeys", ".", "SAVERS", "]", "=", "savers", "# Variable initializers", "variable_collections", "=", "[", "_ops", ".", "GraphKeys", ".", "GLOBAL_VARIABLES", ",", "_ops", ".", "GraphKeys", ".", "TRAINABLE_VARIABLES", ",", "_ops", ".", "GraphKeys", ".", "MOVING_AVERAGE_VARIABLES", ",", "_ops", ".", "GraphKeys", ".", "LOCAL_VARIABLES", ",", "_ops", ".", "GraphKeys", ".", "MODEL_VARIABLES", "]", "for", "var_coll", "in", "variable_collections", ":", "variables", "=", "_get_all_protos_from_collection", "(", "base_meta_graph_def", ",", "var_coll", ")", "var_init_names", "=", "[", "v", ".", "initializer_name", "for", "v", "in", "variables", "]", "if", "var_init_names", ":", "# Sanity check to ensure we don't overwrite dictionary entries.", "assert", "var_coll", "not", "in", "initializer_names", "initializer_names", "[", "var_coll", "]", "=", "var_init_names", "# Table initializers", "op_names", "=", "_get_all_node_names_from_collection", "(", "base_meta_graph_def", ",", "_ops", ".", "GraphKeys", ".", "TABLE_INITIALIZERS", ")", "if", "op_names", ":", "# Sanity check to ensure we don't overwrite dictionary entries.", "assert", "_ops", ".", "GraphKeys", ".", "TABLE_INITIALIZERS", "not", "in", "initializer_names", "table_initializers", "=", "[", "t", "for", "t", "in", "op_names", "]", "initializer_names", "[", "_ops", ".", "GraphKeys", ".", "TABLE_INITIALIZERS", "]", "=", "table_initializers", "# Various init ops", "various_init_op_collections", "=", "[", "_saved_model_constants", ".", "LEGACY_INIT_OP_KEY", ",", "_saved_model_constants", ".", "MAIN_OP_KEY", ",", "_ops", ".", "GraphKeys", ".", "INIT_OP", ",", "_ops", ".", "GraphKeys", ".", "LOCAL_INIT_OP", ",", "_ops", ".", "GraphKeys", ".", "READY_OP", ",", "_ops", ".", "GraphKeys", ".", "READY_FOR_LOCAL_INIT_OP", "]", "for", "op_coll", "in", "various_init_op_collections", ":", "op_name", "=", "_get_single_node_name_from_collection", "(", "base_meta_graph_def", ",", "op_coll", ")", "if", "op_name", ":", "# Sanity check to ensure we don't overwrite dictionary entries.", "assert", "op_coll", "not", "in", "initializer_names", "initializer_names", "[", "op_coll", "]", "=", "[", "op_name", "]", "return", "initializer_names" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/meta_graph_transform/meta_graph_transform.py#L387-L460
Evolving-AI-Lab/fooling
66f097dd6bd2eb6794ade3e187a7adfdf1887688
caffe/scripts/cpp_lint.py
python
_DropCommonSuffixes
(filename)
return os.path.splitext(filename)[0]
Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed.
Drops common suffixes like _test.cc or -inl.h from filename.
[ "Drops", "common", "suffixes", "like", "_test", ".", "cc", "or", "-", "inl", ".", "h", "from", "filename", "." ]
def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0]
[ "def", "_DropCommonSuffixes", "(", "filename", ")", ":", "for", "suffix", "in", "(", "'test.cc'", ",", "'regtest.cc'", ",", "'unittest.cc'", ",", "'inl.h'", ",", "'impl.h'", ",", "'internal.h'", ")", ":", "if", "(", "filename", ".", "endswith", "(", "suffix", ")", "and", "len", "(", "filename", ")", ">", "len", "(", "suffix", ")", "and", "filename", "[", "-", "len", "(", "suffix", ")", "-", "1", "]", "in", "(", "'-'", ",", "'_'", ")", ")", ":", "return", "filename", "[", ":", "-", "len", "(", "suffix", ")", "-", "1", "]", "return", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]" ]
https://github.com/Evolving-AI-Lab/fooling/blob/66f097dd6bd2eb6794ade3e187a7adfdf1887688/caffe/scripts/cpp_lint.py#L3506-L3530