column              type            lengths / values
------              ----            ----------------
nwo                 stringlengths   5 .. 86
sha                 stringlengths   40 .. 40
path                stringlengths   4 .. 189
language            stringclasses   1 value
identifier          stringlengths   1 .. 94
parameters          stringlengths   2 .. 4.03k
argument_list       stringclasses   1 value
return_statement    stringlengths   0 .. 11.5k
docstring           stringlengths   1 .. 33.2k
docstring_summary   stringlengths   0 .. 5.15k
docstring_tokens    sequence        -
function            stringlengths   34 .. 151k
function_tokens     sequence        -
url                 stringlengths   90 .. 278
smilehao/xlua-framework
a03801538be2b0e92d39332d445b22caca1ef61f
ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/build/lib/google/protobuf/internal/python_message.py
python
_AddSlots
(message_descriptor, dictionary)
Adds a __slots__ entry to dictionary, containing the names of all valid attributes for this message type. Args: message_descriptor: A Descriptor instance describing this message type. dictionary: Class dictionary to which we'll add a '__slots__' entry.
Adds a __slots__ entry to dictionary, containing the names of all valid attributes for this message type.
[ "Adds", "a", "__slots__", "entry", "to", "dictionary", "containing", "the", "names", "of", "all", "valid", "attributes", "for", "this", "message", "type", "." ]
def _AddSlots(message_descriptor, dictionary):
  """Adds a __slots__ entry to dictionary, containing the names of all valid
  attributes for this message type.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  dictionary['__slots__'] = ['_cached_byte_size',
                             '_cached_byte_size_dirty',
                             '_fields',
                             '_unknown_fields',
                             '_is_present_in_parent',
                             '_listener',
                             '_listener_for_children',
                             '__weakref__']
[ "def", "_AddSlots", "(", "message_descriptor", ",", "dictionary", ")", ":", "dictionary", "[", "'__slots__'", "]", "=", "[", "'_cached_byte_size'", ",", "'_cached_byte_size_dirty'", ",", "'_fields'", ",", "'_unknown_fields'", ",", "'_is_present_in_parent'", ",", "'_listener'", ",", "'_listener_for_children'", ",", "'__weakref__'", "]" ]
https://github.com/smilehao/xlua-framework/blob/a03801538be2b0e92d39332d445b22caca1ef61f/ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/build/lib/google/protobuf/internal/python_message.py#L164-L179
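A minimal usage sketch for the record above (not from the source file): the snippet's body never consults message_descriptor, so a placeholder suffices here.

```python
# Hypothetical: _AddSlots only writes a fixed '__slots__' list into the
# class dictionary, so None stands in for the Descriptor in this sketch.
class_dict = {}
_AddSlots(message_descriptor=None, dictionary=class_dict)
print(class_dict['__slots__'])  # ['_cached_byte_size', ..., '__weakref__']
```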
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_controls.py
python
CheckListBox.Check
(*args, **kwargs)
return _controls_.CheckListBox_Check(*args, **kwargs)
Check(self, unsigned int index, int check=True)
Check(self, unsigned int index, int check=True)
[ "Check", "(", "self", "unsigned", "int", "index", "int", "check", "=", "True", ")" ]
def Check(*args, **kwargs):
    """Check(self, unsigned int index, int check=True)"""
    return _controls_.CheckListBox_Check(*args, **kwargs)
[ "def", "Check", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "CheckListBox_Check", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L1330-L1332
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/boto/boto/kms/layer1.py
python
KMSConnection.re_encrypt
(self, ciphertext_blob, destination_key_id, source_encryption_context=None, destination_encryption_context=None, grant_tokens=None)
return response
Encrypts data on the server side with a new customer master key without exposing the plaintext of the data on the client side. The data is first decrypted and then encrypted. This operation can also be used to change the encryption context of a ciphertext. :type ciphertext_blob: blob :param ciphertext_blob: Ciphertext of the data to re-encrypt. :type source_encryption_context: map :param source_encryption_context: Encryption context used to encrypt and decrypt the data specified in the `CiphertextBlob` parameter. :type destination_key_id: string :param destination_key_id: Key identifier of the key used to re-encrypt the data. :type destination_encryption_context: map :param destination_encryption_context: Encryption context to be used when the data is re-encrypted. :type grant_tokens: list :param grant_tokens: Grant tokens that identify the grants that have permissions for the encryption and decryption process.
Encrypts data on the server side with a new customer master key without exposing the plaintext of the data on the client side. The data is first decrypted and then encrypted. This operation can also be used to change the encryption context of a ciphertext.
[ "Encrypts", "data", "on", "the", "server", "side", "with", "a", "new", "customer", "master", "key", "without", "exposing", "the", "plaintext", "of", "the", "data", "on", "the", "client", "side", ".", "The", "data", "is", "first", "decrypted", "and", "then", "encrypted", ".", "This", "operation", "can", "also", "be", "used", "to", "change", "the", "encryption", "context", "of", "a", "ciphertext", "." ]
def re_encrypt(self, ciphertext_blob, destination_key_id,
               source_encryption_context=None,
               destination_encryption_context=None,
               grant_tokens=None):
    """
    Encrypts data on the server side with a new customer master
    key without exposing the plaintext of the data on the client
    side. The data is first decrypted and then encrypted. This
    operation can also be used to change the encryption context
    of a ciphertext.

    :type ciphertext_blob: blob
    :param ciphertext_blob: Ciphertext of the data to re-encrypt.

    :type source_encryption_context: map
    :param source_encryption_context: Encryption context used to encrypt
        and decrypt the data specified in the `CiphertextBlob` parameter.

    :type destination_key_id: string
    :param destination_key_id: Key identifier of the key used to re-encrypt
        the data.

    :type destination_encryption_context: map
    :param destination_encryption_context: Encryption context to be used
        when the data is re-encrypted.

    :type grant_tokens: list
    :param grant_tokens: Grant tokens that identify the grants that have
        permissions for the encryption and decryption process.
    """
    if not isinstance(ciphertext_blob, six.binary_type):
        raise TypeError(
            "Value of argument ``ciphertext_blob`` "
            "must be of type %s." % six.binary_type)
    ciphertext_blob = base64.b64encode(ciphertext_blob)
    params = {
        'CiphertextBlob': ciphertext_blob,
        'DestinationKeyId': destination_key_id,
    }
    if source_encryption_context is not None:
        params['SourceEncryptionContext'] = source_encryption_context
    if destination_encryption_context is not None:
        params['DestinationEncryptionContext'] = destination_encryption_context
    if grant_tokens is not None:
        params['GrantTokens'] = grant_tokens
    response = self.make_request(action='ReEncrypt',
                                 body=json.dumps(params))
    if response.get('CiphertextBlob') is not None:
        response['CiphertextBlob'] = base64.b64decode(
            response['CiphertextBlob'].encode('utf-8'))
    return response
[ "def", "re_encrypt", "(", "self", ",", "ciphertext_blob", ",", "destination_key_id", ",", "source_encryption_context", "=", "None", ",", "destination_encryption_context", "=", "None", ",", "grant_tokens", "=", "None", ")", ":", "if", "not", "isinstance", "(", "ciphertext_blob", ",", "six", ".", "binary_type", ")", ":", "raise", "TypeError", "(", "\"Value of argument ``ciphertext_blob`` \"", "\"must be of type %s.\"", "%", "six", ".", "binary_type", ")", "ciphertext_blob", "=", "base64", ".", "b64encode", "(", "ciphertext_blob", ")", "params", "=", "{", "'CiphertextBlob'", ":", "ciphertext_blob", ",", "'DestinationKeyId'", ":", "destination_key_id", ",", "}", "if", "source_encryption_context", "is", "not", "None", ":", "params", "[", "'SourceEncryptionContext'", "]", "=", "source_encryption_context", "if", "destination_encryption_context", "is", "not", "None", ":", "params", "[", "'DestinationEncryptionContext'", "]", "=", "destination_encryption_context", "if", "grant_tokens", "is", "not", "None", ":", "params", "[", "'GrantTokens'", "]", "=", "grant_tokens", "response", "=", "self", ".", "make_request", "(", "action", "=", "'ReEncrypt'", ",", "body", "=", "json", ".", "dumps", "(", "params", ")", ")", "if", "response", ".", "get", "(", "'CiphertextBlob'", ")", "is", "not", "None", ":", "response", "[", "'CiphertextBlob'", "]", "=", "base64", ".", "b64decode", "(", "response", "[", "'CiphertextBlob'", "]", ".", "encode", "(", "'utf-8'", ")", ")", "return", "response" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/kms/layer1.py#L701-L751
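A hedged call sketch for the re_encrypt record above; the region, key alias, and ciphertext are placeholders, not values from the source.

```python
# Hypothetical usage of boto 2.x KMS; assumes credentials are configured.
import boto.kms

conn = boto.kms.connect_to_region('us-east-1')
# ciphertext_blob must be bytes (six.binary_type), per the type check above;
# normally it comes from a prior encrypt() call.
response = conn.re_encrypt(
    ciphertext_blob=b'...opaque ciphertext bytes...',
    destination_key_id='alias/my-new-key')
new_ciphertext = response['CiphertextBlob']  # method base64-decodes this back to bytes
```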
arkenthera/electron-vibrancy
383153ef9ccb23a6c7517150d6bb0794dff3115e
scripts/cpplint.py
python
NestingState.CheckCompletedBlocks
(self, filename, error)
Checks that all classes and namespaces have been completely parsed. Call this when all lines in a file have been processed. Args: filename: The name of the current file. error: The function to call with any errors found.
Checks that all classes and namespaces have been completely parsed.
[ "Checks", "that", "all", "classes", "and", "namespaces", "have", "been", "completely", "parsed", "." ]
def CheckCompletedBlocks(self, filename, error):
  """Checks that all classes and namespaces have been completely parsed.

  Call this when all lines in a file have been processed.
  Args:
    filename: The name of the current file.
    error: The function to call with any errors found.
  """
  # Note: This test can result in false positives if #ifdef constructs
  # get in the way of brace matching. See the testBuildClass test in
  # cpplint_unittest.py for an example of this.
  for obj in self.stack:
    if isinstance(obj, _ClassInfo):
      error(filename, obj.starting_linenum, 'build/class', 5,
            'Failed to find complete declaration of class %s' %
            obj.name)
    elif isinstance(obj, _NamespaceInfo):
      error(filename, obj.starting_linenum, 'build/namespaces', 5,
            'Failed to find complete declaration of namespace %s' %
            obj.name)
[ "def", "CheckCompletedBlocks", "(", "self", ",", "filename", ",", "error", ")", ":", "# Note: This test can result in false positives if #ifdef constructs", "# get in the way of brace matching. See the testBuildClass test in", "# cpplint_unittest.py for an example of this.", "for", "obj", "in", "self", ".", "stack", ":", "if", "isinstance", "(", "obj", ",", "_ClassInfo", ")", ":", "error", "(", "filename", ",", "obj", ".", "starting_linenum", ",", "'build/class'", ",", "5", ",", "'Failed to find complete declaration of class %s'", "%", "obj", ".", "name", ")", "elif", "isinstance", "(", "obj", ",", "_NamespaceInfo", ")", ":", "error", "(", "filename", ",", "obj", ".", "starting_linenum", ",", "'build/namespaces'", ",", "5", ",", "'Failed to find complete declaration of namespace %s'", "%", "obj", ".", "name", ")" ]
https://github.com/arkenthera/electron-vibrancy/blob/383153ef9ccb23a6c7517150d6bb0794dff3115e/scripts/cpplint.py#L2299-L2318
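A sketch of the callback contract the method above assumes: cpplint reports findings through a function with this five-argument signature. Here `state` stands in for a NestingState that has already consumed every line of a file, an assumption not shown in the record.

```python
# Hypothetical error collector matching cpplint's callback signature.
def collect_error(filename, linenum, category, confidence, message):
    print('%s:%d: %s [%s] [%d]' % (filename, linenum, message,
                                   category, confidence))

state.CheckCompletedBlocks('example.cc', collect_error)
```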
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemWebCommunicator/AWS/common-code/lib/AWSIoTPythonSDK/core/greengrass/discovery/models.py
python
CoreConnectivityInfo.connectivityInfoList
(self)
return list(self._connectivity_info_dict.values())
The list of connectivity information that this Greengrass core has.
[]
def connectivityInfoList(self):
    """
    The list of connectivity information that this Greengrass core has.
    """
    return list(self._connectivity_info_dict.values())
[ "def", "connectivityInfoList", "(", "self", ")", ":", "return", "list", "(", "self", ".", "_connectivity_info_dict", ".", "values", "(", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemWebCommunicator/AWS/common-code/lib/AWSIoTPythonSDK/core/greengrass/discovery/models.py#L116-L122
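A hedged usage sketch: `core_info` stands in for a CoreConnectivityInfo parsed from a Greengrass discovery response; in the SDK this accessor is exposed as a property, and the `host`/`port` attributes below are assumed from the same models module.

```python
# Hypothetical: enumerate the ways to reach this Greengrass core.
for info in core_info.connectivityInfoList:
    print(info.host, info.port)
```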
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/richtext.py
python
RichTextObject.GetTopMargin
(*args, **kwargs)
return _richtext.RichTextObject_GetTopMargin(*args, **kwargs)
GetTopMargin(self) -> int
GetTopMargin(self) -> int
[ "GetTopMargin", "(", "self", ")", "-", ">", "int" ]
def GetTopMargin(*args, **kwargs):
    """GetTopMargin(self) -> int"""
    return _richtext.RichTextObject_GetTopMargin(*args, **kwargs)
[ "def", "GetTopMargin", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextObject_GetTopMargin", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L1345-L1347
xhzdeng/crpn
a5aef0f80dbe486103123f740c634fb01e6cc9a1
caffe-fast-rcnn/python/caffe/coord_map.py
python
crop
(top_from, top_to)
return L.Crop(top_from, top_to, crop_param=dict(axis=ax + 1, # +1 for first cropping dim. offset=list(-np.round(b).astype(int))))
Define a Crop layer to crop a top (from) to another top (to) by determining the coordinate mapping between the two and net spec'ing the axis and shift parameters of the crop.
Define a Crop layer to crop a top (from) to another top (to) by determining the coordinate mapping between the two and net spec'ing the axis and shift parameters of the crop.
[ "Define", "a", "Crop", "layer", "to", "crop", "a", "top", "(", "from", ")", "to", "another", "top", "(", "to", ")", "by", "determining", "the", "coordinate", "mapping", "between", "the", "two", "and", "net", "spec", "ing", "the", "axis", "and", "shift", "parameters", "of", "the", "crop", "." ]
def crop(top_from, top_to):
    """
    Define a Crop layer to crop a top (from) to another top (to) by
    determining the coordinate mapping between the two and net spec'ing
    the axis and shift parameters of the crop.
    """
    ax, a, b = coord_map_from_to(top_from, top_to)
    assert (a == 1).all(), 'scale mismatch on crop (a = {})'.format(a)
    assert (b <= 0).all(), 'cannot crop negative offset (b = {})'.format(b)
    assert (np.round(b) == b).all(), 'cannot crop noninteger offset ' \
        '(b = {})'.format(b)
    return L.Crop(top_from, top_to,
                  crop_param=dict(axis=ax + 1,  # +1 for first cropping dim.
                                  offset=list(-np.round(b).astype(int))))
[ "def", "crop", "(", "top_from", ",", "top_to", ")", ":", "ax", ",", "a", ",", "b", "=", "coord_map_from_to", "(", "top_from", ",", "top_to", ")", "assert", "(", "a", "==", "1", ")", ".", "all", "(", ")", ",", "'scale mismatch on crop (a = {})'", ".", "format", "(", "a", ")", "assert", "(", "b", "<=", "0", ")", ".", "all", "(", ")", ",", "'cannot crop negative offset (b = {})'", ".", "format", "(", "b", ")", "assert", "(", "np", ".", "round", "(", "b", ")", "==", "b", ")", ".", "all", "(", ")", ",", "'cannot crop noninteger offset '", "'(b = {})'", ".", "format", "(", "b", ")", "return", "L", ".", "Crop", "(", "top_from", ",", "top_to", ",", "crop_param", "=", "dict", "(", "axis", "=", "ax", "+", "1", ",", "# +1 for first cropping dim.", "offset", "=", "list", "(", "-", "np", ".", "round", "(", "b", ")", ".", "astype", "(", "int", ")", ")", ")", ")" ]
https://github.com/xhzdeng/crpn/blob/a5aef0f80dbe486103123f740c634fb01e6cc9a1/caffe-fast-rcnn/python/caffe/coord_map.py#L172-L185
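A schematic sketch, in the spirit of FCN-style models, of where crop() sits in a NetSpec: an upsampled score map is cropped back to the input's coordinate frame. The layer names and shapes are illustrative assumptions, and a real net would need deconvolution parameters for coord_map_from_to to resolve.

```python
# Illustrative only (assumes pycaffe); 'data' and 'upscore' are placeholders.
import caffe
from caffe import layers as L

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 500, 500]))
n.upscore = L.Deconvolution(n.data)   # stands in for an upsampling path
n.score = crop(n.upscore, n.data)     # axis/offset come from coord_map_from_to
```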
Kitware/VTK
5b4df4d90a4f31194d97d3c639dd38ea8f81e8b8
Utilities/Maintenance/FindNeededModules.py
python
get_users_file_headers
(path)
return headers
Get the set of VTK headers in a user's file or files with a common name in the path. :param path: The file name or the file name without the extension. :return: headers as the key and the corresponding filenames as the value.
Get the set of VTK headers in a user's file or files with a common name in the path. :param path: The file name or the file name without the extension. :return: headers as the key and the corresponding filenames as the value.
[ "Get", "the", "set", "of", "VTK", "headers", "in", "a", "user", "s", "file", "or", "files", "with", "a", "common", "name", "in", "the", "path", ".", ":", "param", "path", ":", "The", "file", "name", "or", "the", "file", "name", "without", "the", "extension", ".", ":", "return", ":", "headers", "as", "the", "key", "and", "the", "corresponding", "filenames", "as", "the", "value", "." ]
def get_users_file_headers(path):
    """
    Get the set of VTK headers in a user's file or files with a common
    name in the path.
    :param path: The file name or the file name without the extension.
    :return: headers as the key and the corresponding filenames as the value.
    """
    c = Constants()
    headers = collections.defaultdict(set)
    name, ext = os.path.splitext(path)
    if ext:
        if ext in c.valid_ext:
            if not os.path.isfile(path):
                raise Exception('No such file: ' + path)
            with open(path) as data:
                # Read the file looking for includes.
                for line in data:
                    m = c.header_pattern.match(line.strip())
                    if m:
                        # We have a header name, split it from its path (if the path exists).
                        header_parts = os.path.split(m.group(1))
                        m = c.vtk_include_pattern.match(header_parts[1])
                        if m:
                            headers[m.group(1)].add(os.path.split(path)[1])
        else:
            raise Exception('Unrecognised extension:' + path)
    else:
        for ext in c.valid_ext:
            fn = name + ext
            if os.path.isfile(fn):
                with open(fn) as data:
                    # Read the file looking for includes.
                    for line in data:
                        m = c.header_pattern.match(line.strip())
                        if m:
                            # We have a header name, split it from its path (if the path exists).
                            header_parts = os.path.split(m.group(1))
                            m = c.vtk_include_pattern.match(header_parts[1])
                            if m:
                                headers[m.group(1)].add(os.path.split(path)[1])
    return headers
[ "def", "get_users_file_headers", "(", "path", ")", ":", "c", "=", "Constants", "(", ")", "headers", "=", "collections", ".", "defaultdict", "(", "set", ")", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "if", "ext", ":", "if", "ext", "in", "c", ".", "valid_ext", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "raise", "Exception", "(", "'No such file: '", "+", "path", ")", "with", "open", "(", "path", ")", "as", "data", ":", "# Read the file looking for includes.", "for", "line", "in", "data", ":", "m", "=", "c", ".", "header_pattern", ".", "match", "(", "line", ".", "strip", "(", ")", ")", "if", "m", ":", "# We have a header name, split it from its path (if the path exists).", "header_parts", "=", "os", ".", "path", ".", "split", "(", "m", ".", "group", "(", "1", ")", ")", "m", "=", "c", ".", "vtk_include_pattern", ".", "match", "(", "header_parts", "[", "1", "]", ")", "if", "m", ":", "headers", "[", "m", ".", "group", "(", "1", ")", "]", ".", "add", "(", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", ")", "else", ":", "raise", "Exception", "(", "'Unrecognised extension:'", "+", "path", ")", "else", ":", "for", "ext", "in", "c", ".", "valid_ext", ":", "fn", "=", "name", "+", "ext", "if", "os", ".", "path", ".", "isfile", "(", "fn", ")", ":", "with", "open", "(", "fn", ")", "as", "data", ":", "# Read the file looking for includes.", "for", "line", "in", "data", ":", "m", "=", "c", ".", "header_pattern", ".", "match", "(", "line", ".", "strip", "(", ")", ")", "if", "m", ":", "# We have a header name, split it from its path (if the path exists).", "header_parts", "=", "os", ".", "path", ".", "split", "(", "m", ".", "group", "(", "1", ")", ")", "m", "=", "c", ".", "vtk_include_pattern", ".", "match", "(", "header_parts", "[", "1", "]", ")", "if", "m", ":", "headers", "[", "m", ".", "group", "(", "1", ")", "]", ".", "add", "(", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", ")", "return", "headers" ]
https://github.com/Kitware/VTK/blob/5b4df4d90a4f31194d97d3c639dd38ea8f81e8b8/Utilities/Maintenance/FindNeededModules.py#L86-L125
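A hypothetical invocation (the file name is a placeholder): report which VTK headers a user's sources pull in. Passing a name without an extension makes the function try each extension in Constants.valid_ext, per the else-branch above.

```python
# 'myapp' may resolve to e.g. myapp.cxx or myapp.py via Constants.valid_ext.
headers = get_users_file_headers('myapp')
for header, files in sorted(headers.items()):
    print(header, '<-', ', '.join(sorted(files)))
```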
weichengkuo/DeepBox
c4f8c065b6a51cf296540cc453a44f0519aaacc9
caffe-fast-rcnn/scripts/cpp_lint.py
python
GetPreviousNonBlankLine
(clean_lines, linenum)
return ('', -1)
Return the most recent non-blank line and its line number. Args: clean_lines: A CleansedLines instance containing the file contents. linenum: The number of the line to check. Returns: A tuple with two elements. The first element is the contents of the last non-blank line before the current line, or the empty string if this is the first non-blank line. The second is the line number of that line, or -1 if this is the first non-blank line.
Return the most recent non-blank line and its line number.
[ "Return", "the", "most", "recent", "non", "-", "blank", "line", "and", "its", "line", "number", "." ]
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is
    the first non-blank line.  The second is the line number of that line,
    or -1 if this is the first non-blank line.
  """
  prevlinenum = linenum - 1
  while prevlinenum >= 0:
    prevline = clean_lines.elided[prevlinenum]
    if not IsBlankLine(prevline):     # if not a blank line...
      return (prevline, prevlinenum)
    prevlinenum -= 1
  return ('', -1)
[ "def", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", ":", "prevlinenum", "=", "linenum", "-", "1", "while", "prevlinenum", ">=", "0", ":", "prevline", "=", "clean_lines", ".", "elided", "[", "prevlinenum", "]", "if", "not", "IsBlankLine", "(", "prevline", ")", ":", "# if not a blank line...", "return", "(", "prevline", ",", "prevlinenum", ")", "prevlinenum", "-=", "1", "return", "(", "''", ",", "-", "1", ")" ]
https://github.com/weichengkuo/DeepBox/blob/c4f8c065b6a51cf296540cc453a44f0519aaacc9/caffe-fast-rcnn/scripts/cpp_lint.py#L3046-L3066
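A self-contained way to exercise the scan logic above: a stub mimicking the `elided` attribute of CleansedLines, plus a stand-in for cpplint's IsBlankLine (approximated here with str.strip).

```python
class FakeCleansedLines:          # stand-in for cpplint's CleansedLines
    def __init__(self, lines):
        self.elided = lines

def IsBlankLine(line):            # stand-in; cpplint defines its own
    return not line.strip()

lines = FakeCleansedLines(['int x;', '', '  ', 'int y;'])
print(GetPreviousNonBlankLine(lines, 3))  # ('int x;', 0)
```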
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2/__init__.py
python
parse_uri
(uri)
return (groups[1], groups[3], groups[4], groups[6], groups[8])
Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri)
Parses a URI using the regex given in Appendix B of RFC 3986.
[ "Parses", "a", "URI", "using", "the", "regex", "given", "in", "Appendix", "B", "of", "RFC", "3986", "." ]
def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.

        (scheme, authority, path, query, fragment) = parse_uri(uri)
    """
    groups = URI.match(uri).groups()
    return (groups[1], groups[3], groups[4], groups[6], groups[8])
[ "def", "parse_uri", "(", "uri", ")", ":", "groups", "=", "URI", ".", "match", "(", "uri", ")", ".", "groups", "(", ")", "return", "(", "groups", "[", "1", "]", ",", "groups", "[", "3", "]", ",", "groups", "[", "4", "]", ",", "groups", "[", "6", "]", ",", "groups", "[", "8", "]", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/httplib2/python3/httplib2/__init__.py#L137-L143
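The module-level URI constant the function relies on is the non-validating regex from Appendix B of RFC 3986 (as the docstring says); inlining it makes the sketch below run standalone.

```python
import re

# The URI regex from RFC 3986, Appendix B.
URI = re.compile(r'^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?')

scheme, authority, path, query, fragment = parse_uri(
    'http://example.com/a/b?k=v#top')
# -> ('http', 'example.com', '/a/b', 'k=v', 'top')
```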
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/distributed/fleet/base/fleet_base.py
python
Fleet.init
(self, role_maker=None, is_collective=False, strategy=None)
Initialize role_maker in Fleet. This function is responsible for the distributed architecture what you want to run your code behind. Args: role_maker (RoleMakerBase, optional): A ``RoleMakerBase`` containing the configuration of environment variables related to distributed training.If you did not initialize the rolemaker by yourself, it will be automatically initialized to PaddleRoleMaker. The default value is None. is_collective (Boolean, optional): A ``Boolean`` variable determines whether the program runs on the CPU or GPU. False means set distributed training using CPU, and True means GPU.The default value is False.The default value is False. strategy (DistributedStrategy): Extra properties for distributed training. For details, please refer to paddle.distributed.fleet.DistributedStrategy. Default: None. Returns: None Examples1: .. code-block:: python import paddle.distributed.fleet as fleet fleet.init() Examples2: .. code-block:: python import paddle.distributed.fleet as fleet fleet.init(is_collective=True) Examples3: .. code-block:: python import paddle.distributed.fleet as fleet role = fleet.PaddleCloudRoleMaker() fleet.init(role) Examples4: .. code-block:: python import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() fleet.init(strategy=strategy)
Initialize role_maker in Fleet.
[ "Initialize", "role_maker", "in", "Fleet", "." ]
def init(self, role_maker=None, is_collective=False, strategy=None):
    """
    Initialize role_maker in Fleet.

    This function is responsible for the distributed architecture
    what you want to run your code behind.

    Args:
        role_maker (RoleMakerBase, optional): A ``RoleMakerBase`` containing the configuration
            of environment variables related to distributed training.If you did not initialize
            the rolemaker by yourself, it will be automatically initialized to PaddleRoleMaker.
            The default value is None.
        is_collective (Boolean, optional): A ``Boolean`` variable determines whether the program
            runs on the CPU or GPU. False means set distributed training using CPU, and True means
            GPU.The default value is False.The default value is False.
        strategy (DistributedStrategy): Extra properties for distributed training.
            For details, please refer to paddle.distributed.fleet.DistributedStrategy. Default: None.

    Returns:
        None

    Examples1:
        .. code-block:: python

            import paddle.distributed.fleet as fleet
            fleet.init()

    Examples2:
        .. code-block:: python

            import paddle.distributed.fleet as fleet
            fleet.init(is_collective=True)

    Examples3:
        .. code-block:: python

            import paddle.distributed.fleet as fleet
            role = fleet.PaddleCloudRoleMaker()
            fleet.init(role)

    Examples4:
        .. code-block:: python

            import paddle.distributed.fleet as fleet
            strategy = fleet.DistributedStrategy()
            fleet.init(strategy=strategy)
    """
    if strategy is None:
        strategy = DistributedStrategy()
    self._user_defined_strategy = copy.deepcopy(strategy)
    if role_maker is None:
        if isinstance(is_collective, bool):
            self._is_collective = is_collective
            self._role_maker = PaddleCloudRoleMaker(
                is_collective=self._is_collective)
        else:
            raise ValueError(
                "`is_collective` should be instance of `bool`, but got {}".
                format(type(is_collective)))
    else:
        if isinstance(role_maker, RoleMakerBase):
            self._role_maker = role_maker
            self._is_collective = role_maker._is_collective
        else:
            raise ValueError(
                "`role_maker` should be subclass of `RoleMakerBase`, but got {}".
                format(type(role_maker)))
    self._role_maker._generate_role()

    import paddle.distributed.fleet as fleet
    fleet.util._set_role_maker(self._role_maker)

    self.strategy_compiler = StrategyCompiler()

    if self._role_maker._is_non_distributed() and self._is_collective:
        if paddle.fluid.core.is_compiled_with_cuda():
            gpus_num = paddle.fluid.core.get_cuda_device_count()
            if gpus_num != 1:
                raise ValueError(
                    "CUDA_VISIBLE_DEVICES shoule be set only 1 card if you use `python` to launch fleet program."
                )

    if paddle.fluid.framework.in_dygraph_mode():
        if self.worker_num() == 1:
            # if worker_num is 1, should construct default topology & hcg
            self._topology = tp.CommunicateTopology()
            self._hcg = tp.HybridCommunicateGroup(self._topology)
            return
        if parallel_helper._is_parallel_ctx_initialized():
            warnings.warn(
                "The dygraph parallel environment has been initialized.")
        else:
            # FLAGS_nccl_nrings is used for dynamic graph multi-stream communication
            if "FLAGS_nccl_nrings" in os.environ:
                warnings.warn(
                    "You have set the environment variable FLAGS_nccl_nrings "
                    "outside the program, so the nccl_comm_num in "
                    "DistributedStrategy will not take effect here.")
            else:
                os.environ["FLAGS_nccl_nrings"] = str(
                    self._user_defined_strategy.nccl_comm_num)
            paddle.distributed.init_parallel_env()

        # hybrid parallel not support for npu/xpu
        if self._user_defined_strategy.heter_ccl_mode == False:
            # init hybrid parallel environment in dygraph
            if tp._HYBRID_PARALLEL_GROUP is None:
                self._init_hybrid_parallel_env()
            else:
                warnings.warn(
                    "The dygraph hybrid parallel environment has been initialized."
                )
    elif self._is_collective:
        use_sharding = self._user_defined_strategy.sharding

        # global group
        global_rank = self.worker_index()
        global_world_size = self.worker_num()
        # NOTE(wangxi): see sharding_optimizer
        global_ring_id = 3 if use_sharding else 0
        global_ranks = list(range(global_world_size))

        if tp._HYBRID_PARALLEL_GROUP is None:
            tp._CommunicateGroup()
        cg = tp._HYBRID_PARALLEL_GROUP
        self._hcg = cg
        cg.set_comm_group('global', global_rank, global_world_size,
                          global_ring_id, global_ranks)

        use_tensor_parallel = self._user_defined_strategy.tensor_parallel
        use_mp = use_sharding or use_tensor_parallel

        # hybrid group
        if use_mp is False:
            return

        mp_degree_sharding = 1
        mp_degree_tensor_parallel = 1
        if use_sharding:
            sharding_configs = self._user_defined_strategy.sharding_configs
            mp_degree_sharding = int(sharding_configs['mp_degree'])

        if use_tensor_parallel:
            tensor_parallel_configs = self._user_defined_strategy.tensor_parallel_configs
            mp_degree_tensor_parallel = int(tensor_parallel_configs[
                'tensor_parallel_degree'])

        if use_sharding and use_tensor_parallel:
            assert mp_degree_sharding == mp_degree_tensor_parallel

        mp_degree = mp_degree_sharding if use_sharding else mp_degree_tensor_parallel

        if mp_degree > 1:
            assert global_world_size % mp_degree == 0
            # NOTE(wangxi): mp_ring_id sync with sharding_optimizer.py _build_groups
            mp_ring_id = 0
            mp_rank = global_rank % mp_degree
            mp_group_id = global_rank // mp_degree
            mp_group_ranks = [
                idx for idx in global_ranks
                if idx // mp_degree == mp_group_id
            ]
            cg.set_comm_group('model', mp_rank, mp_degree, mp_ring_id,
                              mp_group_ranks)
[ "def", "init", "(", "self", ",", "role_maker", "=", "None", ",", "is_collective", "=", "False", ",", "strategy", "=", "None", ")", ":", "if", "strategy", "is", "None", ":", "strategy", "=", "DistributedStrategy", "(", ")", "self", ".", "_user_defined_strategy", "=", "copy", ".", "deepcopy", "(", "strategy", ")", "if", "role_maker", "is", "None", ":", "if", "isinstance", "(", "is_collective", ",", "bool", ")", ":", "self", ".", "_is_collective", "=", "is_collective", "self", ".", "_role_maker", "=", "PaddleCloudRoleMaker", "(", "is_collective", "=", "self", ".", "_is_collective", ")", "else", ":", "raise", "ValueError", "(", "\"`is_collective` should be instance of `bool`, but got {}\"", ".", "format", "(", "type", "(", "is_collective", ")", ")", ")", "else", ":", "if", "isinstance", "(", "role_maker", ",", "RoleMakerBase", ")", ":", "self", ".", "_role_maker", "=", "role_maker", "self", ".", "_is_collective", "=", "role_maker", ".", "_is_collective", "else", ":", "raise", "ValueError", "(", "\"`role_maker` should be subclass of `RoleMakerBase`, but got {}\"", ".", "format", "(", "type", "(", "role_maker", ")", ")", ")", "self", ".", "_role_maker", ".", "_generate_role", "(", ")", "import", "paddle", ".", "distributed", ".", "fleet", "as", "fleet", "fleet", ".", "util", ".", "_set_role_maker", "(", "self", ".", "_role_maker", ")", "self", ".", "strategy_compiler", "=", "StrategyCompiler", "(", ")", "if", "self", ".", "_role_maker", ".", "_is_non_distributed", "(", ")", "and", "self", ".", "_is_collective", ":", "if", "paddle", ".", "fluid", ".", "core", ".", "is_compiled_with_cuda", "(", ")", ":", "gpus_num", "=", "paddle", ".", "fluid", ".", "core", ".", "get_cuda_device_count", "(", ")", "if", "gpus_num", "!=", "1", ":", "raise", "ValueError", "(", "\"CUDA_VISIBLE_DEVICES shoule be set only 1 card if you use `python` to launch fleet program.\"", ")", "if", "paddle", ".", "fluid", ".", "framework", ".", "in_dygraph_mode", "(", ")", ":", "if", "self", ".", "worker_num", "(", ")", "==", "1", ":", "# if worker_num is 1, should construct default topology & hcg", "self", ".", "_topology", "=", "tp", ".", "CommunicateTopology", "(", ")", "self", ".", "_hcg", "=", "tp", ".", "HybridCommunicateGroup", "(", "self", ".", "_topology", ")", "return", "if", "parallel_helper", ".", "_is_parallel_ctx_initialized", "(", ")", ":", "warnings", ".", "warn", "(", "\"The dygraph parallel environment has been initialized.\"", ")", "else", ":", "# FLAGS_nccl_nrings is used for dynamic graph multi-stream communication", "if", "\"FLAGS_nccl_nrings\"", "in", "os", ".", "environ", ":", "warnings", ".", "warn", "(", "\"You have set the environment variable FLAGS_nccl_nrings \"", "\"outside the program, so the nccl_comm_num in \"", "\"DistributedStrategy will not take effect here.\"", ")", "else", ":", "os", ".", "environ", "[", "\"FLAGS_nccl_nrings\"", "]", "=", "str", "(", "self", ".", "_user_defined_strategy", ".", "nccl_comm_num", ")", "paddle", ".", "distributed", ".", "init_parallel_env", "(", ")", "# hybrid parallel not support for npu/xpu", "if", "self", ".", "_user_defined_strategy", ".", "heter_ccl_mode", "==", "False", ":", "# init hybrid parallel environment in dygraph", "if", "tp", ".", "_HYBRID_PARALLEL_GROUP", "is", "None", ":", "self", ".", "_init_hybrid_parallel_env", "(", ")", "else", ":", "warnings", ".", "warn", "(", "\"The dygraph hybrid parallel environment has been initialized.\"", ")", "elif", "self", ".", "_is_collective", ":", "use_sharding", "=", "self", ".", "_user_defined_strategy", ".", 
"sharding", "# global group", "global_rank", "=", "self", ".", "worker_index", "(", ")", "global_world_size", "=", "self", ".", "worker_num", "(", ")", "# NOTE(wangxi): see sharding_optimizer", "global_ring_id", "=", "3", "if", "use_sharding", "else", "0", "global_ranks", "=", "list", "(", "range", "(", "global_world_size", ")", ")", "if", "tp", ".", "_HYBRID_PARALLEL_GROUP", "is", "None", ":", "tp", ".", "_CommunicateGroup", "(", ")", "cg", "=", "tp", ".", "_HYBRID_PARALLEL_GROUP", "self", ".", "_hcg", "=", "cg", "cg", ".", "set_comm_group", "(", "'global'", ",", "global_rank", ",", "global_world_size", ",", "global_ring_id", ",", "global_ranks", ")", "use_tensor_parallel", "=", "self", ".", "_user_defined_strategy", ".", "tensor_parallel", "use_mp", "=", "use_sharding", "or", "use_tensor_parallel", "# hybrid group", "if", "use_mp", "is", "False", ":", "return", "mp_degree_sharding", "=", "1", "mp_degree_tensor_parallel", "=", "1", "if", "use_sharding", ":", "sharding_configs", "=", "self", ".", "_user_defined_strategy", ".", "sharding_configs", "mp_degree_sharding", "=", "int", "(", "sharding_configs", "[", "'mp_degree'", "]", ")", "if", "use_tensor_parallel", ":", "tensor_parallel_configs", "=", "self", ".", "_user_defined_strategy", ".", "tensor_parallel_configs", "mp_degree_tensor_parallel", "=", "int", "(", "tensor_parallel_configs", "[", "'tensor_parallel_degree'", "]", ")", "if", "use_sharding", "and", "use_tensor_parallel", ":", "assert", "mp_degree_sharding", "==", "mp_degree_tensor_parallel", "mp_degree", "=", "mp_degree_sharding", "if", "use_sharding", "else", "mp_degree_tensor_parallel", "if", "mp_degree", ">", "1", ":", "assert", "global_world_size", "%", "mp_degree", "==", "0", "# NOTE(wangxi): mp_ring_id sync with sharding_optimizer.py _build_groups", "mp_ring_id", "=", "0", "mp_rank", "=", "global_rank", "%", "mp_degree", "mp_group_id", "=", "global_rank", "//", "mp_degree", "mp_group_ranks", "=", "[", "idx", "for", "idx", "in", "global_ranks", "if", "idx", "//", "mp_degree", "==", "mp_group_id", "]", "cg", ".", "set_comm_group", "(", "'model'", ",", "mp_rank", ",", "mp_degree", ",", "mp_ring_id", ",", "mp_group_ranks", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distributed/fleet/base/fleet_base.py#L170-L338
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
benchmarks/tensorexpr/attention.py
python
BahdanauAttention.forward
(self, att_query, att_keys, normalize_bias, linear_att)
return out
Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n return b x t_q x t_k scores
Calculate Bahdanau score
[ "Calculate", "Bahdanau", "score" ]
def forward(self, att_query, att_keys, normalize_bias, linear_att):
    """
    Calculate Bahdanau score

    :param att_query: b x t_q x n
    :param att_keys: b x t_k x n

    return b x t_q x t_k scores
    """
    b, t_k, n = att_keys.size()
    t_q = att_query.size(1)
    att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n)
    att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n)
    sum_qk = att_query + att_keys + normalize_bias
    out = torch.tanh(sum_qk).matmul(linear_att)
    return out
[ "def", "forward", "(", "self", ",", "att_query", ",", "att_keys", ",", "normalize_bias", ",", "linear_att", ")", ":", "b", ",", "t_k", ",", "n", "=", "att_keys", ".", "size", "(", ")", "t_q", "=", "att_query", ".", "size", "(", "1", ")", "att_query", "=", "att_query", ".", "unsqueeze", "(", "2", ")", ".", "expand", "(", "b", ",", "t_q", ",", "t_k", ",", "n", ")", "att_keys", "=", "att_keys", ".", "unsqueeze", "(", "1", ")", ".", "expand", "(", "b", ",", "t_q", ",", "t_k", ",", "n", ")", "sum_qk", "=", "att_query", "+", "att_keys", "+", "normalize_bias", "out", "=", "torch", ".", "tanh", "(", "sum_qk", ")", ".", "matmul", "(", "linear_att", ")", "return", "out" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/benchmarks/tensorexpr/attention.py#L35-L52
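A quick shape check of the score computation above. The body never touches `self`, so None can stand in when calling it as a plain function; sizes follow the docstring (b x t_q x n queries, b x t_k x n keys).

```python
import torch

b, t_q, t_k, n = 2, 3, 5, 8
att_query = torch.randn(b, t_q, n)
att_keys = torch.randn(b, t_k, n)
normalize_bias = torch.zeros(n)
linear_att = torch.randn(n)

scores = BahdanauAttention.forward(None, att_query, att_keys,
                                   normalize_bias, linear_att)
print(scores.shape)  # torch.Size([2, 3, 5])
```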
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/tensor/math.py
python
erfinv_
(x, name=None)
return _C_ops.erfinv_(x)
r""" Inplace version of ``erfinv`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_tensor_erfinv`.
r""" Inplace version of ``erfinv`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_tensor_erfinv`.
[ "r", "Inplace", "version", "of", "erfinv", "API", "the", "output", "Tensor", "will", "be", "inplaced", "with", "input", "x", ".", "Please", "refer", "to", ":", "ref", ":", "api_tensor_erfinv", "." ]
def erfinv_(x, name=None):
    r"""
    Inplace version of ``erfinv`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_tensor_erfinv`.
    """
    check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
    return _C_ops.erfinv_(x)
[ "def", "erfinv_", "(", "x", ",", "name", "=", "None", ")", ":", "check_type", "(", "x", ",", "'x'", ",", "(", "paddle", ".", "Tensor", ",", "Variable", ")", ",", "'erfinv'", ")", "return", "_C_ops", ".", "erfinv_", "(", "x", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/tensor/math.py#L3431-L3437
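A hedged usage sketch, assuming the in-place variant is exported at the top level as paddle.erfinv_ (the file above lives in paddle.tensor.math):

```python
import paddle

x = paddle.to_tensor([0.0, 0.5, -0.9], dtype='float32')
y = paddle.erfinv_(x)   # modifies x in place and returns it
print(x.numpy())
```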
PyMesh/PyMesh
384ba882b7558ba6e8653ed263c419226c22bddf
python/pymesh/meshutils/generate_tube.py
python
generate_tube
(p0, p1, r0_out, r1_out, r0_in, r1_in, num_segments=16, with_quad=False)
return form_mesh(vertices, faces)
Generate generalized tube (i.e. cylinder with an axial hole). Args: p0 (``np.ndarray``): Bottom center. p1 (``np.ndarray``): Top center. r0_out (``float``): Bottom outer radius. r1_out (``float``): Top outer radius. r0_in (``float``): Bottom inner radius. r1_in (``float``): Top inner radius. num_segments (``int``): Number of segments to a discrete circle consists. with_quad (``bool``): Output a quad mesh instead. Returns: A generalized tube :py:class:`Mesh`.
Generate generalized tube (i.e. cylinder with an axial hole).
[ "Generate", "generalized", "tube", "(", "i", ".", "e", ".", "cylinder", "with", "an", "axial", "hole", ")", "." ]
def generate_tube(p0, p1, r0_out, r1_out, r0_in, r1_in,
                  num_segments=16, with_quad=False):
    """ Generate generalized tube (i.e. cylinder with an axial hole).

    Args:
        p0 (``np.ndarray``): Bottom center.
        p1 (``np.ndarray``): Top center.
        r0_out (``float``): Bottom outer radius.
        r1_out (``float``): Top outer radius.
        r0_in (``float``): Bottom inner radius.
        r1_in (``float``): Top inner radius.
        num_segments (``int``): Number of segments to a discrete circle consists.
        with_quad (``bool``): Output a quad mesh instead.

    Returns:
        A generalized tube :py:class:`Mesh`.
    """
    assert(len(p0) == 3)
    assert(len(p1) == 3)
    Z = np.array([0, 0, 1], dtype=float)
    p0 = np.array(p0, dtype=float)
    p1 = np.array(p1, dtype=float)
    axis = p1 - p0
    l = norm(axis)
    if l <= 1e-12:
        axis = Z
    N = num_segments
    angles = [2*math.pi*i/float(N) for i in range(N)]
    rim = np.array([[math.cos(theta), math.sin(theta), 0.0]
                    for theta in angles])
    rot = Quaternion.fromData(Z, axis).to_matrix()

    bottom_outer_rim = np.dot(rot, rim.T).T * r0_out + p0
    bottom_inner_rim = np.dot(rot, rim.T).T * r0_in + p0
    top_outer_rim = np.dot(rot, rim.T).T * r1_out + p1
    top_inner_rim = np.dot(rot, rim.T).T * r1_in + p1

    vertices = np.vstack([
        bottom_outer_rim,
        bottom_inner_rim,
        top_outer_rim,
        top_inner_rim])

    if with_quad:
        top = np.array([
            [2*N+i, 2*N+(i+1)%N, 3*N+(i+1)%N, 3*N+i]
            for i in range(N)])
        bottom = np.array([
            [(i+1)%N, i, N+i, N+(i+1)%N]
            for i in range(N)])
        inner = np.array([
            [3*N+i, 3*N+(i+1)%N, N+(i+1)%N, N+i]
            for i in range(N)])
        outer = np.array([
            [i, (i+1)%N, 2*N+(i+1)%N, 2*N+i]
            for i in range(N)])
        faces = np.vstack([top, bottom, inner, outer])
    else:
        top = np.array([
            [[2*N+i, 2*N+(i+1)%N, 3*N+i],
             [3*N+i, 2*N+(i+1)%N, 3*N+(i+1)%N]]
            for i in range(N)])
        bottom = np.array([
            [[(i+1)%N, i, N+i],
             [(i+1)%N, N+i, N+(i+1)%N]]
            for i in range(N)])
        inner = np.array([
            [[3*N+i, 3*N+(i+1)%N, N+i],
             [N+i, 3*N+(i+1)%N, N+(i+1)%N]]
            for i in range(N)])
        outer = np.array([
            [[i, (i+1)%N, 2*N+i],
             [2*N+i, (i+1)%N, 2*N+(i+1)%N]]
            for i in range(N)])
        faces = np.vstack([
            top.reshape((-1, 3)),
            bottom.reshape((-1, 3)),
            inner.reshape((-1, 3)),
            outer.reshape((-1, 3))])
    return form_mesh(vertices, faces)
[ "def", "generate_tube", "(", "p0", ",", "p1", ",", "r0_out", ",", "r1_out", ",", "r0_in", ",", "r1_in", ",", "num_segments", "=", "16", ",", "with_quad", "=", "False", ")", ":", "assert", "(", "len", "(", "p0", ")", "==", "3", ")", "assert", "(", "len", "(", "p1", ")", "==", "3", ")", "Z", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "1", "]", ",", "dtype", "=", "float", ")", "p0", "=", "np", ".", "array", "(", "p0", ",", "dtype", "=", "float", ")", "p1", "=", "np", ".", "array", "(", "p1", ",", "dtype", "=", "float", ")", "axis", "=", "p1", "-", "p0", "l", "=", "norm", "(", "axis", ")", "if", "l", "<=", "1e-12", ":", "axis", "=", "Z", "N", "=", "num_segments", "angles", "=", "[", "2", "*", "math", ".", "pi", "*", "i", "/", "float", "(", "N", ")", "for", "i", "in", "range", "(", "N", ")", "]", "rim", "=", "np", ".", "array", "(", "[", "[", "math", ".", "cos", "(", "theta", ")", ",", "math", ".", "sin", "(", "theta", ")", ",", "0.0", "]", "for", "theta", "in", "angles", "]", ")", "rot", "=", "Quaternion", ".", "fromData", "(", "Z", ",", "axis", ")", ".", "to_matrix", "(", ")", "bottom_outer_rim", "=", "np", ".", "dot", "(", "rot", ",", "rim", ".", "T", ")", ".", "T", "*", "r0_out", "+", "p0", "bottom_inner_rim", "=", "np", ".", "dot", "(", "rot", ",", "rim", ".", "T", ")", ".", "T", "*", "r0_in", "+", "p0", "top_outer_rim", "=", "np", ".", "dot", "(", "rot", ",", "rim", ".", "T", ")", ".", "T", "*", "r1_out", "+", "p1", "top_inner_rim", "=", "np", ".", "dot", "(", "rot", ",", "rim", ".", "T", ")", ".", "T", "*", "r1_in", "+", "p1", "vertices", "=", "np", ".", "vstack", "(", "[", "bottom_outer_rim", ",", "bottom_inner_rim", ",", "top_outer_rim", ",", "top_inner_rim", "]", ")", "if", "with_quad", ":", "top", "=", "np", ".", "array", "(", "[", "[", "2", "*", "N", "+", "i", ",", "2", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "3", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "3", "*", "N", "+", "i", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "bottom", "=", "np", ".", "array", "(", "[", "[", "(", "i", "+", "1", ")", "%", "N", ",", "i", ",", "N", "+", "i", ",", "N", "+", "(", "i", "+", "1", ")", "%", "N", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "inner", "=", "np", ".", "array", "(", "[", "[", "3", "*", "N", "+", "i", ",", "3", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "N", "+", "i", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "outer", "=", "np", ".", "array", "(", "[", "[", "i", ",", "(", "i", "+", "1", ")", "%", "N", ",", "2", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "2", "*", "N", "+", "i", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "faces", "=", "np", ".", "vstack", "(", "[", "top", ",", "bottom", ",", "inner", ",", "outer", "]", ")", "else", ":", "top", "=", "np", ".", "array", "(", "[", "[", "[", "2", "*", "N", "+", "i", ",", "2", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "3", "*", "N", "+", "i", "]", ",", "[", "3", "*", "N", "+", "i", ",", "2", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "3", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", "]", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "bottom", "=", "np", ".", "array", "(", "[", "[", "[", "(", "i", "+", "1", ")", "%", "N", ",", "i", ",", "N", "+", "i", "]", ",", "[", "(", "i", "+", "1", ")", "%", "N", ",", "N", "+", "i", ",", "N", "+", "(", "i", "+", "1", ")", "%", "N", "]", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", 
"inner", "=", "np", ".", "array", "(", "[", "[", "[", "3", "*", "N", "+", "i", ",", "3", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "N", "+", "i", "]", ",", "[", "N", "+", "i", ",", "3", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", ",", "N", "+", "(", "i", "+", "1", ")", "%", "N", "]", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "outer", "=", "np", ".", "array", "(", "[", "[", "[", "i", ",", "(", "i", "+", "1", ")", "%", "N", ",", "2", "*", "N", "+", "i", "]", ",", "[", "2", "*", "N", "+", "i", ",", "(", "i", "+", "1", ")", "%", "N", ",", "2", "*", "N", "+", "(", "i", "+", "1", ")", "%", "N", "]", "]", "for", "i", "in", "range", "(", "N", ")", "]", ")", "faces", "=", "np", ".", "vstack", "(", "[", "top", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", ",", "bottom", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", ",", "inner", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", ",", "outer", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "]", ")", "return", "form_mesh", "(", "vertices", ",", "faces", ")" ]
https://github.com/PyMesh/PyMesh/blob/384ba882b7558ba6e8653ed263c419226c22bddf/python/pymesh/meshutils/generate_tube.py#L7-L89
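A hypothetical call (pymesh assumed installed): a vertical tube of outer radius 1, inner radius 0.5, and height 2, discretized into 32 segments.

```python
import pymesh

tube = pymesh.generate_tube([0, 0, 0], [0, 0, 2],
                            r0_out=1.0, r1_out=1.0,
                            r0_in=0.5, r1_in=0.5,
                            num_segments=32)
# 4 rims of 32 vertices each; walls triangulated unless with_quad=True.
print(tube.num_vertices, tube.num_faces)
```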
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_controls.py
python
TextAttr.HasLineSpacing
(*args, **kwargs)
return _controls_.TextAttr_HasLineSpacing(*args, **kwargs)
HasLineSpacing(self) -> bool
HasLineSpacing(self) -> bool
[ "HasLineSpacing", "(", "self", ")", "-", ">", "bool" ]
def HasLineSpacing(*args, **kwargs):
    """HasLineSpacing(self) -> bool"""
    return _controls_.TextAttr_HasLineSpacing(*args, **kwargs)
[ "def", "HasLineSpacing", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "TextAttr_HasLineSpacing", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L1836-L1838
Yelp/MOE
5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c
moe/optimal_learning/python/data_containers.py
python
HistoricalData.__str__
(self, pretty_print=True)
String representation of this HistoricalData object. pretty-print'ing produces output that is easily read by humans. Disabling it prints the member arrays to the screen in full precision; this is convenient for pasting into C++ or other debugging purposes. :param pretty_print: enable pretty-printing for formatted, human-readable output :type pretty_print: bool :return: string representation :rtype: string
String representation of this HistoricalData object.
[ "String", "representation", "of", "this", "HistoricalData", "object", "." ]
def __str__(self, pretty_print=True):
    """String representation of this HistoricalData object.

    pretty-print'ing produces output that is easily read by humans.
    Disabling it prints the member arrays to the screen in full precision;
    this is convenient for pasting into C++ or other debugging purposes.

    :param pretty_print: enable pretty-printing for formatted, human-readable output
    :type pretty_print: bool
    :return: string representation
    :rtype: string
    """
    if pretty_print:
        sample_point_list = self.to_list_of_sample_points()
        return pprint.pformat(sample_point_list)
    else:
        out_string = repr(self._points_sampled) + '\n'
        out_string += repr(self._points_sampled_value) + '\n'
        out_string += repr(self._points_sampled_noise_variance)
        return out_string
[ "def", "__str__", "(", "self", ",", "pretty_print", "=", "True", ")", ":", "if", "pretty_print", ":", "sample_point_list", "=", "self", ".", "to_list_of_sample_points", "(", ")", "return", "pprint", ".", "pformat", "(", "sample_point_list", ")", "else", ":", "out_string", "=", "repr", "(", "self", ".", "_points_sampled", ")", "+", "'\\n'", "out_string", "+=", "repr", "(", "self", ".", "_points_sampled_value", ")", "+", "'\\n'", "out_string", "+=", "repr", "(", "self", ".", "_points_sampled_noise_variance", ")", "return", "out_string" ]
https://github.com/Yelp/MOE/blob/5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c/moe/optimal_learning/python/data_containers.py#L131-L151
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
caffe2/python/core.py
python
ExecutionStep.SetReportNet
(self, report_net, report_interval)
DEPRECATED. Use RunEveryMillis instead.
DEPRECATED. Use RunEveryMillis instead.
[ "DEPRECATED", ".", "Use", "RunEveryMillis", "instead", "." ]
def SetReportNet(self, report_net, report_interval):
    """ DEPRECATED. Use RunEveryMillis instead. """
    self._assert_can_mutate()
    _add_net_to_dict(self._net_dict, report_net)
    self._step.report_net = get_net_name(report_net)
    self._step.report_interval = report_interval
[ "def", "SetReportNet", "(", "self", ",", "report_net", ",", "report_interval", ")", ":", "self", ".", "_assert_can_mutate", "(", ")", "_add_net_to_dict", "(", "self", ".", "_net_dict", ",", "report_net", ")", "self", ".", "_step", ".", "report_net", "=", "get_net_name", "(", "report_net", ")", "self", ".", "_step", ".", "report_interval", "=", "report_interval" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/core.py#L2765-L2770
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/docutils/utils/math/math2html.py
python
NumberCounter.reset
(self)
Reset the counter.
Reset the counter.
[ "Reset", "the", "counter", "." ]
def reset(self):
    "Reset the counter."
    self.value = 0
[ "def", "reset", "(", "self", ")", ":", "self", ".", "value", "=", "0" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/docutils/utils/math/math2html.py#L3251-L3253
vslavik/poedit
f7a9daa0a10037e090aa0a86f5ce0f24ececdf6a
deps/boost/libs/predef/tools/ci/build_log.py
python
BuildOutputProcessor.add_input
(self, input)
Add a single build XML output file to our data.
Add a single build XML output file to our data.
[ "Add", "a", "single", "build", "XML", "output", "file", "to", "our", "data", "." ]
def add_input(self, input):
    '''
    Add a single build XML output file to our data.
    '''
    events = xml.dom.pulldom.parse(input)
    context = []
    for (event, node) in events:
        if event == xml.dom.pulldom.START_ELEMENT:
            context.append(node)
            if node.nodeType == xml.dom.Node.ELEMENT_NODE:
                x_f = self.x_name_(*context)
                if x_f:
                    events.expandNode(node)
                    # expanding eats the end element, hence walking us out one level
                    context.pop()
                    # call handler
                    (x_f[1])(node)
        elif event == xml.dom.pulldom.END_ELEMENT:
            context.pop()
[ "def", "add_input", "(", "self", ",", "input", ")", ":", "events", "=", "xml", ".", "dom", ".", "pulldom", ".", "parse", "(", "input", ")", "context", "=", "[", "]", "for", "(", "event", ",", "node", ")", "in", "events", ":", "if", "event", "==", "xml", ".", "dom", ".", "pulldom", ".", "START_ELEMENT", ":", "context", ".", "append", "(", "node", ")", "if", "node", ".", "nodeType", "==", "xml", ".", "dom", ".", "Node", ".", "ELEMENT_NODE", ":", "x_f", "=", "self", ".", "x_name_", "(", "*", "context", ")", "if", "x_f", ":", "events", ".", "expandNode", "(", "node", ")", "# expanding eats the end element, hence walking us out one level", "context", ".", "pop", "(", ")", "# call handler", "(", "x_f", "[", "1", "]", ")", "(", "node", ")", "elif", "event", "==", "xml", ".", "dom", ".", "pulldom", ".", "END_ELEMENT", ":", "context", ".", "pop", "(", ")" ]
https://github.com/vslavik/poedit/blob/f7a9daa0a10037e090aa0a86f5ce0f24ececdf6a/deps/boost/libs/predef/tools/ci/build_log.py#L85-L103
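A self-contained illustration of the pulldom pattern used above: stream through a document, expand interesting elements in place, and keep a context stack balanced by popping on END_ELEMENT (and after expandNode, which consumes the matching end tag itself). The XML payload is invented for the demo.

```python
import io
import xml.dom.pulldom

doc = io.StringIO('<run><test name="a"/><test name="b"/></run>')
events = xml.dom.pulldom.parse(doc)
context = []
for event, node in events:
    if event == xml.dom.pulldom.START_ELEMENT:
        context.append(node)
        if node.tagName == 'test':
            events.expandNode(node)   # eats the matching END_ELEMENT
            context.pop()
            print(node.getAttribute('name'))   # a, then b
    elif event == xml.dom.pulldom.END_ELEMENT:
        context.pop()
```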
Kitware/ParaView
f760af9124ff4634b23ebbeab95a4f56e0261955
Wrapping/Python/paraview/servermanager.py
python
InputProperty.__setitem__
(self, idx, value)
Given a list or tuple of values, sets a slice of values [min, max)
Given a list or tuple of values, sets a slice of values [min, max)
[ "Given", "a", "list", "or", "tuple", "of", "values", "sets", "a", "slice", "of", "values", "[", "min", "max", ")" ]
def __setitem__(self, idx, value):
    """Given a list or tuple of values, sets a slice of values [min, max)"""
    if isinstance(idx, slice):
        indices = idx.indices(len(self))
        for i, j in zip(range(*indices), value):
            # NOTE: 'min' below is the Python builtin, so 'value[i-min]' raises
            # TypeError when this branch runs; 'j' (the element zipped with
            # index i) appears to be what was intended.
            op = value[i-min]
            self.SMProperty.SetInputConnection(i, op.SMProxy, op.Port)
        self._UpdateProperty()
    elif idx >= len(self) or idx < 0:
        raise IndexError
    else:
        self.SMProperty.SetInputConnection(idx, value.SMProxy, value.Port)
        self._UpdateProperty()
[ "def", "__setitem__", "(", "self", ",", "idx", ",", "value", ")", ":", "if", "isinstance", "(", "idx", ",", "slice", ")", ":", "indices", "=", "idx", ".", "indices", "(", "len", "(", "self", ")", ")", "for", "i", ",", "j", "in", "zip", "(", "range", "(", "*", "indices", ")", ",", "value", ")", ":", "op", "=", "value", "[", "i", "-", "min", "]", "self", ".", "SMProperty", ".", "SetInputConnection", "(", "i", ",", "op", ".", "SMProxy", ",", "op", ".", "Port", ")", "self", ".", "_UpdateProperty", "(", ")", "elif", "idx", ">=", "len", "(", "self", ")", "or", "idx", "<", "0", ":", "raise", "IndexError", "else", ":", "self", ".", "SMProperty", ".", "SetInputConnection", "(", "idx", ",", "value", ".", "SMProxy", ",", "value", ".", "Port", ")", "self", ".", "_UpdateProperty", "(", ")" ]
https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Wrapping/Python/paraview/servermanager.py#L1412-L1424
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mailbox.py
python
Mailbox.__init__
(self, path, factory=None, create=True)
Initialize a Mailbox instance.
Initialize a Mailbox instance.
[ "Initialize", "a", "Mailbox", "instance", "." ]
def __init__(self, path, factory=None, create=True):
    """Initialize a Mailbox instance."""
    self._path = os.path.abspath(os.path.expanduser(path))
    self._factory = factory
[ "def", "__init__", "(", "self", ",", "path", ",", "factory", "=", "None", ",", "create", "=", "True", ")", ":", "self", ".", "_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", "self", ".", "_factory", "=", "factory" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mailbox.py#L36-L39
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/dataview.py
python
DataViewEvent.SetCache
(*args, **kwargs)
return _dataview.DataViewEvent_SetCache(*args, **kwargs)
SetCache(self, int from, int to)
SetCache(self, int from, int to)
[ "SetCache", "(", "self", "int", "from", "int", "to", ")" ]
def SetCache(*args, **kwargs):
    """SetCache(self, int from, int to)"""
    return _dataview.DataViewEvent_SetCache(*args, **kwargs)
[ "def", "SetCache", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_dataview", ".", "DataViewEvent_SetCache", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/dataview.py#L1964-L1966
SpenceKonde/megaTinyCore
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
megaavr/tools/libs/serial/tools/list_ports_common.py
python
list_links
(devices)
return links
\ search all /dev devices and look for symlinks to known ports already listed in devices.
\ search all /dev devices and look for symlinks to known ports already listed in devices.
[ "\\", "search", "all", "/", "dev", "devices", "and", "look", "for", "symlinks", "to", "known", "ports", "already", "listed", "in", "devices", "." ]
def list_links(devices):
    """\
    search all /dev devices and look for symlinks to known
    ports already listed in devices.
    """
    links = []
    for device in glob.glob('/dev/*'):
        if os.path.islink(device) and os.path.realpath(device) in devices:
            links.append(device)
    return links
[ "def", "list_links", "(", "devices", ")", ":", "links", "=", "[", "]", "for", "device", "in", "glob", ".", "glob", "(", "'/dev/*'", ")", ":", "if", "os", ".", "path", ".", "islink", "(", "device", ")", "and", "os", ".", "path", ".", "realpath", "(", "device", ")", "in", "devices", ":", "links", ".", "append", "(", "device", ")", "return", "links" ]
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/serial/tools/list_ports_common.py#L94-L103
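A hypothetical usage alongside a known-ports list (device paths invented), with the module's glob/os imports assumed in scope: find /dev symlinks, such as udev aliases under /dev/serial/by-id/, that resolve to detected devices.

```python
import os

known = ['/dev/ttyUSB0', '/dev/ttyACM0']
for link in list_links(known):
    print(link, '->', os.path.realpath(link))
```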
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/summary/event_accumulator.py
python
EventAccumulator.Histograms
(self, tag)
return self._histograms.Items(tag)
Given a summary tag, return all associated histograms. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `HistogramEvent`s.
Given a summary tag, return all associated histograms.
[ "Given", "a", "summary", "tag", "return", "all", "associated", "histograms", "." ]
def Histograms(self, tag): """Given a summary tag, return all associated histograms. Args: tag: A string tag associated with the events. Raises: KeyError: If the tag is not found. Returns: An array of `HistogramEvent`s. """ return self._histograms.Items(tag)
[ "def", "Histograms", "(", "self", ",", "tag", ")", ":", "return", "self", ".", "_histograms", ".", "Items", "(", "tag", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/summary/event_accumulator.py#L327-L339
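A hedged sketch of the `Histograms` accessor. The import path matches this (old) TensorFlow tree, and the log directory and tag name are assumptions; the call raises `KeyError` if the tag was never logged.
```
# Sketch: load an event file and pull histogram events for one tag.
from tensorflow.python.summary.event_accumulator import EventAccumulator

acc = EventAccumulator('/tmp/train_logs')  # hypothetical logdir
acc.Reload()                               # ingest events from disk
for event in acc.Histograms('weights'):    # 'weights' is an assumed tag
    print(event.wall_time, event.step)
```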
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftfunctions/move.py
python
move_vertex
(object, vertex_index, vector)
Needed for SubObjects modifiers. Implemented by Dion Moult during 0.19 dev cycle (works only with Draft Wire).
Needed for SubObjects modifiers. Implemented by Dion Moult during 0.19 dev cycle (works only with Draft Wire).
[ "Needed", "for", "SubObjects", "modifiers", ".", "Implemented", "by", "Dion", "Moult", "during", "0", ".", "19", "dev", "cycle", "(", "works", "only", "with", "Draft", "Wire", ")", "." ]
def move_vertex(object, vertex_index, vector): """ Needed for SubObjects modifiers. Implemented by Dion Moult during 0.19 dev cycle (works only with Draft Wire). """ points = object.Points points[vertex_index] = points[vertex_index].add(vector) object.Points = points
[ "def", "move_vertex", "(", "object", ",", "vertex_index", ",", "vector", ")", ":", "points", "=", "object", ".", "Points", "points", "[", "vertex_index", "]", "=", "points", "[", "vertex_index", "]", ".", "add", "(", "vector", ")", "object", ".", "Points", "=", "points" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftfunctions/move.py#L164-L171
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/profiler/pprof_profiler.py
python
Samples.add
(self, datum, location_ids)
Adds a sample data point. Args: datum: `ProfileDatum` to add a sample for. location_ids: List of numeric location ids for this sample.

Adds a sample data point.
[ "Adds", "a", "sample", "data", "point", "." ]
def add(self, datum, location_ids): """Adds a sample data point. Args: datum: `ProfileDatum` to add a sample for. location_ids: List of numeric location ids for this sample. """ node_name = datum.node_exec_stats.node_name if node_name in self._node_name_to_sample: sample = self._node_name_to_sample[node_name] sample.location_id.extend(location_ids) else: sample = profile_pb2.Sample() # Sample stores 3 values: count, all_time, op_time sample.value.extend([0, 0, 0]) label = sample.label.add() label.key = self._string_table.index_of('node_name') label.str = self._string_table.index_of(node_name) label = sample.label.add() label.key = self._string_table.index_of('op_type') label.str = self._string_table.index_of(datum.op_type) self._node_name_to_sample[node_name] = sample sample.value[0] += 1 sample.value[1] += datum.node_exec_stats.all_end_rel_micros sample.value[2] += ( datum.node_exec_stats.op_end_rel_micros - datum.node_exec_stats.op_start_rel_micros)
[ "def", "add", "(", "self", ",", "datum", ",", "location_ids", ")", ":", "node_name", "=", "datum", ".", "node_exec_stats", ".", "node_name", "if", "node_name", "in", "self", ".", "_node_name_to_sample", ":", "sample", "=", "self", ".", "_node_name_to_sample", "[", "node_name", "]", "sample", ".", "location_id", ".", "extend", "(", "location_ids", ")", "else", ":", "sample", "=", "profile_pb2", ".", "Sample", "(", ")", "# Sample stores 3 values: count, all_time, op_time", "sample", ".", "value", ".", "extend", "(", "[", "0", ",", "0", ",", "0", "]", ")", "label", "=", "sample", ".", "label", ".", "add", "(", ")", "label", ".", "key", "=", "self", ".", "_string_table", ".", "index_of", "(", "'node_name'", ")", "label", ".", "str", "=", "self", ".", "_string_table", ".", "index_of", "(", "node_name", ")", "label", "=", "sample", ".", "label", ".", "add", "(", ")", "label", ".", "key", "=", "self", ".", "_string_table", ".", "index_of", "(", "'op_type'", ")", "label", ".", "str", "=", "self", ".", "_string_table", ".", "index_of", "(", "datum", ".", "op_type", ")", "self", ".", "_node_name_to_sample", "[", "node_name", "]", "=", "sample", "sample", ".", "value", "[", "0", "]", "+=", "1", "sample", ".", "value", "[", "1", "]", "+=", "datum", ".", "node_exec_stats", ".", "all_end_rel_micros", "sample", ".", "value", "[", "2", "]", "+=", "(", "datum", ".", "node_exec_stats", ".", "op_end_rel_micros", "-", "datum", ".", "node_exec_stats", ".", "op_start_rel_micros", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/profiler/pprof_profiler.py#L223-L251
klzgrad/naiveproxy
ed2c513637c77b18721fe428d7ed395b4d284c83
src/build/android/pylib/results/flakiness_dashboard/json_results_generator.py
python
ConvertTrieToFlatPaths
(trie, prefix=None)
return result
Flattens the trie of paths, prepending a prefix to each.
Flattens the trie of paths, prepending a prefix to each.
[ "Flattens", "the", "trie", "of", "paths", "prepending", "a", "prefix", "to", "each", "." ]
def ConvertTrieToFlatPaths(trie, prefix=None): """Flattens the trie of paths, prepending a prefix to each.""" result = {} for name, data in trie.items(): if prefix: name = prefix + '/' + name if len(data) != 0 and not 'results' in data: result.update(ConvertTrieToFlatPaths(data, name)) else: result[name] = data return result
[ "def", "ConvertTrieToFlatPaths", "(", "trie", ",", "prefix", "=", "None", ")", ":", "result", "=", "{", "}", "for", "name", ",", "data", "in", "trie", ".", "items", "(", ")", ":", "if", "prefix", ":", "name", "=", "prefix", "+", "'/'", "+", "name", "if", "len", "(", "data", ")", "!=", "0", "and", "not", "'results'", "in", "data", ":", "result", ".", "update", "(", "ConvertTrieToFlatPaths", "(", "data", ",", "name", ")", ")", "else", ":", "result", "[", "name", "]", "=", "data", "return", "result" ]
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/pylib/results/flakiness_dashboard/json_results_generator.py#L50-L62
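A small demo of the flattening behaviour, assuming the module above is importable; the trie is hand-built so the expected output can be stated in a comment.
```
# Sketch: leaves are dicts containing a 'results' key; inner nodes recurse.
from json_results_generator import ConvertTrieToFlatPaths  # assumes module on sys.path

trie = {
    'foo': {
        'bar': {'results': ['PASS'], 'times': [0.1]},
        'baz': {'results': ['FAIL']},
    },
}
flat = ConvertTrieToFlatPaths(trie)
print(sorted(flat))  # ['foo/bar', 'foo/baz']
```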
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/customtreectrl.py
python
GenericTreeItem.GetCurrentCheckedImage
(self)
Returns the current item check image. :return: An integer index that can be used to retrieve the item check image inside a :class:`ImageList`.
Returns the current item check image.
[ "Returns", "the", "current", "item", "check", "image", "." ]
def GetCurrentCheckedImage(self): """ Returns the current item check image. :return: An integer index that can be used to retrieve the item check image inside a :class:`ImageList`. """ if self._type == 0: return None checked = self.IsChecked() if checked > 0: if self._type == 1: # Checkbox if checked == wx.CHK_CHECKED: return self._checkedimages[TreeItemIcon_Checked] else: return self._checkedimages[TreeItemIcon_Undetermined] else: # Radiobutton return self._checkedimages[TreeItemIcon_Flagged] else: if self._type == 1: # Checkbox return self._checkedimages[TreeItemIcon_NotChecked] else: # Radiobutton return self._checkedimages[TreeItemIcon_NotFlagged]
[ "def", "GetCurrentCheckedImage", "(", "self", ")", ":", "if", "self", ".", "_type", "==", "0", ":", "return", "None", "checked", "=", "self", ".", "IsChecked", "(", ")", "if", "checked", ">", "0", ":", "if", "self", ".", "_type", "==", "1", ":", "# Checkbox", "if", "checked", "==", "wx", ".", "CHK_CHECKED", ":", "return", "self", ".", "_checkedimages", "[", "TreeItemIcon_Checked", "]", "else", ":", "return", "self", ".", "_checkedimages", "[", "TreeItemIcon_Undetermined", "]", "else", ":", "# Radiobutton", "return", "self", ".", "_checkedimages", "[", "TreeItemIcon_Flagged", "]", "else", ":", "if", "self", ".", "_type", "==", "1", ":", "# Checkbox", "return", "self", ".", "_checkedimages", "[", "TreeItemIcon_NotChecked", "]", "else", ":", "# Radiobutton", "return", "self", ".", "_checkedimages", "[", "TreeItemIcon_NotFlagged", "]" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/customtreectrl.py#L2602-L2627
Dovyski/cvui
d1b40267bdee34fcc193a375911415222f1409b3
cvui.py
python
printf
(theWhere, theX, theY, theFontScale, theColor, theFmt)
Display a piece of text that can be formatted using `C stdio's printf()` style. For instance if you want to display text mixed with numbers, you can use: ``` printf(frame, 10, 15, 0.4, 0xff0000, 'Text: %d and %f', 7, 3.1415) ``` Parameters ---------- theWhere: np.ndarray image/frame where the component should be rendered. theX: int position X where the component should be placed. theY: int position Y where the component should be placed. theFontScale: float size of the text. theColor: uint color of the text in the format `0xRRGGBB`, e.g. `0xff0000` for red. theFmt: str formatting string as it would be supplied for `stdio's printf()`, e.g. `'Text: %d and %f', 7, 3.1415`. See Also ---------- text()
Display a piece of text that can be formatted using `C stdio's printf()` style. For instance if you want to display text mixed with numbers, you can use:
[ "Display", "a", "piece", "of", "text", "that", "can", "be", "formatted", "using", "C", "stdio", "s", "printf", "()", "style", ".", "For", "instance", "if", "you", "want", "to", "display", "text", "mixed", "with", "numbers", "you", "can", "use", ":" ]
def printf(theWhere, theX, theY, theFontScale, theColor, theFmt): """ Display a piece of text that can be formatted using `C stdio's printf()` style. For instance if you want to display text mixed with numbers, you can use: ``` printf(frame, 10, 15, 0.4, 0xff0000, 'Text: %d and %f', 7, 3.1415) ``` Parameters ---------- theWhere: np.ndarray image/frame where the component should be rendered. theX: int position X where the component should be placed. theY: int position Y where the component should be placed. theFontScale: float size of the text. theColor: uint color of the text in the format `0xRRGGBB`, e.g. `0xff0000` for red. theFmt: str formatting string as it would be supplied for `stdio's printf()`, e.g. `'Text: %d and %f', 7, 3.1415`. See Also ---------- text() """ print('This is a wrapper function to help code autocompletion.')
[ "def", "printf", "(", "theWhere", ",", "theX", ",", "theY", ",", "theFontScale", ",", "theColor", ",", "theFmt", ")", ":", "print", "(", "'This is a wrapper function to help code autocompletion.'", ")" ]
https://github.com/Dovyski/cvui/blob/d1b40267bdee34fcc193a375911415222f1409b3/cvui.py#L1496-L1524
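A hedged driver for the call shown in the docstring; it assumes the `cvui` and OpenCV packages are installed, and the window name and frame size are arbitrary choices.
```
# Sketch: render printf-style text into a frame inside a cvui loop.
import numpy as np
import cv2
import cvui

WINDOW_NAME = 'printf demo'  # arbitrary window title
cvui.init(WINDOW_NAME)
frame = np.zeros((120, 320, 3), np.uint8)

while True:
    frame[:] = (49, 52, 49)
    cvui.printf(frame, 10, 15, 0.4, 0xff0000, 'Text: %d and %f', 7, 3.1415)
    cvui.update()
    cv2.imshow(WINDOW_NAME, frame)
    if cv2.waitKey(20) == 27:  # ESC quits
        break
```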
crosslife/OpenBird
9e0198a1a2295f03fa1e8676e216e22c9c7d380b
cocos2d/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py
python
Cursor.objc_type_encoding
(self)
return self._objc_type_encoding
Return the Objective-C type encoding as a str.
Return the Objective-C type encoding as a str.
[ "Return", "the", "Objective", "-", "C", "type", "encoding", "as", "a", "str", "." ]
def objc_type_encoding(self): """Return the Objective-C type encoding as a str.""" if not hasattr(self, '_objc_type_encoding'): self._objc_type_encoding = \ conf.lib.clang_getDeclObjCTypeEncoding(self) return self._objc_type_encoding
[ "def", "objc_type_encoding", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_objc_type_encoding'", ")", ":", "self", ".", "_objc_type_encoding", "=", "conf", ".", "lib", ".", "clang_getDeclObjCTypeEncoding", "(", "self", ")", "return", "self", ".", "_objc_type_encoding" ]
https://github.com/crosslife/OpenBird/blob/9e0198a1a2295f03fa1e8676e216e22c9c7d380b/cocos2d/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py#L1235-L1241
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/boto3/resources/response.py
python
build_empty_response
(search_path, operation_name, service_model)
return response
Creates an appropriate empty response for the type that is expected, based on the service model's shape type. For example, a value that is normally a list would then return an empty list. A structure would return an empty dict, and a number would return None. :type search_path: string :param search_path: JMESPath expression to search in the response :type operation_name: string :param operation_name: Name of the underlying service operation. :type service_model: :ref:`botocore.model.ServiceModel` :param service_model: The Botocore service model :rtype: dict, list, or None :return: An appropriate empty value
Creates an appropriate empty response for the type that is expected, based on the service model's shape type. For example, a value that is normally a list would then return an empty list. A structure would return an empty dict, and a number would return None.
[ "Creates", "an", "appropriate", "empty", "response", "for", "the", "type", "that", "is", "expected", "based", "on", "the", "service", "model", "s", "shape", "type", ".", "For", "example", "a", "value", "that", "is", "normally", "a", "list", "would", "then", "return", "an", "empty", "list", ".", "A", "structure", "would", "return", "an", "empty", "dict", "and", "a", "number", "would", "return", "None", "." ]
def build_empty_response(search_path, operation_name, service_model): """ Creates an appropriate empty response for the type that is expected, based on the service model's shape type. For example, a value that is normally a list would then return an empty list. A structure would return an empty dict, and a number would return None. :type search_path: string :param search_path: JMESPath expression to search in the response :type operation_name: string :param operation_name: Name of the underlying service operation. :type service_model: :ref:`botocore.model.ServiceModel` :param service_model: The Botocore service model :rtype: dict, list, or None :return: An appropriate empty value """ response = None operation_model = service_model.operation_model(operation_name) shape = operation_model.output_shape if search_path: # Walk the search path and find the final shape. For example, given # a path of ``foo.bar[0].baz``, we first find the shape for ``foo``, # then the shape for ``bar`` (ignoring the indexing), and finally # the shape for ``baz``. for item in search_path.split('.'): item = item.strip('[0123456789]$') if shape.type_name == 'structure': shape = shape.members[item] elif shape.type_name == 'list': shape = shape.member else: raise NotImplementedError( 'Search path hits shape type {0} from {1}'.format( shape.type_name, item)) # Anything not handled here is set to None if shape.type_name == 'structure': response = {} elif shape.type_name == 'list': response = [] elif shape.type_name == 'map': response = {} return response
[ "def", "build_empty_response", "(", "search_path", ",", "operation_name", ",", "service_model", ")", ":", "response", "=", "None", "operation_model", "=", "service_model", ".", "operation_model", "(", "operation_name", ")", "shape", "=", "operation_model", ".", "output_shape", "if", "search_path", ":", "# Walk the search path and find the final shape. For example, given", "# a path of ``foo.bar[0].baz``, we first find the shape for ``foo``,", "# then the shape for ``bar`` (ignoring the indexing), and finally", "# the shape for ``baz``.", "for", "item", "in", "search_path", ".", "split", "(", "'.'", ")", ":", "item", "=", "item", ".", "strip", "(", "'[0123456789]$'", ")", "if", "shape", ".", "type_name", "==", "'structure'", ":", "shape", "=", "shape", ".", "members", "[", "item", "]", "elif", "shape", ".", "type_name", "==", "'list'", ":", "shape", "=", "shape", ".", "member", "else", ":", "raise", "NotImplementedError", "(", "'Search path hits shape type {0} from {1}'", ".", "format", "(", "shape", ".", "type_name", ",", "item", ")", ")", "# Anything not handled here is set to None", "if", "shape", ".", "type_name", "==", "'structure'", ":", "response", "=", "{", "}", "elif", "shape", ".", "type_name", "==", "'list'", ":", "response", "=", "[", "]", "elif", "shape", ".", "type_name", "==", "'map'", ":", "response", "=", "{", "}", "return", "response" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/boto3/resources/response.py#L79-L125
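A hedged sketch of what the helper returns for a list-shaped search path. `build_empty_response` is boto3-internal, so the import path and the s3/`ListObjects` pairing below are illustrative, not a documented API.
```
# Sketch: ListObjects.Contents is a list shape, so the empty response is [].
import botocore.session
from boto3.resources.response import build_empty_response  # internal helper

model = botocore.session.get_session().get_service_model('s3')
print(build_empty_response('Contents', 'ListObjects', model))  # []
```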
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/botocore/credentials.py
python
CredentialResolver.remove
(self, name)
Removes a given ``Credentials`` instance from the chain. :param name: The short name of the credentials instance to remove. :type name: string
Removes a given ``Credentials`` instance from the chain.
[ "Removes", "a", "given", "Credentials", "instance", "from", "the", "chain", "." ]
def remove(self, name): """ Removes a given ``Credentials`` instance from the chain. :param name: The short name of the credentials instance to remove. :type name: string """ available_methods = [p.METHOD for p in self.providers] if name not in available_methods: # It's not present. Fail silently. return offset = available_methods.index(name) self.providers.pop(offset)
[ "def", "remove", "(", "self", ",", "name", ")", ":", "available_methods", "=", "[", "p", ".", "METHOD", "for", "p", "in", "self", ".", "providers", "]", "if", "name", "not", "in", "available_methods", ":", "# It's not present. Fail silently.", "return", "offset", "=", "available_methods", ".", "index", "(", "name", ")", "self", ".", "providers", ".", "pop", "(", "offset", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/credentials.py#L1601-L1614
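A hedged sketch that reaches the default resolver through botocore's component registry; 'env' is the `METHOD` name of the environment-variable provider, and unknown names are ignored per the docstring.
```
# Sketch: drop one provider from the default credential resolution chain.
import botocore.session

session = botocore.session.get_session()
resolver = session.get_component('credential_provider')
resolver.remove('env')           # stop considering AWS_* environment variables
resolver.remove('no-such-name')  # absent names fail silently
```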
lawy623/SVS
b7c7ae367c82a4797ff4a896a2ff304f02e7f724
caffe/python/caffe/coord_map.py
python
inverse
(coord_map)
return ax, 1 / a, -b / a
Invert a coord map by de-scaling and un-shifting; this gives the backward mapping for the gradient.
Invert a coord map by de-scaling and un-shifting; this gives the backward mapping for the gradient.
[ "Invert", "a", "coord", "map", "by", "de", "-", "scaling", "and", "un", "-", "shifting", ";", "this", "gives", "the", "backward", "mapping", "for", "the", "gradient", "." ]
def inverse(coord_map): """ Invert a coord map by de-scaling and un-shifting; this gives the backward mapping for the gradient. """ ax, a, b = coord_map return ax, 1 / a, -b / a
[ "def", "inverse", "(", "coord_map", ")", ":", "ax", ",", "a", ",", "b", "=", "coord_map", "return", "ax", ",", "1", "/", "a", ",", "-", "b", "/", "a" ]
https://github.com/lawy623/SVS/blob/b7c7ae367c82a4797ff4a896a2ff304f02e7f724/caffe/python/caffe/coord_map.py#L106-L112
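A self-contained round-trip demo of the `(axis, scale, shift)` convention; the `inverse` function is restated verbatim from the record, and the sample map is made up.
```
# Sketch: composing a map with its inverse is the identity on coordinates.
def inverse(coord_map):
    ax, a, b = coord_map
    return ax, 1 / a, -b / a

fwd = (2, 0.5, 8.0)   # hypothetical: halve coordinates, shift by 8
bwd = inverse(fwd)    # (2, 2.0, -16.0)
x = 100.0
print(bwd[1] * (fwd[1] * x + fwd[2]) + bwd[2])  # 100.0
```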
lammps/lammps
b75c3065430a75b1b5543a10e10f46d9b4c91913
tools/i-pi/ipi/utils/inputvalue.py
python
Input.fetch
(self)
Dummy function to retrieve data.
Dummy function to retrieve data.
[ "Dummy", "function", "to", "retrieve", "data", "." ]
def fetch(self): """Dummy function to retrieve data.""" self.check() pass
[ "def", "fetch", "(", "self", ")", ":", "self", ".", "check", "(", ")", "pass" ]
https://github.com/lammps/lammps/blob/b75c3065430a75b1b5543a10e10f46d9b4c91913/tools/i-pi/ipi/utils/inputvalue.py#L215-L219
NERSC/timemory
431912b360ff50d1a160d7826e2eea04fbd1037f
scripts/gprof2dot.py
python
Profile.validate
(self)
Validate the edges.
Validate the edges.
[ "Validate", "the", "edges", "." ]
def validate(self): """Validate the edges.""" for function in compat_itervalues(self.functions): for callee_id in compat_keys(function.calls): assert function.calls[callee_id].callee_id == callee_id if callee_id not in self.functions: sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name)) del function.calls[callee_id]
[ "def", "validate", "(", "self", ")", ":", "for", "function", "in", "compat_itervalues", "(", "self", ".", "functions", ")", ":", "for", "callee_id", "in", "compat_keys", "(", "function", ".", "calls", ")", ":", "assert", "function", ".", "calls", "[", "callee_id", "]", ".", "callee_id", "==", "callee_id", "if", "callee_id", "not", "in", "self", ".", "functions", ":", "sys", ".", "stderr", ".", "write", "(", "'warning: call to undefined function %s from function %s\\n'", "%", "(", "str", "(", "callee_id", ")", ",", "function", ".", "name", ")", ")", "del", "function", ".", "calls", "[", "callee_id", "]" ]
https://github.com/NERSC/timemory/blob/431912b360ff50d1a160d7826e2eea04fbd1037f/scripts/gprof2dot.py#L305-L313
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/_pyio.py
python
IOBase._unsupported
(self, name)
Internal: raise an OSError exception for unsupported operations.
Internal: raise an OSError exception for unsupported operations.
[ "Internal", ":", "raise", "an", "OSError", "exception", "for", "unsupported", "operations", "." ]
def _unsupported(self, name): """Internal: raise an OSError exception for unsupported operations.""" raise UnsupportedOperation("%s.%s() not supported" % (self.__class__.__name__, name))
[ "def", "_unsupported", "(", "self", ",", "name", ")", ":", "raise", "UnsupportedOperation", "(", "\"%s.%s() not supported\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "name", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/_pyio.py#L316-L319
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
lts/deps/v8/third_party/jinja2/sandbox.py
python
SandboxedEnvironment.unsafe_undefined
(self, obj, attribute)
return self.undefined('access to attribute %r of %r ' 'object is unsafe.' % ( attribute, obj.__class__.__name__ ), name=attribute, obj=obj, exc=SecurityError)
Return an undefined object for unsafe attributes.
Return an undefined object for unsafe attributes.
[ "Return", "an", "undefined", "object", "for", "unsafe", "attributes", "." ]
def unsafe_undefined(self, obj, attribute): """Return an undefined object for unsafe attributes.""" return self.undefined('access to attribute %r of %r ' 'object is unsafe.' % ( attribute, obj.__class__.__name__ ), name=attribute, obj=obj, exc=SecurityError)
[ "def", "unsafe_undefined", "(", "self", ",", "obj", ",", "attribute", ")", ":", "return", "self", ".", "undefined", "(", "'access to attribute %r of %r '", "'object is unsafe.'", "%", "(", "attribute", ",", "obj", ".", "__class__", ".", "__name__", ")", ",", "name", "=", "attribute", ",", "obj", "=", "obj", ",", "exc", "=", "SecurityError", ")" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/v8/third_party/jinja2/sandbox.py#L397-L403
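A hedged sketch of the sandbox in action: reading an unsafe attribute yields the undefined object built above, and touching that object raises `SecurityError` at render time. The template string is an arbitrary example.
```
# Sketch: __globals__ is classified unsafe, so using it blows up on render.
from jinja2.sandbox import SandboxedEnvironment, SecurityError

env = SandboxedEnvironment()
template = env.from_string('{{ f.__globals__.keys() }}')
try:
    template.render(f=lambda: None)
except SecurityError as exc:
    print('blocked:', exc)
```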
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/numbers.py
python
Complex.real
(self)
Retrieve the real component of this number. This should subclass Real.
Retrieve the real component of this number.
[ "Retrieve", "the", "real", "component", "of", "this", "number", "." ]
def real(self): """Retrieve the real component of this number. This should subclass Real. """ raise NotImplementedError
[ "def", "real", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/numbers.py#L55-L60
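A quick demo of where this ABC sits in the stdlib numeric tower: built-in `complex` registers as `numbers.Complex`, so `.real` is available and returns a value satisfying `numbers.Real`.
```
# Sketch: the built-in complex type fulfils the Complex ABC.
import numbers

z = 3 + 4j
assert isinstance(z, numbers.Complex)
assert isinstance(z.real, numbers.Real)
print(z.real, z.imag)  # 3.0 4.0
```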
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/SANS/sans/algorithm_detail/mask_workspace.py
python
mask_with_mask_files
(mask_info, inst_info, workspace)
return workspace
Apply mask files to the workspace Rolling our own MaskDetectors wrapper since masking is broken in a couple of places that affect us here. Calling MaskDetectors(Workspace=ws_name, MaskedWorkspace=mask_ws_name) is not something we can do because the algorithm masks by ws index rather than detector id, and unfortunately for SANS the detector table is not the same for MaskingWorkspaces as it is for the workspaces containing the data to be masked. Basically, we get a mirror image of what we expect. Instead, we have to extract the det IDs and use those via the DetectorList property. :param mask_info: a SANSStateMask object. :param inst_info: the instrument information object that supplies the instrument definition file (IDF) path. :param workspace: the workspace to be masked. :return: the masked workspace.
Apply mask files to the workspace
[ "Apply", "mask", "files", "to", "the", "workspace" ]
def mask_with_mask_files(mask_info, inst_info, workspace): """ Apply mask files to the workspace Rolling our own MaskDetectors wrapper since masking is broken in a couple of places that affect us here. Calling MaskDetectors(Workspace=ws_name, MaskedWorkspace=mask_ws_name) is not something we can do because the algorithm masks by ws index rather than detector id, and unfortunately for SANS the detector table is not the same for MaskingWorkspaces as it is for the workspaces containing the data to be masked. Basically, we get a mirror image of what we expect. Instead, we have to extract the det IDs and use those via the DetectorList property. :param mask_info: a SANSStateMask object. :param inst_info: the instrument information object that supplies the instrument definition file (IDF) path. :param workspace: the workspace to be masked. :return: the masked workspace. """ mask_files = mask_info.mask_files if mask_files: idf_path = inst_info.idf_path # Mask loader load_name = "LoadMask" load_options = {"Instrument": idf_path, "OutputWorkspace": EMPTY_NAME} load_alg = create_unmanaged_algorithm(load_name, **load_options) mask_alg = create_unmanaged_algorithm("MaskDetectors") file_paths = [find_full_file_path(i) for i in mask_files] # Find full file path returns an empty string, so we need to remake it missing_file_paths = [mask_files[i] for i, path in enumerate(file_paths) if not path] if missing_file_paths: err_str = "The following mask files are missing:" err_str += "\n".join(missing_file_paths) raise FileNotFoundError(err_str) # Masker for mask_file in file_paths: # Get the detector ids which need to be masked load_alg.setProperty("InputFile", mask_file) load_alg.execute() masking_workspace = load_alg.getProperty("OutputWorkspace").value # Could use MaskDetectors directly with masking_workspace but it does not # support MPI. Use a two step approach via a and b instead. # a) Extract detectors to mask from MaskWorkspace det_ids = masking_workspace.getMaskedDetectors() # b) Mask the detector ids on the instrument mask_alg.setProperty("Workspace", workspace) mask_alg.setProperty("DetectorList", det_ids) mask_alg.execute() workspace = mask_alg.getProperty("Workspace").value return workspace
[ "def", "mask_with_mask_files", "(", "mask_info", ",", "inst_info", ",", "workspace", ")", ":", "mask_files", "=", "mask_info", ".", "mask_files", "if", "mask_files", ":", "idf_path", "=", "inst_info", ".", "idf_path", "# Mask loader", "load_name", "=", "\"LoadMask\"", "load_options", "=", "{", "\"Instrument\"", ":", "idf_path", ",", "\"OutputWorkspace\"", ":", "EMPTY_NAME", "}", "load_alg", "=", "create_unmanaged_algorithm", "(", "load_name", ",", "*", "*", "load_options", ")", "mask_alg", "=", "create_unmanaged_algorithm", "(", "\"MaskDetectors\"", ")", "file_paths", "=", "[", "find_full_file_path", "(", "i", ")", "for", "i", "in", "mask_files", "]", "# Find full file path returns an empty string, so we need to remake it", "missing_file_paths", "=", "[", "mask_files", "[", "i", "]", "for", "i", ",", "path", "in", "enumerate", "(", "file_paths", ")", "if", "not", "path", "]", "if", "missing_file_paths", ":", "err_str", "=", "\"The following mask files are missing:\"", "err_str", "+=", "\"\\n\"", ".", "join", "(", "missing_file_paths", ")", "raise", "FileNotFoundError", "(", "err_str", ")", "# Masker", "for", "mask_file", "in", "file_paths", ":", "# Get the detector ids which need to be masked", "load_alg", ".", "setProperty", "(", "\"InputFile\"", ",", "mask_file", ")", "load_alg", ".", "execute", "(", ")", "masking_workspace", "=", "load_alg", ".", "getProperty", "(", "\"OutputWorkspace\"", ")", ".", "value", "# Could use MaskDetectors directly with masking_workspace but it does not", "# support MPI. Use a two step approach via a and b instead.", "# a) Extract detectors to mask from MaskWorkspace", "det_ids", "=", "masking_workspace", ".", "getMaskedDetectors", "(", ")", "# b) Mask the detector ids on the instrument", "mask_alg", ".", "setProperty", "(", "\"Workspace\"", ",", "workspace", ")", "mask_alg", ".", "setProperty", "(", "\"DetectorList\"", ",", "det_ids", ")", "mask_alg", ".", "execute", "(", ")", "workspace", "=", "mask_alg", ".", "getProperty", "(", "\"Workspace\"", ")", ".", "value", "return", "workspace" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/sans/algorithm_detail/mask_workspace.py#L103-L154
apache/trafodion
8455c839ad6b6d7b6e04edda5715053095b78046
core/sqf/src/seatrans/hbase-trx/src/main/python/thrift1/gen-py/hbase/Hbase.py
python
Iface.enableTable
(self, tableName)
Brings a table on-line (enables it) Parameters: - tableName: name of the table
Brings a table on-line (enables it)
[ "Brings", "a", "table", "on", "-", "line", "(", "enables", "it", ")" ]
def enableTable(self, tableName): """ Brings a table on-line (enables it) Parameters: - tableName: name of the table """ pass
[ "def", "enableTable", "(", "self", ",", "tableName", ")", ":", "pass" ]
https://github.com/apache/trafodion/blob/8455c839ad6b6d7b6e04edda5715053095b78046/core/sqf/src/seatrans/hbase-trx/src/main/python/thrift1/gen-py/hbase/Hbase.py#L21-L28
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
demo/snippets/ellipse.py
python
path_ellipse
(cr, x, y, width, height, angle=0)
x - center x y - center y width - width of ellipse (in x direction when angle=0) height - height of ellipse (in y direction when angle=0) angle - angle in radians to rotate, clockwise
x - center x y - center y width - width of ellipse (in x direction when angle=0) height - height of ellipse (in y direction when angle=0) angle - angle in radians to rotate, clockwise
[ "x", "-", "center", "x", "y", "-", "center", "y", "width", "-", "width", "of", "ellipse", "(", "in", "x", "direction", "when", "angle", "=", "0", ")", "height", "-", "height", "of", "ellipse", "(", "in", "y", "direction", "when", "angle", "=", "0", ")", "angle", "-", "angle", "in", "radians", "to", "rotate", "clockwise" ]
def path_ellipse(cr, x, y, width, height, angle=0): """ x - center x y - center y width - width of ellipse (in x direction when angle=0) height - height of ellipse (in y direction when angle=0) angle - angle in radians to rotate, clockwise """ cr.save() cr.translate(x, y) cr.rotate(angle) cr.scale(width / 2.0, height / 2.0) cr.arc(0.0, 0.0, 1.0, 0.0, 2.0 * M_PI) cr.restore()
[ "def", "path_ellipse", "(", "cr", ",", "x", ",", "y", ",", "width", ",", "height", ",", "angle", "=", "0", ")", ":", "cr", ".", "save", "(", ")", "cr", ".", "translate", "(", "x", ",", "y", ")", "cr", ".", "rotate", "(", "angle", ")", "cr", ".", "scale", "(", "width", "/", "2.0", ",", "height", "/", "2.0", ")", "cr", ".", "arc", "(", "0.0", ",", "0.0", ",", "1.0", ",", "0.0", ",", "2.0", "*", "M_PI", ")", "cr", ".", "restore", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/demo/snippets/ellipse.py#L3-L16
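A hedged driver for `path_ellipse`: it assumes pycairo is installed, that the function above is in scope, and that its `M_PI` global is `math.pi`; the canvas size and output file are arbitrary.
```
# Sketch: trace a rotated ellipse path, then fill and save it.
import math
import cairo

M_PI = math.pi  # global expected by path_ellipse

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 120)
cr = cairo.Context(surface)
path_ellipse(cr, x=100, y=60, width=160, height=80, angle=M_PI / 6)
cr.set_source_rgb(0.2, 0.4, 0.8)
cr.fill()
surface.write_to_png('ellipse.png')  # hypothetical output path
```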
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
python
PageElement.findPreviousSiblings
(self, name=None, attrs={}, text=None, limit=None, **kwargs)
return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs)
Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.
Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.
[ "Returns", "the", "siblings", "of", "this", "Tag", "that", "match", "the", "given", "criteria", "and", "appear", "before", "this", "Tag", "in", "the", "document", "." ]
def findPreviousSiblings(self, name=None, attrs={}, text=None, limit=None, **kwargs): """Returns the siblings of this Tag that match the given criteria and appear before this Tag in the document.""" return self._findAll(name, attrs, text, limit, self.previousSiblingGenerator, **kwargs)
[ "def", "findPreviousSiblings", "(", "self", ",", "name", "=", "None", ",", "attrs", "=", "{", "}", ",", "text", "=", "None", ",", "limit", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_findAll", "(", "name", ",", "attrs", ",", "text", ",", "limit", ",", "self", ".", "previousSiblingGenerator", ",", "*", "*", "kwargs", ")" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py#L297-L302
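A hedged sketch against the BeautifulSoup 3 API bundled in this tree (bs4 spells the same call `find_previous_siblings`); the markup is a made-up example, and siblings come back nearest-first.
```
# Sketch: collect the <li> siblings that precede a given element.
from BeautifulSoup import BeautifulSoup

soup = BeautifulSoup('<ul><li>a</li><li>b</li><li id="here">c</li></ul>')
anchor = soup.find('li', id='here')
for sib in anchor.findPreviousSiblings('li'):
    print(sib.string)  # prints 'b' then 'a'
```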
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/win32com/client/gencache.py
python
GetClassForCLSID
(clsid)
Get a Python class for a CLSID Given a CLSID, return a Python class which wraps the COM object Returns the Python class, or None if no module is available. Params clsid -- A COM CLSID (or string repr of one)
Get a Python class for a CLSID Given a CLSID, return a Python class which wraps the COM object Returns the Python class, or None if no module is available. Params clsid -- A COM CLSID (or string repr of one)
[ "Get", "a", "Python", "class", "for", "a", "CLSID", "Given", "a", "CLSID", "return", "a", "Python", "class", "which", "wraps", "the", "COM", "object", "Returns", "the", "Python", "class", "or", "None", "if", "no", "module", "is", "available", ".", "Params", "clsid", "--", "A", "COM", "CLSID", "(", "or", "string", "repr", "of", "one", ")" ]
def GetClassForCLSID(clsid): """Get a Python class for a CLSID Given a CLSID, return a Python class which wraps the COM object Returns the Python class, or None if no module is available. Params clsid -- A COM CLSID (or string repr of one) """ # first, take a short-cut - we may already have generated support ready-to-roll. clsid = str(clsid) if CLSIDToClass.HasClass(clsid): return CLSIDToClass.GetClass(clsid) mod = GetModuleForCLSID(clsid) if mod is None: return None try: return CLSIDToClass.GetClass(clsid) except KeyError: return None
[ "def", "GetClassForCLSID", "(", "clsid", ")", ":", "# first, take a short-cut - we may already have generated support ready-to-roll.", "clsid", "=", "str", "(", "clsid", ")", "if", "CLSIDToClass", ".", "HasClass", "(", "clsid", ")", ":", "return", "CLSIDToClass", ".", "GetClass", "(", "clsid", ")", "mod", "=", "GetModuleForCLSID", "(", "clsid", ")", "if", "mod", "is", "None", ":", "return", "None", "try", ":", "return", "CLSIDToClass", ".", "GetClass", "(", "clsid", ")", "except", "KeyError", ":", "return", "None" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/win32com/client/gencache.py#L165-L185
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/cvf.py
python
generate
(env)
Add Builders and construction variables for compaq visual fortran to an Environment.
Add Builders and construction variables for compaq visual fortran to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "compaq", "visual", "fortran", "to", "an", "Environment", "." ]
def generate(env): """Add Builders and construction variables for compaq visual fortran to an Environment.""" fortran.generate(env) env['FORTRAN'] = 'f90' env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}' env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}' env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}' env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}' env['OBJSUFFIX'] = '.obj' env['FORTRANMODDIR'] = '${TARGET.dir}' env['FORTRANMODDIRPREFIX'] = '/module:' env['FORTRANMODDIRSUFFIX'] = ''
[ "def", "generate", "(", "env", ")", ":", "fortran", ".", "generate", "(", "env", ")", "env", "[", "'FORTRAN'", "]", "=", "'f90'", "env", "[", "'FORTRANCOM'", "]", "=", "'$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'", "env", "[", "'FORTRANPPCOM'", "]", "=", "'$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'", "env", "[", "'SHFORTRANCOM'", "]", "=", "'$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'", "env", "[", "'SHFORTRANPPCOM'", "]", "=", "'$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'", "env", "[", "'OBJSUFFIX'", "]", "=", "'.obj'", "env", "[", "'FORTRANMODDIR'", "]", "=", "'${TARGET.dir}'", "env", "[", "'FORTRANMODDIRPREFIX'", "]", "=", "'/module:'", "env", "[", "'FORTRANMODDIRSUFFIX'", "]", "=", "''" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/cvf.py#L36-L49
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/python2_version/klampt/math/optimize.py
python
OptimizationProblem.addSymbolicConstraint
(self,func,context,varorder=None,blackbox=False)
adds a constraint from a symbolic Function or Expression (see symbolic module). This will be "smart" in that AND Expressions will be converted to multiple constraints, inequalities will be converted to inequality constraints, and bounds will be converted to bound constraints. All other constraints will be treated as feasibility constraints
adds a constraint from a symbolic Function or Expression (see symbolic module). This will be "smart" in that AND Expressions will be converted to multiple constraints, inequalities will be converted to inequality constraints, and bounds will be converted to bound constraints. All other constraints will be treated as feasibility constraints
[ "adds", "a", "constraint", "from", "a", "symbolic", "Function", "or", "Expression", "(", "see", "symbolic", "module", ")", ".", "This", "will", "be", "smart", "in", "that", "AND", "Expressions", "will", "be", "converted", "to", "multiple", "constraints", "inequalities", "will", "be", "converted", "to", "inequality", "constraints", "and", "bounds", "will", "be", "converted", "to", "bound", "constraints", ".", "All", "other", "constraints", "will", "be", "treated", "as", "feasibility", "constraints" ]
def addSymbolicConstraint(self,func,context,varorder=None,blackbox=False): """adds a constraint from a symbolic Function or Expression (see symbolic module). This will be "smart" in that AND Expressions will be converted to multiple constraints, inequalities will be converted to inequality constraints, and bounds will be converted to bound constraints. All other constraints will be treated as feasibility constraints""" if varorder is None: varorder = context.variables if symbolic.is_op(func,"and"): for a in func.args: self.addSymbolicConstraint(a,context,varorder) elif symbolic.is_op(func,"le"): if symbolic.is_var(func.args[0]) and symbolic.is_const(func.args[1]): #x <= c x = symbolic.to_var(func.args[0]) xmax = symbolic.to_const(func.args[1]) indices = context.getFlatVarRanges(varorder) xindex = [i for i,v in enumerate(varorder) if v.name == x.name][0] ai,bi = indices[xindex],indices[xindex+1] n = indices[-1] if self.bounds is None: self.bounds = (np.array([-float('inf')]*n),np.array([float('inf')]*n)) self.bounds[1][ai:bi] = np.minimum(self.bounds[1][ai:bi],xmax) elif symbolic.is_var(func.args[1]) and symbolic.is_const(func.args[0]): #c <= x xmin = symbolic.to_const(func.args[0]) x = symbolic.to_var(func.args[1]) indices = context.getFlatVarRanges(varorder) xindex = [i for i,v in enumerate(varorder) if v.name == x.name][0] ai,bi = indices[xindex],indices[xindex+1] n = indices[-1] if self.bounds is None: self.bounds = (np.array([-float('inf')]*n),np.array([float('inf')]*n)) self.bounds[0][ai:bi] = np.maximum(self.bounds[0][ai:bi],xmin) else: h = symbolic.simplify(func.args[0]-func.args[1]) if func.args[0].returnType().is_scalar() and func.args[1].returnType().is_scalar(): #need to convert to a vector h = symbolic.flatten(h) hpy,varorder = context.makeFlatFunction(h,varorder) dhpy,varorder = context.makeFlatFunctionDeriv(h,varorder) self.addInequality(hpy,dhpy) elif symbolic.is_op(func,"ge"): c = (func.args[1] <= func.args[0]) self.addSymbolicConstraint(c,context,varorder) elif symbolic.is_op(func,"eq"): g = symbolic.simplify(func.args[0]-func.args[1]) if func.args[0].returnType().is_scalar() and func.args[1].returnType().is_scalar(): #need to convert to a vector g = symbolic.flatten(g) gpy,varorder = context.makeFlatFunction(g,varorder) dgpy,varorder = context.makeFlatFunctionDeriv(g,varorder) self.addEquality(gpy,dgpy) elif symbolic.is_op(func): if func.functionInfo is symbolic_linalg.bound_contains and symbolic.is_const(func.args[0]) and symbolic.is_const(func.args[1]) and symbolic.is_var(func.args[2]): #bound constraint xmin = symbolic.to_const(func.args[0]) xmax = symbolic.to_const(func.args[1]) x = symbolic.to_var(func.args[2]) indices = context.getFlatVarRanges(varorder) xindex = [i for i,v in enumerate(varorder) if v.name == x.name][0] ai,bi = indices[xindex],indices[xindex+1] n = indices[-1] if self.bounds is None: self.bounds = ([-float('inf')]*n,[float('inf')]*n) for i,a,b in zip(range(ai,bi),xmin,xmax): self.bounds[0][i] = max(self.bounds[0][i],a) self.bounds[1][i] = min(self.bounds[1][i],b) else: #it's a generic boolean if not blackbox: print "OptimizationProblem.addSymbolicConstraint(): Warning, turning function",func,"into black box function" fpy,varorder = context.makeFlatFunction(func,varorder) self.addFeasibilityTest(fpy) else: #it's a generic boolean if not blackbox: print "OptimizationProblem.addSymbolicConstraint(): Warning, turning function",func,"into black box function" fpy,varorder = context.makeFlatFunction(func,varorder) self.addFeasibilityTest(fpy)
[ "def", "addSymbolicConstraint", "(", "self", ",", "func", ",", "context", ",", "varorder", "=", "None", ",", "blackbox", "=", "False", ")", ":", "if", "varorder", "is", "None", ":", "varorder", "=", "context", ".", "variables", "if", "symbolic", ".", "is_op", "(", "func", ",", "\"and\"", ")", ":", "for", "a", "in", "func", ".", "args", ":", "self", ".", "addSymbolicConstraint", "(", "self", ",", "a", ",", "context", ",", "varorder", ")", "elif", "symbolic", ".", "is_op", "(", "func", ",", "\"le\"", ")", ":", "if", "symbolic", ".", "is_var", "(", "func", ".", "args", "[", "0", "]", ")", "and", "symbolic", ".", "is_const", "(", "func", ".", "args", "[", "1", "]", ")", ":", "#x <= c", "x", "=", "symbolic", ".", "to_var", "(", "func", ".", "args", "[", "0", "]", ")", "xmax", "=", "symbolic", ".", "to_const", "(", "func", ".", "args", "[", "1", "]", ")", "indices", "=", "context", ".", "getFlatVarRanges", "(", "varorder", ")", "xindex", "=", "[", "i", "for", "i", ",", "v", "in", "enumerate", "(", "varorder", ")", "if", "v", ".", "name", "==", "x", ".", "name", "]", "[", "0", "]", "ai", ",", "bi", "=", "indices", "[", "xindex", "]", ",", "indices", "[", "xindex", "+", "1", "]", "n", "=", "indices", "[", "-", "1", "]", "if", "self", ".", "bounds", "is", "None", ":", "self", ".", "bounds", "=", "(", "np", ".", "array", "(", "[", "-", "float", "(", "'inf'", ")", "]", "*", "n", ")", ",", "np", ".", "array", "(", "[", "float", "(", "'inf'", ")", "]", "*", "n", ")", ")", "self", ".", "bounds", "[", "1", "]", "[", "ai", ":", "bi", "]", "=", "np", ".", "minimum", "(", "self", ".", "bounds", "[", "1", "]", "[", "ai", ":", "bi", "]", ",", "xmax", ")", "elif", "symbolic", ".", "is_var", "(", "func", ".", "args", "[", "1", "]", ")", "and", "symbolic", ".", "is_const", "(", "func", ".", "args", "[", "0", "]", ")", ":", "#c <= x", "xmin", "=", "symbolic", ".", "to_const", "(", "func", ".", "args", "[", "0", "]", ")", "x", "=", "symbolic", ".", "to_var", "(", "func", ".", "args", "[", "1", "]", ")", "indices", "=", "context", ".", "getFlatVarRanges", "(", "varorder", ")", "xindex", "=", "[", "i", "for", "i", ",", "v", "in", "enumerate", "(", "varorder", ")", "if", "v", ".", "name", "==", "x", ".", "name", "]", "[", "0", "]", "ai", ",", "bi", "=", "indices", "[", "xindex", "]", ",", "indices", "[", "xindex", "+", "1", "]", "n", "=", "indices", "[", "-", "1", "]", "if", "self", ".", "bounds", "is", "None", ":", "self", ".", "bounds", "=", "(", "np", ".", "array", "(", "[", "-", "float", "(", "'inf'", ")", "]", "*", "n", ")", ",", "np", ".", "array", "(", "[", "float", "(", "'inf'", ")", "]", "*", "n", ")", ")", "self", ".", "bounds", "[", "0", "]", "[", "ai", ":", "bi", "]", "=", "np", ".", "maximum", "(", "self", ".", "bounds", "[", "0", "]", "[", "ai", ":", "bi", "]", ",", "a", ")", "else", ":", "h", "=", "symbolic", ".", "simplify", "(", "func", ".", "args", "[", "0", "]", "-", "func", ".", "args", "[", "1", "]", ")", "if", "func", ".", "args", "[", "0", "]", ".", "returnType", "(", ")", ".", "is_scalar", "(", ")", "and", "func", ".", "args", "[", "1", "]", ".", "returnType", "(", ")", ".", "is_scalar", "(", ")", ":", "#need to convert to a vector", "h", "=", "symbolic", ".", "flatten", "(", "h", ")", "hpy", ",", "varorder", "=", "context", ".", "makeFlatFunction", "(", "h", ",", "varorder", ")", "dhpy", ",", "varorder", "=", "context", ".", "makeFlatFunctionDeriv", "(", "h", ",", "varorder", ")", "self", ".", "addInequality", "(", "hpy", ",", "dhpy", ")", "elif", "symbolic", ".", "is_op", "(", "func", ",", 
"\"ge\"", ")", ":", "c", "=", "(", "func", ".", "args", "[", "1", "]", "<=", "func", ".", "args", "[", "0", "]", ")", "self", ".", "addSymbolicConstraint", "(", "c", ",", "context", ",", "varorder", ")", "elif", "symbolic", ".", "is_op", "(", "func", ",", "\"eq\"", ")", ":", "g", "=", "symbolic", ".", "simplify", "(", "func", ".", "args", "[", "0", "]", "-", "func", ".", "args", "[", "1", "]", ")", "if", "func", ".", "args", "[", "0", "]", ".", "returnType", "(", ")", ".", "is_scalar", "(", ")", "and", "func", ".", "args", "[", "1", "]", ".", "returnType", "(", ")", ".", "is_scalar", "(", ")", ":", "#need to convert to a vector", "g", "=", "symbolic", ".", "flatten", "(", "g", ")", "gpy", ",", "varorder", "=", "context", ".", "makeFlatFunction", "(", "g", ",", "varorder", ")", "dgpy", ",", "varorder", "=", "context", ".", "makeFlatFunctionDeriv", "(", "g", ",", "varorder", ")", "self", ".", "addEquality", "(", "gpy", ",", "dgpy", ")", "elif", "symbolic", ".", "is_op", "(", "func", ")", ":", "if", "func", ".", "functionInfo", "is", "symbolic_linalg", ".", "bound_contains", "and", "symbolic", ".", "is_const", "(", "func", ".", "args", "[", "0", "]", ")", "and", "symbolic", ".", "is_const", "(", "func", ".", "args", "[", "1", "]", ")", "and", "symbolic", ".", "is_var", "(", "func", ".", "args", "[", "2", "]", ")", ":", "#bound constraint", "xmin", "=", "symbolic", ".", "to_const", "(", "func", ".", "args", "[", "0", "]", ")", "xmax", "=", "symbolic", ".", "to_const", "(", "func", ".", "args", "[", "1", "]", ")", "x", "=", "symbolic", ".", "to_var", "(", "func", ".", "args", "[", "2", "]", ")", "indices", "=", "context", ".", "getFlatVarRanges", "(", "varorder", ")", "xindex", "=", "[", "i", "for", "i", ",", "v", "in", "enumerate", "(", "varorder", ")", "if", "v", ".", "name", "==", "x", ".", "name", "]", "[", "0", "]", "ai", ",", "bi", "=", "indices", "[", "xindex", "]", ",", "indices", "[", "xindex", "+", "1", "]", "n", "=", "indices", "[", "-", "1", "]", "if", "self", ".", "bounds", "is", "None", ":", "self", ".", "bounds", "=", "(", "[", "-", "float", "(", "'inf'", ")", "]", "*", "n", ",", "[", "float", "(", "'inf'", ")", "]", "*", "n", ")", "for", "i", ",", "a", ",", "b", "in", "zip", "(", "range", "(", "ai", ",", "bi", ")", ",", "xmin", ",", "xmax", ")", ":", "self", ".", "bounds", "[", "0", "]", "[", "i", "]", "=", "max", "(", "self", ".", "bounds", "[", "0", "]", "[", "i", "]", ",", "a", ")", "self", ".", "bounds", "[", "1", "]", "[", "i", "]", "=", "min", "(", "self", ".", "bounds", "[", "1", "]", "[", "i", "]", ",", "b", ")", "else", ":", "#it's a generic boolean", "if", "not", "blackbox", ":", "print", "\"OptimizationProblem.addSymbolicConstraint(): Warning, turning function\"", ",", "func", ",", "\"into black box function\"", "fpy", ",", "varorder", "=", "context", ".", "makeFlatFunction", "(", "func", ",", "varorder", ")", "self", ".", "addFeasibilityTest", "(", "fpy", ")", "else", ":", "#it's a generic boolean", "if", "not", "blackbox", ":", "print", "\"OptimizationProblem.addSymbolicConstraint(): Warning, turning function\"", ",", "func", ",", "\"into black box function\"", "fpy", ",", "varorder", "=", "context", ".", "makeFlatFunction", "(", "func", ",", "varorder", ")", "self", ".", "addFeasibilityTest", "(", "fpy", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/math/optimize.py#L82-L160
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/packager.py
python
unpack_binaries_into
(build_os, arch, spec, where)
Unpack the tarfile for (build_os, arch, spec) into directory where.
Unpack the tarfile for (build_os, arch, spec) into directory where.
[ "Unpack", "the", "tarfile", "for", "(", "build_os", "arch", "spec", ")", "into", "directory", "where", "." ]
def unpack_binaries_into(build_os, arch, spec, where): """Unpack the tarfile for (build_os, arch, spec) into directory where.""" rootdir = os.getcwd() ensure_dir(where) # Note: POSIX tar doesn't require support for gtar's "-C" option, # and Python's tarfile module prior to Python 2.7 doesn't have the # features to make this detail easy. So we'll just do the dumb # thing and chdir into where and run tar there. os.chdir(where) try: sysassert(["tar", "xvzf", rootdir + "/" + tarfile(build_os, arch, spec)]) release_dir = glob('mongodb-linux-*')[0] for releasefile in "bin", "LICENSE-Community.txt", "README", "THIRD-PARTY-NOTICES", "MPL-2": print("moving file: %s/%s" % (release_dir, releasefile)) os.rename("%s/%s" % (release_dir, releasefile), releasefile) os.rmdir(release_dir) except Exception: exc = sys.exc_info()[1] os.chdir(rootdir) raise exc os.chdir(rootdir)
[ "def", "unpack_binaries_into", "(", "build_os", ",", "arch", ",", "spec", ",", "where", ")", ":", "rootdir", "=", "os", ".", "getcwd", "(", ")", "ensure_dir", "(", "where", ")", "# Note: POSIX tar doesn't require support for gtar's \"-C\" option,", "# and Python's tarfile module prior to Python 2.7 doesn't have the", "# features to make this detail easy. So we'll just do the dumb", "# thing and chdir into where and run tar there.", "os", ".", "chdir", "(", "where", ")", "try", ":", "sysassert", "(", "[", "\"tar\"", ",", "\"xvzf\"", ",", "rootdir", "+", "\"/\"", "+", "tarfile", "(", "build_os", ",", "arch", ",", "spec", ")", "]", ")", "release_dir", "=", "glob", "(", "'mongodb-linux-*'", ")", "[", "0", "]", "for", "releasefile", "in", "\"bin\"", ",", "\"LICENSE-Community.txt\"", ",", "\"README\"", ",", "\"THIRD-PARTY-NOTICES\"", ",", "\"MPL-2\"", ":", "print", "(", "\"moving file: %s/%s\"", "%", "(", "release_dir", ",", "releasefile", ")", ")", "os", ".", "rename", "(", "\"%s/%s\"", "%", "(", "release_dir", ",", "releasefile", ")", ",", "releasefile", ")", "os", ".", "rmdir", "(", "release_dir", ")", "except", "Exception", ":", "exc", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "os", ".", "chdir", "(", "rootdir", ")", "raise", "exc", "os", ".", "chdir", "(", "rootdir", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/packager.py#L492-L512
openmm/openmm
cb293447c4fc8b03976dfe11399f107bab70f3d9
wrappers/python/openmm/app/internal/charmm/topologyobjects.py
python
Residue.__contains__
(self, thing)
return thing in self.atoms
True if an atom is present in this residue
True if an atom is present in this residue
[ "True", "if", "an", "atom", "is", "present", "in", "this", "residue" ]
def __contains__(self, thing): """ True if an atom is present in this residue """ return thing in self.atoms
[ "def", "__contains__", "(", "self", ",", "thing", ")", ":", "return", "thing", "in", "self", ".", "atoms" ]
https://github.com/openmm/openmm/blob/cb293447c4fc8b03976dfe11399f107bab70f3d9/wrappers/python/openmm/app/internal/charmm/topologyobjects.py#L457-L459
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/boto/boto/sns/connection.py
python
SNSConnection.subscribe
(self, topic, protocol, endpoint)
return self._make_request('Subscribe', params)
Subscribe to a Topic. :type topic: string :param topic: The ARN of the new topic. :type protocol: string :param protocol: The protocol used to communicate with the subscriber. Current choices are: email|email-json|http|https|sqs|sms|application :type endpoint: string :param endpoint: The location of the endpoint for the subscriber. * For email, this would be a valid email address * For email-json, this would be a valid email address * For http, this would be a URL beginning with http * For https, this would be a URL beginning with https * For sqs, this would be the ARN of an SQS Queue * For sms, this would be a phone number of an SMS-enabled device * For application, the endpoint is the EndpointArn of a mobile app and device.
Subscribe to a Topic.
[ "Subscribe", "to", "a", "Topic", "." ]
def subscribe(self, topic, protocol, endpoint): """ Subscribe to a Topic. :type topic: string :param topic: The ARN of the new topic. :type protocol: string :param protocol: The protocol used to communicate with the subscriber. Current choices are: email|email-json|http|https|sqs|sms|application :type endpoint: string :param endpoint: The location of the endpoint for the subscriber. * For email, this would be a valid email address * For email-json, this would be a valid email address * For http, this would be a URL beginning with http * For https, this would be a URL beginning with https * For sqs, this would be the ARN of an SQS Queue * For sms, this would be a phone number of an SMS-enabled device * For application, the endpoint is the EndpointArn of a mobile app and device. """ params = {'TopicArn': topic, 'Protocol': protocol, 'Endpoint': endpoint} return self._make_request('Subscribe', params)
[ "def", "subscribe", "(", "self", ",", "topic", ",", "protocol", ",", "endpoint", ")", ":", "params", "=", "{", "'TopicArn'", ":", "topic", ",", "'Protocol'", ":", "protocol", ",", "'Endpoint'", ":", "endpoint", "}", "return", "self", ".", "_make_request", "(", "'Subscribe'", ",", "params", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/sns/connection.py#L292-L320
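A hedged sketch against the legacy boto 2 API; the region, topic ARN, and e-mail address are placeholders.
```
# Sketch: subscribe an e-mail endpoint to an existing topic.
import boto.sns

conn = boto.sns.connect_to_region('us-east-1')
topic_arn = 'arn:aws:sns:us-east-1:123456789012:my-topic'  # hypothetical ARN
conn.subscribe(topic_arn, 'email', 'user@example.com')
```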
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/contrib/layers/python/layers/utils.py
python
collect_named_outputs
(collections, name, outputs)
return outputs
Add `Tensor` outputs tagged with name to collections. It is useful to collect end-points or tags for summaries. Example of usage: logits = collect_named_outputs('end_points', 'inception_v3/logits', logits) assert logits.tag == 'inception_v3/logits' Args: collections: A collection or list of collections. If None skip collection. name: String, name to represent the outputs, ex. 'inception_v3/conv1' outputs: Tensor, an output tensor to collect Returns: The outputs Tensor to allow inline call.
Add `Tensor` outputs tagged with name to collections.
[ "Add", "Tensor", "outputs", "tagged", "with", "name", "to", "collections", "." ]
def collect_named_outputs(collections, name, outputs): """Add `Tensor` outputs tagged with name to collections. It is useful to collect end-points or tags for summaries. Example of usage: logits = collect_named_outputs('end_points', 'inception_v3/logits', logits) assert logits.tag == 'inception_v3/logits' Args: collections: A collection or list of collections. If None skip collection. name: String, name to represent the outputs, ex. 'inception_v3/conv1' outputs: Tensor, an output tensor to collect Returns: The outputs Tensor to allow inline call. """ # Remove ending '/' if present. if name[-1] == '/': name = name[:-1] if collections: ops.add_to_collections(collections, NamedOutputs(name, outputs)) return outputs
[ "def", "collect_named_outputs", "(", "collections", ",", "name", ",", "outputs", ")", ":", "# Remove ending '/' if present.", "if", "name", "[", "-", "1", "]", "==", "'/'", ":", "name", "=", "name", "[", ":", "-", "1", "]", "if", "collections", ":", "ops", ".", "add_to_collections", "(", "collections", ",", "NamedOutputs", "(", "name", ",", "outputs", ")", ")", "return", "outputs" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/layers/python/layers/utils.py#L40-L61
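A minimal usage sketch, assuming TensorFlow 1.x where tf.contrib is still available; the tensor and the collection/output names are illustrative.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import utils

logits = tf.constant([[1.0, 2.0]], name='logits')
# The tensor comes back unchanged, so the call composes inline.
out = utils.collect_named_outputs('end_points', 'net/logits', logits)
assert out is logits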
geemaple/leetcode
68bc5032e1ee52c22ef2f2e608053484c487af54
leetcode/203.remove-linked-list-elements.py
python
Solution.removeElements
(self, head, val)
return new_head.next
:type head: ListNode :type val: int :rtype: ListNode
:type head: ListNode :type val: int :rtype: ListNode
[ ":", "type", "head", ":", "ListNode", ":", "type", "val", ":", "int", ":", "rtype", ":", "ListNode" ]
def removeElements(self, head, val): """ :type head: ListNode :type val: int :rtype: ListNode """ new_head = ListNode(0) new_head.next = head cur = new_head while(cur.next is not None): node = cur.next if node.val != val: cur = cur.next continue cur.next = node.next del node return new_head.next
[ "def", "removeElements", "(", "self", ",", "head", ",", "val", ")", ":", "new_head", "=", "ListNode", "(", "0", ")", "new_head", ".", "next", "=", "head", "cur", "=", "new_head", "while", "(", "cur", ".", "next", "is", "not", "None", ")", ":", "node", "=", "cur", ".", "next", "if", "node", ".", "val", "!=", "val", ":", "cur", "=", "cur", ".", "next", "continue", "cur", ".", "next", "=", "node", ".", "next", "del", "node", "return", "new_head", ".", "next" ]
https://github.com/geemaple/leetcode/blob/68bc5032e1ee52c22ef2f2e608053484c487af54/leetcode/203.remove-linked-list-elements.py#L8-L27
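A self-contained check of the solution above; ListNode is defined here only because LeetCode normally supplies it.

class ListNode(object):
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    dummy = ListNode(0)
    cur = dummy
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next

head = build([1, 2, 6, 3, 4, 5, 6])
node = Solution().removeElements(head, 6)  # Solution as defined above
out = []
while node is not None:
    out.append(node.val)
    node = node.next
print(out)  # [1, 2, 3, 4, 5]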
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/numpy/files/numpy/lib/function_base.py
python
bartlett
(M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The Fourier transform of the Bartlett window is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. ]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy import clip, log10, array, bartlett, linspace >>> from numpy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = abs(fftshift(A)) >>> freq = linspace(-0.5,0.5,len(A)) >>> response = 20*log10(mag) >>> response = clip(response,-100,100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show()
Return the Bartlett window.
[ "Return", "the", "Bartlett", "window", "." ]
def bartlett(M): """ Return the Bartlett window. The Bartlett window is very similar to a triangular window, except that the end points are at zero. It is often used in signal processing for tapering a signal, without generating too much ripple in the frequency domain. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. Returns ------- out : array The triangular window, normalized to one (the value one appears only if the number of samples is odd), with the first and last samples equal to zero. See Also -------- blackman, hamming, hanning, kaiser Notes ----- The Bartlett window is defined as .. math:: w(n) = \\frac{2}{M-1} \\left( \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| \\right) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. Note that convolution with this window produces linear interpolation. It is also known as an apodization (which means"removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. The fourier transform of the Bartlett is the product of two sinc functions. Note the excellent discussion in Kanasewich. References ---------- .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", Biometrika 37, 1-16, 1950. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal Processing", Prentice-Hall, 1999, pp. 468-471. .. [4] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 429. Examples -------- >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. ]) Plot the window and its frequency response (requires SciPy and matplotlib): >>> from numpy import clip, log10, array, bartlett, linspace >>> from numpy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = bartlett(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Amplitude") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Sample") <matplotlib.text.Text object at 0x...> >>> plt.show() >>> plt.figure() <matplotlib.figure.Figure object at 0x...> >>> A = fft(window, 2048) / 25.5 >>> mag = abs(fftshift(A)) >>> freq = linspace(-0.5,0.5,len(A)) >>> response = 20*log10(mag) >>> response = clip(response,-100,100) >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") <matplotlib.text.Text object at 0x...> >>> plt.ylabel("Magnitude [dB]") <matplotlib.text.Text object at 0x...> >>> plt.xlabel("Normalized frequency [cycles per sample]") <matplotlib.text.Text object at 0x...> >>> plt.axis('tight') (-0.5, 0.5, -100.0, ...) >>> plt.show() """ if M < 1: return array([]) if M == 1: return ones(1, float) n = arange(0,M) return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
[ "def", "bartlett", "(", "M", ")", ":", "if", "M", "<", "1", ":", "return", "array", "(", "[", "]", ")", "if", "M", "==", "1", ":", "return", "ones", "(", "1", ",", "float", ")", "n", "=", "arange", "(", "0", ",", "M", ")", "return", "where", "(", "less_equal", "(", "n", ",", "(", "M", "-", "1", ")", "/", "2.0", ")", ",", "2.0", "*", "n", "/", "(", "M", "-", "1", ")", ",", "2.0", "-", "2.0", "*", "n", "/", "(", "M", "-", "1", ")", ")" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/lib/function_base.py#L2166-L2273
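A quick numerical check of the closed-form definition in the docstring, assuming NumPy is installed.

import numpy as np

M = 5
w = np.bartlett(M)
print(w)  # [ 0.   0.5  1.   0.5  0. ]

n = np.arange(M)
ref = np.where(n <= (M - 1) / 2.0, 2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
assert np.allclose(w, ref)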
floooh/oryol
eb08cffe1b1cb6b05ed14ec692bca9372cef064e
fips-files/generators/util/png.py
python
Reader.asDirect
(self)
return x,y,pixels,meta
Returns the image data as a direct representation of an ``x * y * planes`` array. This method is intended to remove the need for callers to deal with palettes and transparency themselves. Images with a palette (colour type 3) are converted to RGB or RGBA; images with transparency (a ``tRNS`` chunk) are converted to LA or RGBA as appropriate. When returned in this format the pixel values represent the colour value directly without needing to refer to palettes or transparency information. Like the :meth:`read` method this method returns a 4-tuple: (*width*, *height*, *pixels*, *meta*) This method normally returns pixel values with the bit depth they have in the source image, but when the source PNG has an ``sBIT`` chunk it is inspected and can reduce the bit depth of the result pixels; pixel values will be reduced according to the bit depth specified in the ``sBIT`` chunk (PNG nerds should note a single result bit depth is used for all channels; the maximum of the ones specified in the ``sBIT`` chunk. An RGB565 image will be rescaled to 6-bit RGB666). The *meta* dictionary that is returned reflects the `direct` format and not the original source image. For example, an RGB source image with a ``tRNS`` chunk to represent a transparent colour, will have ``planes=3`` and ``alpha=False`` for the source image, but the *meta* dictionary returned by this method will have ``planes=4`` and ``alpha=True`` because an alpha channel is synthesized and added. *pixels* is the pixel data in boxed row flat pixel format (just like the :meth:`read` method). All the other aspects of the image data are not changed.
Returns the image data as a direct representation of an ``x * y * planes`` array. This method is intended to remove the need for callers to deal with palettes and transparency themselves. Images with a palette (colour type 3) are converted to RGB or RGBA; images with transparency (a ``tRNS`` chunk) are converted to LA or RGBA as appropriate. When returned in this format the pixel values represent the colour value directly without needing to refer to palettes or transparency information.
[ "Returns", "the", "image", "data", "as", "a", "direct", "representation", "of", "an", "x", "*", "y", "*", "planes", "array", ".", "This", "method", "is", "intended", "to", "remove", "the", "need", "for", "callers", "to", "deal", "with", "palettes", "and", "transparency", "themselves", ".", "Images", "with", "a", "palette", "(", "colour", "type", "3", ")", "are", "converted", "to", "RGB", "or", "RGBA", ";", "images", "with", "transparency", "(", "a", "tRNS", "chunk", ")", "are", "converted", "to", "LA", "or", "RGBA", "as", "appropriate", ".", "When", "returned", "in", "this", "format", "the", "pixel", "values", "represent", "the", "colour", "value", "directly", "without", "needing", "to", "refer", "to", "palettes", "or", "transparency", "information", "." ]
def asDirect(self): """Returns the image data as a direct representation of an ``x * y * planes`` array. This method is intended to remove the need for callers to deal with palettes and transparency themselves. Images with a palette (colour type 3) are converted to RGB or RGBA; images with transparency (a ``tRNS`` chunk) are converted to LA or RGBA as appropriate. When returned in this format the pixel values represent the colour value directly without needing to refer to palettes or transparency information. Like the :meth:`read` method this method returns a 4-tuple: (*width*, *height*, *pixels*, *meta*) This method normally returns pixel values with the bit depth they have in the source image, but when the source PNG has an ``sBIT`` chunk it is inspected and can reduce the bit depth of the result pixels; pixel values will be reduced according to the bit depth specified in the ``sBIT`` chunk (PNG nerds should note a single result bit depth is used for all channels; the maximum of the ones specified in the ``sBIT`` chunk. An RGB565 image will be rescaled to 6-bit RGB666). The *meta* dictionary that is returned reflects the `direct` format and not the original source image. For example, an RGB source image with a ``tRNS`` chunk to represent a transparent colour, will have ``planes=3`` and ``alpha=False`` for the source image, but the *meta* dictionary returned by this method will have ``planes=4`` and ``alpha=True`` because an alpha channel is synthesized and added. *pixels* is the pixel data in boxed row flat pixel format (just like the :meth:`read` method). All the other aspects of the image data are not changed. """ self.preamble() # Simple case, no conversion necessary. if not self.colormap and not self.trns and not self.sbit: return self.read() x,y,pixels,meta = self.read() if self.colormap: meta['colormap'] = False meta['alpha'] = bool(self.trns) meta['bitdepth'] = 8 meta['planes'] = 3 + bool(self.trns) plte = self.palette() def iterpal(pixels): for row in pixels: row = map(plte.__getitem__, row) yield array('B', itertools.chain(*row)) pixels = iterpal(pixels) elif self.trns: # It would be nice if there was some reasonable way of doing # this without generating a whole load of intermediate tuples. # But tuples does seem like the easiest way, with no other way # clearly much simpler or much faster. (Actually, the L to LA # conversion could perhaps go faster (all those 1-tuples!), but # I still wonder whether the code proliferation is worth it) it = self.transparent maxval = 2**meta['bitdepth']-1 planes = meta['planes'] meta['alpha'] = True meta['planes'] += 1 typecode = 'BH'[meta['bitdepth']>8] def itertrns(pixels): for row in pixels: # For each row we group it into pixels, then form a # characterisation vector that says whether each pixel # is opaque or not. Then we convert True/False to # 0/maxval (by multiplication), and add it as the extra # channel. row = group(row, planes) opa = map(it.__ne__, row) opa = map(maxval.__mul__, opa) opa = zip(opa) # convert to 1-tuples yield array(typecode, itertools.chain(*map(operator.add, row, opa))) pixels = itertrns(pixels) targetbitdepth = None if self.sbit: sbit = struct.unpack('%dB' % len(self.sbit), self.sbit) targetbitdepth = max(sbit) if targetbitdepth > meta['bitdepth']: raise Error('sBIT chunk %r exceeds bitdepth %d' % (sbit,self.bitdepth)) if min(sbit) <= 0: raise Error('sBIT chunk %r has a 0-entry' % sbit) if targetbitdepth == meta['bitdepth']: targetbitdepth = None if targetbitdepth: shift = meta['bitdepth'] - targetbitdepth meta['bitdepth'] = targetbitdepth def itershift(pixels): for row in pixels: yield map(shift.__rrshift__, row) pixels = itershift(pixels) return x,y,pixels,meta
[ "def", "asDirect", "(", "self", ")", ":", "self", ".", "preamble", "(", ")", "# Simple case, no conversion necessary.", "if", "not", "self", ".", "colormap", "and", "not", "self", ".", "trns", "and", "not", "self", ".", "sbit", ":", "return", "self", ".", "read", "(", ")", "x", ",", "y", ",", "pixels", ",", "meta", "=", "self", ".", "read", "(", ")", "if", "self", ".", "colormap", ":", "meta", "[", "'colormap'", "]", "=", "False", "meta", "[", "'alpha'", "]", "=", "bool", "(", "self", ".", "trns", ")", "meta", "[", "'bitdepth'", "]", "=", "8", "meta", "[", "'planes'", "]", "=", "3", "+", "bool", "(", "self", ".", "trns", ")", "plte", "=", "self", ".", "palette", "(", ")", "def", "iterpal", "(", "pixels", ")", ":", "for", "row", "in", "pixels", ":", "row", "=", "map", "(", "plte", ".", "__getitem__", ",", "row", ")", "yield", "array", "(", "'B'", ",", "itertools", ".", "chain", "(", "*", "row", ")", ")", "pixels", "=", "iterpal", "(", "pixels", ")", "elif", "self", ".", "trns", ":", "# It would be nice if there was some reasonable way of doing", "# this without generating a whole load of intermediate tuples.", "# But tuples does seem like the easiest way, with no other way", "# clearly much simpler or much faster. (Actually, the L to LA", "# conversion could perhaps go faster (all those 1-tuples!), but", "# I still wonder whether the code proliferation is worth it)", "it", "=", "self", ".", "transparent", "maxval", "=", "2", "**", "meta", "[", "'bitdepth'", "]", "-", "1", "planes", "=", "meta", "[", "'planes'", "]", "meta", "[", "'alpha'", "]", "=", "True", "meta", "[", "'planes'", "]", "+=", "1", "typecode", "=", "'BH'", "[", "meta", "[", "'bitdepth'", "]", ">", "8", "]", "def", "itertrns", "(", "pixels", ")", ":", "for", "row", "in", "pixels", ":", "# For each row we group it into pixels, then form a", "# characterisation vector that says whether each pixel", "# is opaque or not. Then we convert True/False to", "# 0/maxval (by multiplication), and add it as the extra", "# channel.", "row", "=", "group", "(", "row", ",", "planes", ")", "opa", "=", "map", "(", "it", ".", "__ne__", ",", "row", ")", "opa", "=", "map", "(", "maxval", ".", "__mul__", ",", "opa", ")", "opa", "=", "zip", "(", "opa", ")", "# convert to 1-tuples", "yield", "array", "(", "typecode", ",", "itertools", ".", "chain", "(", "*", "map", "(", "operator", ".", "add", ",", "row", ",", "opa", ")", ")", ")", "pixels", "=", "itertrns", "(", "pixels", ")", "targetbitdepth", "=", "None", "if", "self", ".", "sbit", ":", "sbit", "=", "struct", ".", "unpack", "(", "'%dB'", "%", "len", "(", "self", ".", "sbit", ")", ",", "self", ".", "sbit", ")", "targetbitdepth", "=", "max", "(", "sbit", ")", "if", "targetbitdepth", ">", "meta", "[", "'bitdepth'", "]", ":", "raise", "Error", "(", "'sBIT chunk %r exceeds bitdepth %d'", "%", "(", "sbit", ",", "self", ".", "bitdepth", ")", ")", "if", "min", "(", "sbit", ")", "<=", "0", ":", "raise", "Error", "(", "'sBIT chunk %r has a 0-entry'", "%", "sbit", ")", "if", "targetbitdepth", "==", "meta", "[", "'bitdepth'", "]", ":", "targetbitdepth", "=", "None", "if", "targetbitdepth", ":", "shift", "=", "meta", "[", "'bitdepth'", "]", "-", "targetbitdepth", "meta", "[", "'bitdepth'", "]", "=", "targetbitdepth", "def", "itershift", "(", "pixels", ")", ":", "for", "row", "in", "pixels", ":", "yield", "map", "(", "shift", ".", "__rrshift__", ",", "row", ")", "pixels", "=", "itershift", "(", "pixels", ")", "return", "x", ",", "y", ",", "pixels", ",", "meta" ]
https://github.com/floooh/oryol/blob/eb08cffe1b1cb6b05ed14ec692bca9372cef064e/fips-files/generators/util/png.py#L1958-L2060
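A hedged usage sketch for asDirect, assuming the module above is importable as png (pypng-style API); 'input.png' is a placeholder path.

import png

reader = png.Reader(filename='input.png')
width, height, pixels, meta = reader.asDirect()
print(width, height, meta['planes'], meta['alpha'])
for row in pixels:  # boxed row flat pixel format
    print(row[:meta['planes']])  # first pixel of the first row
    break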
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
python
TarInfo.create_pax_header
(self, info, encoding)
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information.
Return the object as a ustar header block. If it cannot be
[ "Return", "the", "object", "as", "a", "ustar", "header", "block", ".", "If", "it", "cannot", "be" ]
def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
[ "def", "create_pax_header", "(", "self", ",", "info", ",", "encoding", ")", ":", "info", "[", "\"magic\"", "]", "=", "POSIX_MAGIC", "pax_headers", "=", "self", ".", "pax_headers", ".", "copy", "(", ")", "# Test string fields for values that exceed the field length or cannot", "# be represented in ASCII encoding.", "for", "name", ",", "hname", ",", "length", "in", "(", "(", "\"name\"", ",", "\"path\"", ",", "LENGTH_NAME", ")", ",", "(", "\"linkname\"", ",", "\"linkpath\"", ",", "LENGTH_LINK", ")", ",", "(", "\"uname\"", ",", "\"uname\"", ",", "32", ")", ",", "(", "\"gname\"", ",", "\"gname\"", ",", "32", ")", ")", ":", "if", "hname", "in", "pax_headers", ":", "# The pax header has priority.", "continue", "# Try to encode the string as ASCII.", "try", ":", "info", "[", "name", "]", ".", "encode", "(", "\"ascii\"", ",", "\"strict\"", ")", "except", "UnicodeEncodeError", ":", "pax_headers", "[", "hname", "]", "=", "info", "[", "name", "]", "continue", "if", "len", "(", "info", "[", "name", "]", ")", ">", "length", ":", "pax_headers", "[", "hname", "]", "=", "info", "[", "name", "]", "# Test number fields for values that exceed the field limit or values", "# that like to be stored as float.", "for", "name", ",", "digits", "in", "(", "(", "\"uid\"", ",", "8", ")", ",", "(", "\"gid\"", ",", "8", ")", ",", "(", "\"size\"", ",", "12", ")", ",", "(", "\"mtime\"", ",", "12", ")", ")", ":", "if", "name", "in", "pax_headers", ":", "# The pax header has priority. Avoid overflow.", "info", "[", "name", "]", "=", "0", "continue", "val", "=", "info", "[", "name", "]", "if", "not", "0", "<=", "val", "<", "8", "**", "(", "digits", "-", "1", ")", "or", "isinstance", "(", "val", ",", "float", ")", ":", "pax_headers", "[", "name", "]", "=", "str", "(", "val", ")", "info", "[", "name", "]", "=", "0", "# Create a pax extended header if necessary.", "if", "pax_headers", ":", "buf", "=", "self", ".", "_create_pax_generic_header", "(", "pax_headers", ",", "XHDTYPE", ",", "encoding", ")", "else", ":", "buf", "=", "b\"\"", "return", "buf", "+", "self", ".", "_create_header", "(", "info", ",", "USTAR_FORMAT", ",", "\"ascii\"", ",", "\"replace\"", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L2085-L2179
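create_pax_header is invoked indirectly when an archive is written in PAX format; a small sketch with the standard-library tarfile (which this backport mirrors), where an over-long name forces a pax extended header.

import io
import tarfile

buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w', format=tarfile.PAX_FORMAT) as tar:
    info = tarfile.TarInfo(name='x' * 150)  # exceeds the 100-byte ustar limit
    info.size = 0
    tar.addfile(info)
print(len(buf.getvalue()))  # archive now contains an extended header block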
Slicer/SlicerGitSVNArchive
65e92bb16c2b32ea47a1a66bee71f238891ee1ca
Base/Python/slicer/release/midasdata.py
python
itemExists
(folderID, itemName, token, communicator)
return False
Check if an item exists in a folder based on its name folderID -- ID of the folder. itemName -- Name of the item to check for. token -- Authentication token. communicator -- Midas session communicator. Returns a boolean indicating if the item exists or not.
Check if an item exists in a folder based on its name folderID -- ID of the folder. itemName -- Name of the item to check for. token -- Authentication token. communicator -- Midas session communicator. Returns a boolean indicating if the item exists or not.
[ "Check", "if", "an", "item", "exists", "in", "a", "folder", "based", "on", "its", "name", "folderID", "--", "ID", "of", "the", "folder", ".", "itemName", "--", "Name", "of", "the", "item", "to", "check", "for", ".", "token", "--", "Authentication", "token", ".", "communicator", "--", "Midas", "session", "communicator", ".", "Returns", "a", "boolean", "indicating", "if", "the", "item", "exists", "or", "not", "." ]
def itemExists(folderID, itemName, token, communicator): """Check if an item exists in a folder based on its name folderID -- ID of the folder. itemName -- Name of the item to check for. token -- Authentication token. communicator -- Midas session communicator. Returns a boolean indicating if the item exists or not.""" folderChildren = communicator.folder_children(token, folderID) folder_children_items = folderChildren["items"] for item in folder_children_items: if item["name"] == itemName: return True return False
[ "def", "itemExists", "(", "folderID", ",", "itemName", ",", "token", ",", "communicator", ")", ":", "folderChildren", "=", "communicator", ".", "folder_children", "(", "token", ",", "folderID", ")", "folder_children_items", "=", "folderChildren", "[", "\"items\"", "]", "for", "item", "in", "folder_children_items", ":", "if", "item", "[", "\"name\"", "]", "==", "itemName", ":", "return", "True", "return", "False" ]
https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Base/Python/slicer/release/midasdata.py#L48-L60
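A hedged sketch with a stub communicator, assuming itemExists is imported from the module above: the function only needs folder_children, so a fake stands in for the real pydas/Midas session communicator.

class StubCommunicator(object):
    def folder_children(self, token, folder_id):
        return {"items": [{"name": "volume.nrrd"}, {"name": "README"}]}

comm = StubCommunicator()
print(itemExists("42", "volume.nrrd", None, comm))  # True
print(itemExists("42", "missing.txt", None, comm))  # False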
Caffe-MPI/Caffe-MPI.github.io
df5992af571a2a19981b69635115c393f18d1c76
python/caffe/io.py
python
datum_to_array
(datum)
Converts a datum to an array. Note that the label is not returned, as one can easily get it by calling datum.label.
Converts a datum to an array. Note that the label is not returned, as one can easily get it by calling datum.label.
[ "Converts", "a", "datum", "to", "an", "array", ".", "Note", "that", "the", "label", "is", "not", "returned", "as", "one", "can", "easily", "get", "it", "by", "calling", "datum", ".", "label", "." ]
def datum_to_array(datum): """Converts a datum to an array. Note that the label is not returned, as one can easily get it by calling datum.label. """ if len(datum.data): return np.fromstring(datum.data, dtype=np.uint8).reshape( datum.channels, datum.height, datum.width) else: return np.array(datum.float_data).astype(float).reshape( datum.channels, datum.height, datum.width)
[ "def", "datum_to_array", "(", "datum", ")", ":", "if", "len", "(", "datum", ".", "data", ")", ":", "return", "np", ".", "fromstring", "(", "datum", ".", "data", ",", "dtype", "=", "np", ".", "uint8", ")", ".", "reshape", "(", "datum", ".", "channels", ",", "datum", ".", "height", ",", "datum", ".", "width", ")", "else", ":", "return", "np", ".", "array", "(", "datum", ".", "float_data", ")", ".", "astype", "(", "float", ")", ".", "reshape", "(", "datum", ".", "channels", ",", "datum", ".", "height", ",", "datum", ".", "width", ")" ]
https://github.com/Caffe-MPI/Caffe-MPI.github.io/blob/df5992af571a2a19981b69635115c393f18d1c76/python/caffe/io.py#L84-L93
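A round-trip sketch, assuming pycaffe is built and importable; array_to_datum is the companion helper in caffe.io.

import numpy as np
import caffe.io

arr = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)  # channels, height, width
datum = caffe.io.array_to_datum(arr, label=1)
restored = caffe.io.datum_to_array(datum)
assert np.array_equal(arr, restored)
print(datum.label)  # 1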
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
addon-sdk/source/python-lib/simplejson/encoder.py
python
floatstr
(o, allow_nan=True)
return text
Check for specials. Note that this type of test is processor- and/or platform-specific, so do tests which don't depend on the internals.
Check for specials. Note that this type of test is processor- and/or platform-specific, so do tests which don't depend on the internals.
[ "Check", "for", "specials", ".", "Note", "that", "this", "type", "of", "test", "is", "processor", "-", "and", "/", "or", "platform", "-", "specific", "so", "do", "tests", "which", "don", "t", "depend", "on", "the", "internals", "." ]
def floatstr(o, allow_nan=True): """ Check for specials. Note that this type of test is processor- and/or platform-specific, so do tests which don't depend on the internals. """ if o != o: text = 'NaN' elif o == INFINITY: text = 'Infinity' elif o == -INFINITY: text = '-Infinity' else: return FLOAT_REPR(o) if not allow_nan: raise ValueError("Out of range float values are not JSON compliant: %r" % (o,)) return text
[ "def", "floatstr", "(", "o", ",", "allow_nan", "=", "True", ")", ":", "if", "o", "!=", "o", ":", "text", "=", "'NaN'", "elif", "o", "==", "INFINITY", ":", "text", "=", "'Infinity'", "elif", "o", "==", "-", "INFINITY", ":", "text", "=", "'-Infinity'", "else", ":", "return", "FLOAT_REPR", "(", "o", ")", "if", "not", "allow_nan", ":", "raise", "ValueError", "(", "\"Out of range float values are not JSON compliant: %r\"", "%", "(", "o", ",", ")", ")", "return", "text" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/addon-sdk/source/python-lib/simplejson/encoder.py#L30-L48
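The standard-library json module implements the same special-float policy, so its behaviour illustrates floatstr without needing this bundled simplejson.

import json

print(json.dumps(float('nan')))  # NaN
print(json.dumps(float('inf')))  # Infinity
try:
    json.dumps(float('nan'), allow_nan=False)
except ValueError as exc:
    print(exc)  # out-of-range floats are rejected, as in floatstr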
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/Tkinter.py
python
Misc.winfo_rgb
(self, color)
return self._getints( self.tk.call('winfo', 'rgb', self._w, color))
Return tuple of decimal values for red, green, blue for COLOR in this widget.
Return tuple of decimal values for red, green, blue for COLOR in this widget.
[ "Return", "tuple", "of", "decimal", "values", "for", "red", "green", "blue", "for", "COLOR", "in", "this", "widget", "." ]
def winfo_rgb(self, color): """Return tuple of decimal values for red, green, blue for COLOR in this widget.""" return self._getints( self.tk.call('winfo', 'rgb', self._w, color))
[ "def", "winfo_rgb", "(", "self", ",", "color", ")", ":", "return", "self", ".", "_getints", "(", "self", ".", "tk", ".", "call", "(", "'winfo'", ",", "'rgb'", ",", "self", ".", "_w", ",", "color", ")", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/Tkinter.py#L833-L837
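An interactive sketch; a display is required because Tkinter must create a root window before colour names can be resolved (Python 2 import, matching this source tree).

import Tkinter

root = Tkinter.Tk()
print(root.winfo_rgb('red'))      # (65535, 0, 0): 16-bit value per channel
print(root.winfo_rgb('#00ff00'))  # (0, 65535, 0)
root.destroy()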
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/deps/v8/tools/vim/ninja_output.py
python
GetNinjaOutputDirectory
(v8_root, configuration=None)
Returns <v8_root>/<output_dir>/(Release|Debug|<other>). The configuration chosen is the one most recently generated/built, but can be overridden via the <configuration> parameter. Detects a custom output_dir specified by GYP_GENERATOR_FLAGS.
Returns <v8_root>/<output_dir>/(Release|Debug|<other>).
[ "Returns", "<v8_root", ">", "/", "<output_dir", ">", "/", "(", "Release|Debug|<other", ">", ")", "." ]
def GetNinjaOutputDirectory(v8_root, configuration=None): """Returns <v8_root>/<output_dir>/(Release|Debug|<other>). The configuration chosen is the one most recently generated/built, but can be overridden via the <configuration> parameter. Detects a custom output_dir specified by GYP_GENERATOR_FLAGS.""" output_dirs = [] generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ') for flag in generator_flags: name_value = flag.split('=', 1) if (len(name_value) == 2 and name_value[0] == 'output_dir' and os.path.isdir(os.path.join(v8_root, name_value[1]))): output_dirs = [name_value[1]] if configuration: output_dir = 'out' if len(output_dirs) == 0 else output_dirs[-1] return os.path.join(os.path.join(v8_root, output_dir), configuration) if not output_dirs: for f in os.listdir(v8_root): if re.match(r'out(\b|_)', f): if os.path.isdir(os.path.join(v8_root, f)): output_dirs.append(f) def generate_paths(): for out_dir in output_dirs: out_path = os.path.join(v8_root, out_dir) for config in os.listdir(out_path): path = os.path.join(out_path, config) if os.path.exists(os.path.join(path, 'build.ninja')): yield path def approx_directory_mtime(path): # This is a heuristic; don't recurse into subdirectories. paths = [path] + [os.path.join(path, f) for f in os.listdir(path)] return max(filter(None, [safe_mtime(p) for p in paths])) def safe_mtime(path): try: return os.path.getmtime(path) except OSError: return None try: return max(generate_paths(), key=approx_directory_mtime) except ValueError: raise RuntimeError('Unable to find a valid ninja output directory.')
[ "def", "GetNinjaOutputDirectory", "(", "v8_root", ",", "configuration", "=", "None", ")", ":", "output_dirs", "=", "[", "]", "generator_flags", "=", "os", ".", "getenv", "(", "'GYP_GENERATOR_FLAGS'", ",", "''", ")", ".", "split", "(", "' '", ")", "for", "flag", "in", "generator_flags", ":", "name_value", "=", "flag", ".", "split", "(", "'='", ",", "1", ")", "if", "(", "len", "(", "name_value", ")", "==", "2", "and", "name_value", "[", "0", "]", "==", "'output_dir'", "and", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "v8_root", ",", "name_value", "[", "1", "]", ")", ")", ")", ":", "output_dirs", "=", "[", "name_value", "[", "1", "]", "]", "if", "configuration", ":", "output_dir", "=", "'out'", "if", "len", "(", "output_dirs", ")", "==", "0", "else", "output_dirs", "[", "-", "1", "]", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "join", "(", "v8_root", ",", "output_dir", ")", ",", "configuration", ")", "if", "not", "output_dirs", ":", "for", "f", "in", "os", ".", "listdir", "(", "v8_root", ")", ":", "if", "re", ".", "match", "(", "r'out(\\b|_)'", ",", "f", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "v8_root", ",", "f", ")", ")", ":", "output_dirs", ".", "append", "(", "f", ")", "def", "generate_paths", "(", ")", ":", "for", "out_dir", "in", "output_dirs", ":", "out_path", "=", "os", ".", "path", ".", "join", "(", "v8_root", ",", "out_dir", ")", "for", "config", "in", "os", ".", "listdir", "(", "out_path", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "out_path", ",", "config", ")", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'build.ninja'", ")", ")", ":", "yield", "path", "def", "approx_directory_mtime", "(", "path", ")", ":", "# This is a heuristic; don't recurse into subdirectories.", "paths", "=", "[", "path", "]", "+", "[", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "path", ")", "]", "return", "max", "(", "filter", "(", "None", ",", "[", "safe_mtime", "(", "p", ")", "for", "p", "in", "paths", "]", ")", ")", "def", "safe_mtime", "(", "path", ")", ":", "try", ":", "return", "os", ".", "path", ".", "getmtime", "(", "path", ")", "except", "OSError", ":", "return", "None", "try", ":", "return", "max", "(", "generate_paths", "(", ")", ",", "key", "=", "approx_directory_mtime", ")", "except", "ValueError", ":", "raise", "RuntimeError", "(", "'Unable to find a valid ninja output directory.'", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/deps/v8/tools/vim/ninja_output.py#L18-L66
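A hedged usage sketch; the import path and checkout location are assumptions, and v8_root must contain an out*/ directory with a generated build.ninja.

from ninja_output import GetNinjaOutputDirectory

print(GetNinjaOutputDirectory('/path/to/v8'))             # most recently built config
print(GetNinjaOutputDirectory('/path/to/v8', 'Release'))  # force a configuration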
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/websocket-client/websocket.py
python
WebSocket.gettimeout
(self)
return self.sock.gettimeout()
Get the websocket timeout in seconds.
Get the websocket timeout in seconds.
[ "Get", "the", "websocket", "timeout", "in", "seconds", "." ]
def gettimeout(self): """ Get the websocket timeout in seconds. """ return self.sock.gettimeout()
[ "def", "gettimeout", "(", "self", ")", ":", "return", "self", ".", "sock", ".", "gettimeout", "(", ")" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/websocket-client/websocket.py#L406-L410
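A small sketch against the same module's WebSocket class; no network traffic is needed just to read the timeout back.

from websocket import WebSocket

ws = WebSocket()
ws.settimeout(5)
print(ws.gettimeout())  # 5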
potassco/clingo
e0c91d8f95cc28de1c480a871f9c97c30de83d40
libpyclingo/clingo/ast.py
python
Comparison
(comparison: int, left: AST, right: AST)
return AST(p_ast[0])
Construct an AST node of type `ASTType.Comparison`.
Construct an AST node of type `ASTType.Comparison`.
[ "Construct", "an", "AST", "node", "of", "type", "ASTType", ".", "Comparison", "." ]
def Comparison(comparison: int, left: AST, right: AST) -> AST: ''' Construct an AST node of type `ASTType.Comparison`. ''' p_ast = _ffi.new('clingo_ast_t**') _handle_error(_lib.clingo_ast_build( _lib.clingo_ast_type_comparison, p_ast, _ffi.cast('int', comparison), left._rep, right._rep)) return AST(p_ast[0])
[ "def", "Comparison", "(", "comparison", ":", "int", ",", "left", ":", "AST", ",", "right", ":", "AST", ")", "->", "AST", ":", "p_ast", "=", "_ffi", ".", "new", "(", "'clingo_ast_t**'", ")", "_handle_error", "(", "_lib", ".", "clingo_ast_build", "(", "_lib", ".", "clingo_ast_type_comparison", ",", "p_ast", ",", "_ffi", ".", "cast", "(", "'int'", ",", "comparison", ")", ",", "left", ".", "_rep", ",", "right", ".", "_rep", ")", ")", "return", "AST", "(", "p_ast", "[", "0", "]", ")" ]
https://github.com/potassco/clingo/blob/e0c91d8f95cc28de1c480a871f9c97c30de83d40/libpyclingo/clingo/ast.py#L1357-L1367
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py
python
StateSpaceModel._exogenous_input_step
( self, current_times, current_exogenous_regressors, state)
return state
Update state with exogenous regressors. Allows both increases and decreases in uncertainty. Args: current_times: A [batch size] Tensor of times for the exogenous values being input. current_exogenous_regressors: A [batch size x exogenous input dimension] Tensor of exogenous values for each part of the batch. state: A tuple of (mean, covariance, previous_times) having shapes mean; [batch size x state dimension] covariance; [batch size x state dimension x state dimension] previous_times; [batch size] Returns: Updated state taking the exogenous regressors into account.
Update state with exogenous regressors.
[ "Update", "state", "with", "exogenous", "regressors", "." ]
def _exogenous_input_step( self, current_times, current_exogenous_regressors, state): """Update state with exogenous regressors. Allows both increases and decreases in uncertainty. Args: current_times: A [batch size] Tensor of times for the exogenous values being input. current_exogenous_regressors: A [batch size x exogenous input dimension] Tensor of exogenous values for each part of the batch. state: A tuple of (mean, covariance, previous_times) having shapes mean; [batch size x state dimension] covariance; [batch size x state dimension x state dimension] previous_times; [batch size] Returns: Updated state taking the exogenous regressors into account. """ if self._configuration.exogenous_noise_decreases: state = self._exogenous_noise_decreasing( current_times, current_exogenous_regressors, state) if self._configuration.exogenous_noise_increases: state = self._exogenous_noise_increasing( current_times, current_exogenous_regressors, state) return state
[ "def", "_exogenous_input_step", "(", "self", ",", "current_times", ",", "current_exogenous_regressors", ",", "state", ")", ":", "if", "self", ".", "_configuration", ".", "exogenous_noise_decreases", ":", "state", "=", "self", ".", "_exogenous_noise_decreasing", "(", "current_times", ",", "current_exogenous_regressors", ",", "state", ")", "if", "self", ".", "_configuration", ".", "exogenous_noise_increases", ":", "state", "=", "self", ".", "_exogenous_noise_increasing", "(", "current_times", ",", "current_exogenous_regressors", ",", "state", ")", "return", "state" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py#L587-L611
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/devil/devil/android/sdk/shared_prefs.py
python
SharedPrefs.__repr__
(self)
return '<{cls} file {filename} for {package} on {device}>'.format( cls=type(self).__name__, filename=self.filename, package=self.package, device=str(self._device))
Get a useful printable representation of the object.
Get a useful printable representation of the object.
[ "Get", "a", "useful", "printable", "representation", "of", "the", "object", "." ]
def __repr__(self): """Get a useful printable representation of the object.""" return '<{cls} file {filename} for {package} on {device}>'.format( cls=type(self).__name__, filename=self.filename, package=self.package, device=str(self._device))
[ "def", "__repr__", "(", "self", ")", ":", "return", "'<{cls} file {filename} for {package} on {device}>'", ".", "format", "(", "cls", "=", "type", "(", "self", ")", ".", "__name__", ",", "filename", "=", "self", ".", "filename", ",", "package", "=", "self", ".", "package", ",", "device", "=", "str", "(", "self", ".", "_device", ")", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/devil/devil/android/sdk/shared_prefs.py#L203-L207
microsoft/TSS.MSR
0f2516fca2cd9929c31d5450e39301c9bde43688
TSS.Py/src/TpmTypes.py
python
TPM2_PolicyLocality_REQUEST.toTpm
(self, buf)
TpmMarshaller method
TpmMarshaller method
[ "TpmMarshaller", "method" ]
def toTpm(self, buf): """ TpmMarshaller method """ buf.writeByte(self.locality)
[ "def", "toTpm", "(", "self", ",", "buf", ")", ":", "buf", ".", "writeByte", "(", "self", ".", "locality", ")" ]
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L14543-L14545
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/pubsub/core/topicdefnprovider.py
python
exportTopicTreeSpec
(moduleName = None, rootTopic=None, bak='bak', moduleDoc=None)
Using TopicTreeSpecPrinter, exports the topic tree rooted at rootTopic to a Python module (.py) file. This module will define module-level classes representing root topics, nested classes for subtopics etc. Returns a string representing the contents of the file. Parameters: - If moduleName is given, the topic tree is written to moduleName.py in os.getcwd(). By default, it is first backed up, if it already exists, using bak as the filename extension. If bak is None, the existing module file gets overwritten. - If rootTopic is specified, the export only traverses the tree from the corresponding topic. Otherwise, the complete tree is exported, using pub.getDefaultTopicTreeRoot() as the starting point. - The moduleDoc is the doc string for the module, i.e. the topic tree.
Using TopicTreeSpecPrinter, exports the topic tree rooted at rootTopic to a Python module (.py) file. This module will define module-level classes representing root topics, nested classes for subtopics etc. Returns a string representing the contents of the file. Parameters:
[ "Using", "TopicTreeSpecPrinter", "exports", "the", "topic", "tree", "rooted", "at", "rootTopic", "to", "a", "Python", "module", "(", ".", "py", ")", "file", ".", "This", "module", "will", "define", "module", "-", "level", "classes", "representing", "root", "topics", "nested", "classes", "for", "subtopics", "etc", ".", "Returns", "a", "string", "representing", "the", "contents", "of", "the", "file", ".", "Parameters", ":" ]
def exportTopicTreeSpec(moduleName = None, rootTopic=None, bak='bak', moduleDoc=None): """Using TopicTreeSpecPrinter, exports the topic tree rooted at rootTopic to a Python module (.py) file. This module will define module-level classes representing root topics, nested classes for subtopics etc. Returns a string representing the contents of the file. Parameters: - If moduleName is given, the topic tree is written to moduleName.py in os.getcwd(). By default, it is first backed up, if it already exists, using bak as the filename extension. If bak is None, the existing module file gets overwritten. - If rootTopic is specified, the export only traverses the tree from the corresponding topic. Otherwise, the complete tree is exported, using pub.getDefaultTopicTreeRoot() as the starting point. - The moduleDoc is the doc string for the module, i.e. the topic tree. """ if rootTopic is None: from .. import pub rootTopic = pub.getDefaultTopicMgr().getRootAllTopics() elif py2and3.isstring(rootTopic): from .. import pub rootTopic = pub.getDefaultTopicMgr().getTopic(rootTopic) # create exporter if moduleName is None: capture = py2and3.StringIO() TopicTreeSpecPrinter(rootTopic, fileObj=capture, treeDoc=moduleDoc) return capture.getvalue() else: filename = '%s.py' % moduleName if bak: _backupIfExists(filename, bak) moduleFile = open(filename, 'w') try: TopicTreeSpecPrinter(rootTopic, fileObj=moduleFile, treeDoc=moduleDoc) finally: moduleFile.close()
[ "def", "exportTopicTreeSpec", "(", "moduleName", "=", "None", ",", "rootTopic", "=", "None", ",", "bak", "=", "'bak'", ",", "moduleDoc", "=", "None", ")", ":", "if", "rootTopic", "is", "None", ":", "from", ".", ".", "import", "pub", "rootTopic", "=", "pub", ".", "getDefaultTopicMgr", "(", ")", ".", "getRootAllTopics", "(", ")", "elif", "py2and3", ".", "isstring", "(", "rootTopic", ")", ":", "from", ".", ".", "import", "pub", "rootTopic", "=", "pub", ".", "getDefaultTopicMgr", "(", ")", ".", "getTopic", "(", "rootTopic", ")", "# create exporter", "if", "moduleName", "is", "None", ":", "capture", "=", "py2and3", ".", "StringIO", "(", ")", "TopicTreeSpecPrinter", "(", "rootTopic", ",", "fileObj", "=", "capture", ",", "treeDoc", "=", "moduleDoc", ")", "return", "capture", ".", "getvalue", "(", ")", "else", ":", "filename", "=", "'%s.py'", "%", "moduleName", "if", "bak", ":", "_backupIfExists", "(", "filename", ",", "bak", ")", "moduleFile", "=", "open", "(", "filename", ",", "'w'", ")", "try", ":", "TopicTreeSpecPrinter", "(", "rootTopic", ",", "fileObj", "=", "moduleFile", ",", "treeDoc", "=", "moduleDoc", ")", "finally", ":", "moduleFile", ".", "close", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/pubsub/core/topicdefnprovider.py#L413-L450
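A hedged usage sketch, assuming the wx.lib.pubsub package from this source tree; subscribing first populates the topic tree that gets exported.

from wx.lib.pubsub import pub
from wx.lib.pubsub.core.topicdefnprovider import exportTopicTreeSpec

def on_temperature():
    pass

pub.subscribe(on_temperature, 'sensors.temperature')
spec = exportTopicTreeSpec()  # moduleName=None returns the module source
print(spec)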
nasa/fprime
595cf3682d8365943d86c1a6fe7c78f0a116acf0
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
python
XmlSerializeParser.get_comment
(self)
return self.__comment
Return text block string of comment for serializable class.
Return text block string of comment for serializable class.
[ "Return", "text", "block", "string", "of", "comment", "for", "serializable", "class", "." ]
def get_comment(self): """ Return text block string of comment for serializable class. """ return self.__comment
[ "def", "get_comment", "(", "self", ")", ":", "return", "self", ".", "__comment" ]
https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py#L316-L320
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/drafttaskpanels/task_polararray.py
python
TaskPanelPolarArray.set_focus
(self, key=None)
Set the focus on the widget that receives the key signal.
Set the focus on the widget that receives the key signal.
[ "Set", "the", "focus", "on", "the", "widget", "that", "receives", "the", "key", "signal", "." ]
def set_focus(self, key=None): """Set the focus on the widget that receives the key signal.""" if key is None or key == "x": self.form.input_c_x.setFocus() self.form.input_c_x.selectAll() elif key == "y": self.form.input_c_y.setFocus() self.form.input_c_y.selectAll() elif key == "z": self.form.input_c_z.setFocus() self.form.input_c_z.selectAll()
[ "def", "set_focus", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", "or", "key", "==", "\"x\"", ":", "self", ".", "form", ".", "input_c_x", ".", "setFocus", "(", ")", "self", ".", "form", ".", "input_c_x", ".", "selectAll", "(", ")", "elif", "key", "==", "\"y\"", ":", "self", ".", "form", ".", "input_c_y", ".", "setFocus", "(", ")", "self", ".", "form", ".", "input_c_y", ".", "selectAll", "(", ")", "elif", "key", "==", "\"z\"", ":", "self", ".", "form", ".", "input_c_z", ".", "setFocus", "(", ")", "self", ".", "form", ".", "input_c_z", ".", "selectAll", "(", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/drafttaskpanels/task_polararray.py#L424-L434
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/boto3/docs/service.py
python
ServiceDocumenter.document_service
(self)
return doc_structure.flush_structure()
Documents an entire service. :returns: The reStructured text of the documented service.
Documents an entire service.
[ "Documents", "an", "entire", "service", "." ]
def document_service(self): """Documents an entire service. :returns: The reStructured text of the documented service. """ doc_structure = DocumentStructure( self._service_name, section_names=self.sections, target='html') self.title(doc_structure.get_section('title')) self.table_of_contents(doc_structure.get_section('table-of-contents')) self.client_api(doc_structure.get_section('client')) self.paginator_api(doc_structure.get_section('paginators')) self.waiter_api(doc_structure.get_section('waiters')) if self._service_resource: self._document_service_resource( doc_structure.get_section('service-resource')) self._document_resources(doc_structure.get_section('resources')) self._document_examples(doc_structure.get_section('examples')) return doc_structure.flush_structure()
[ "def", "document_service", "(", "self", ")", ":", "doc_structure", "=", "DocumentStructure", "(", "self", ".", "_service_name", ",", "section_names", "=", "self", ".", "sections", ",", "target", "=", "'html'", ")", "self", ".", "title", "(", "doc_structure", ".", "get_section", "(", "'title'", ")", ")", "self", ".", "table_of_contents", "(", "doc_structure", ".", "get_section", "(", "'table-of-contents'", ")", ")", "self", ".", "client_api", "(", "doc_structure", ".", "get_section", "(", "'client'", ")", ")", "self", ".", "paginator_api", "(", "doc_structure", ".", "get_section", "(", "'paginators'", ")", ")", "self", ".", "waiter_api", "(", "doc_structure", ".", "get_section", "(", "'waiters'", ")", ")", "if", "self", ".", "_service_resource", ":", "self", ".", "_document_service_resource", "(", "doc_structure", ".", "get_section", "(", "'service-resource'", ")", ")", "self", ".", "_document_resources", "(", "doc_structure", ".", "get_section", "(", "'resources'", ")", ")", "self", ".", "_document_examples", "(", "doc_structure", ".", "get_section", "(", "'examples'", ")", ")", "return", "doc_structure", ".", "flush_structure", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/boto3/docs/service.py#L53-L72
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py
python
tbe_initialize
(job: TbeJob)
return res
Tbe Initialize :param job: :return:
Tbe Initialize :param job: :return:
[ "Tbe", "Initialize", ":", "param", "job", ":", ":", "return", ":" ]
def tbe_initialize(job: TbeJob): """ Tbe Initialize :param job: :return: """ os.environ["CONTEXT_MODELCOMPILING"] = "TRUE" soc_info = get_soc_info(job.content) res = te_set_version(*soc_info) if not res: job.error("Set version failed") res = _tune_init(job) if not res: job.error("Tune init failed") lock_file = os.path.join(job.content["SocInfo"]["op_debug_dir"], "kernel_meta", "file.lock") local_lock = LocalLock(lock_file) try: local_lock.lock() res = _cann_kb_load(job) if res == 1: job.error("Cann kb load failed") res = _parallel_compilation_init(job) if not res: job.error("Parallel compilation failed") except RuntimeError: job.error("Initialize failed with RuntimeError") finally: local_lock.unlock() job.result = "Success" return res
[ "def", "tbe_initialize", "(", "job", ":", "TbeJob", ")", ":", "os", ".", "environ", "[", "\"CONTEXT_MODELCOMPILING\"", "]", "=", "\"TRUE\"", "soc_info", "=", "get_soc_info", "(", "job", ".", "content", ")", "res", "=", "te_set_version", "(", "*", "soc_info", ")", "if", "not", "res", ":", "job", ".", "error", "(", "\"Set version failed\"", ")", "res", "=", "_tune_init", "(", "job", ")", "if", "not", "res", ":", "job", ".", "error", "(", "\"Tune init failed\"", ")", "lock_file", "=", "os", ".", "path", ".", "join", "(", "job", ".", "content", "[", "\"SocInfo\"", "]", "[", "\"op_debug_dir\"", "]", ",", "\"kernel_meta\"", ",", "\"file.lock\"", ")", "local_lock", "=", "LocalLock", "(", "lock_file", ")", "try", ":", "local_lock", ".", "lock", "(", ")", "res", "=", "_cann_kb_load", "(", "job", ")", "if", "res", "==", "1", ":", "job", ".", "error", "(", "\"Cann kb load failed\"", ")", "res", "=", "_parallel_compilation_init", "(", "job", ")", "if", "not", "res", ":", "job", ".", "error", "(", "\"Parallel compilation failed\"", ")", "except", "RuntimeError", ":", "job", ".", "error", "(", "\"Initialize failed with RuntimeError\"", ")", "finally", ":", "local_lock", ".", "unlock", "(", ")", "job", ".", "result", "=", "\"Success\"", "return", "res" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py#L248-L277
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/contrib/learn/python/learn/datasets/base.py
python
load_iris
(data_path=None)
return load_csv_with_header( data_path, target_dtype=np.int, features_dtype=np.float)
Load Iris dataset. Args: data_path: string, path to iris dataset (optional) Returns: Dataset object containing data in-memory.
Load Iris dataset.
[ "Load", "Iris", "dataset", "." ]
def load_iris(data_path=None): """Load Iris dataset. Args: data_path: string, path to iris dataset (optional) Returns: Dataset object containing data in-memory. """ if data_path is None: module_path = path.dirname(__file__) data_path = path.join(module_path, 'data', 'iris.csv') return load_csv_with_header( data_path, target_dtype=np.int, features_dtype=np.float)
[ "def", "load_iris", "(", "data_path", "=", "None", ")", ":", "if", "data_path", "is", "None", ":", "module_path", "=", "path", ".", "dirname", "(", "__file__", ")", "data_path", "=", "path", ".", "join", "(", "module_path", ",", "'data'", ",", "'iris.csv'", ")", "return", "load_csv_with_header", "(", "data_path", ",", "target_dtype", "=", "np", ".", "int", ",", "features_dtype", "=", "np", ".", "float", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/learn/python/learn/datasets/base.py#L88-L103
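A usage sketch, assuming TensorFlow 1.x where contrib.learn still ships; the CSV is bundled with the package, so no download is needed.

from tensorflow.contrib.learn.python.learn.datasets import base

iris = base.load_iris()
print(iris.data.shape)    # (150, 4)
print(iris.target.shape)  # (150,)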
omnisci/omniscidb
b9c95f1bd602b4ffc8b0edf18bfad61031e08d86
Benchmarks/run_benchmark.py
python
clear_system_caches
()
Clears system caches
Clears system caches
[ "Clears", "system", "caches" ]
def clear_system_caches(): """ Clears system caches """ try: os.system('sudo sh -c "/bin/echo 3 > /proc/sys/vm/drop_caches"') except Exception as e: errormessage = "Clear system caches failed with error: " + str(e) logging.error(errormessage)
[ "def", "clear_system_caches", "(", ")", ":", "try", ":", "os", ".", "system", "(", "'sudo sh -c \"/bin/echo 3 > /proc/sys/vm/drop_caches\"'", ")", "except", "Exception", "as", "e", ":", "errormessage", "=", "\"Clear system caches failed with error: \"", "+", "str", "(", "e", ")", "logging", ".", "error", "(", "errormessage", ")" ]
https://github.com/omnisci/omniscidb/blob/b9c95f1bd602b4ffc8b0edf18bfad61031e08d86/Benchmarks/run_benchmark.py#L626-L634
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_aarch64/python2.7/dist-packages/rosdistro/dependency_walker.py
python
DependencyWalker.get_depends_on
(self, pkg_name, depend_type, ignore_pkgs=None)
return depends_on
Return a set of package names which depend on the package.
Return a set of package names which depend on the package.
[ "Return", "a", "set", "of", "package", "names", "which", "depend", "on", "the", "package", "." ]
def get_depends_on(self, pkg_name, depend_type, ignore_pkgs=None): '''Return a set of package names which depend on the package.''' ignore_pkgs = ignore_pkgs or [] depends_on = set([]) for name in self._distribution_instance.release_packages.keys(): if name in ignore_pkgs: continue pkg = self._distribution_instance.release_packages[name] repo = self._distribution_instance.repositories[pkg.repository_name].release_repository if repo is None or repo.version is None: continue deps = self._get_dependencies(name, depend_type) if pkg_name in deps: depends_on.add(name) return depends_on
[ "def", "get_depends_on", "(", "self", ",", "pkg_name", ",", "depend_type", ",", "ignore_pkgs", "=", "None", ")", ":", "ignore_pkgs", "=", "ignore_pkgs", "or", "[", "]", "depends_on", "=", "set", "(", "[", "]", ")", "for", "name", "in", "self", ".", "_distribution_instance", ".", "release_packages", ".", "keys", "(", ")", ":", "if", "name", "in", "ignore_pkgs", ":", "continue", "pkg", "=", "self", ".", "_distribution_instance", ".", "release_packages", "[", "name", "]", "repo", "=", "self", ".", "_distribution_instance", ".", "repositories", "[", "pkg", ".", "repository_name", "]", ".", "release_repository", "if", "repo", "is", "None", "or", "repo", ".", "version", "is", "None", ":", "continue", "deps", "=", "self", ".", "_get_dependencies", "(", "name", ",", "depend_type", ")", "if", "pkg_name", "in", "deps", ":", "depends_on", ".", "add", "(", "name", ")", "return", "depends_on" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_aarch64/python2.7/dist-packages/rosdistro/dependency_walker.py#L86-L100
EricLYang/courseRepo
60679ec7ec130fe0cff9d26b704f1e286e5fde13
9_class/arcodelocal/build/catkin_generated/installspace/_setup_util.py
python
_prefix_env_variable
(environ, name, paths, subfolders)
return prefix_str
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
[ "Return", "the", "prefix", "to", "prepend", "to", "the", "environment", "variable", "NAME", "adding", "any", "path", "in", "NEW_PATHS_STR", "without", "creating", "duplicate", "or", "empty", "items", "." ]
def _prefix_env_variable(environ, name, paths, subfolders): ''' Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items. ''' value = environ[name] if name in environ else '' environ_paths = [path for path in value.split(os.pathsep) if path] checked_paths = [] for path in paths: if not isinstance(subfolders, list): subfolders = [subfolders] for subfolder in subfolders: path_tmp = path if subfolder: path_tmp = os.path.join(path_tmp, subfolder) # skip nonexistent paths if not os.path.exists(path_tmp): continue # exclude any path already in env and any path we already added if path_tmp not in environ_paths and path_tmp not in checked_paths: checked_paths.append(path_tmp) prefix_str = os.pathsep.join(checked_paths) if prefix_str != '' and environ_paths: prefix_str += os.pathsep return prefix_str
[ "def", "_prefix_env_variable", "(", "environ", ",", "name", ",", "paths", ",", "subfolders", ")", ":", "value", "=", "environ", "[", "name", "]", "if", "name", "in", "environ", "else", "''", "environ_paths", "=", "[", "path", "for", "path", "in", "value", ".", "split", "(", "os", ".", "pathsep", ")", "if", "path", "]", "checked_paths", "=", "[", "]", "for", "path", "in", "paths", ":", "if", "not", "isinstance", "(", "subfolders", ",", "list", ")", ":", "subfolders", "=", "[", "subfolders", "]", "for", "subfolder", "in", "subfolders", ":", "path_tmp", "=", "path", "if", "subfolder", ":", "path_tmp", "=", "os", ".", "path", ".", "join", "(", "path_tmp", ",", "subfolder", ")", "# skip nonexistent paths", "if", "not", "os", ".", "path", ".", "exists", "(", "path_tmp", ")", ":", "continue", "# exclude any path already in env and any path we already added", "if", "path_tmp", "not", "in", "environ_paths", "and", "path_tmp", "not", "in", "checked_paths", ":", "checked_paths", ".", "append", "(", "path_tmp", ")", "prefix_str", "=", "os", ".", "pathsep", ".", "join", "(", "checked_paths", ")", "if", "prefix_str", "!=", "''", "and", "environ_paths", ":", "prefix_str", "+=", "os", ".", "pathsep", "return", "prefix_str" ]
https://github.com/EricLYang/courseRepo/blob/60679ec7ec130fe0cff9d26b704f1e286e5fde13/9_class/arcodelocal/build/catkin_generated/installspace/_setup_util.py#L150-L173
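The skip/dedup behaviour of _prefix_env_variable is easy to exercise in isolation; a sketch assuming the function above is in scope, using temporary directories so the os.path.exists check passes:

# Demo: an already-present path is skipped, a new one is prepended.
import os
import tempfile

d1 = tempfile.mkdtemp()
d2 = tempfile.mkdtemp()
environ = {'SOME_PATH_VAR': d1}                      # d1 is already in the variable
prefix = _prefix_env_variable(environ, 'SOME_PATH_VAR', [d1, d2], '')
assert prefix == d2 + os.pathsep                     # only d2, plus the trailing separator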
CGRU/cgru
1881a4128530e3d31ac6c25314c18314fc50c2c7
afanasy/python/services/service.py
python
service.doPost
(self)
return post_cmds
Missing DocString :return:
Missing DocString
[ "Missing", "DocString" ]
def doPost(self): """Missing DocString :return: """ post_cmds = [] if not afcommon.checkBlockFlag(self.taskInfo['block_flags'], 'skipthumbnails'): post_cmds.extend(self.generateThumbnail(False)) # post_cmds.extend(['ls -la > ' + self.taskInfo['store_dir'] + '/afile']) return post_cmds
[ "def", "doPost", "(", "self", ")", ":", "post_cmds", "=", "[", "]", "if", "not", "afcommon", ".", "checkBlockFlag", "(", "self", ".", "taskInfo", "[", "'block_flags'", "]", ",", "'skipthumbnails'", ")", ":", "post_cmds", ".", "extend", "(", "self", ".", "generateThumbnail", "(", "False", ")", ")", "# post_cmds.extend(['ls -la > ' + self.taskInfo['store_dir'] + '/afile'])", "return", "post_cmds" ]
https://github.com/CGRU/cgru/blob/1881a4128530e3d31ac6c25314c18314fc50c2c7/afanasy/python/services/service.py#L340-L352
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/range-sum-query-mutable.py
python
NumArray.sumRange
(self, i, j)
return self.__sum(j) - self.__sum(i-1)
sum of elements nums[i..j], inclusive. :type i: int :type j: int :rtype: int
sum of elements nums[i..j], inclusive. :type i: int :type j: int :rtype: int
[ "sum", "of", "elements", "nums", "[", "i", "..", "j", "]", "inclusive", ".", ":", "type", "i", ":", "int", ":", "type", "j", ":", "int", ":", "rtype", ":", "int" ]
def sumRange(self, i, j): """ sum of elements nums[i..j], inclusive. :type i: int :type j: int :rtype: int """ return self.__sum(j) - self.__sum(i-1)
[ "def", "sumRange", "(", "self", ",", "i", ",", "j", ")", ":", "return", "self", ".", "__sum", "(", "j", ")", "-", "self", ".", "__sum", "(", "i", "-", "1", ")" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/range-sum-query-mutable.py#L33-L40
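sumRange above relies on a private Fenwick-tree prefix sum (__sum); a compact sketch of the surrounding class, with the tree layout assumed since only sumRange appears in this record:

class NumArray(object):
    def __init__(self, nums):
        # bit[i] holds a partial sum over a power-of-two-sized block
        self.__bit = [0] * (len(nums) + 1)
        for i, n in enumerate(nums):
            self.__add(i, n)

    def __add(self, i, val):
        i += 1
        while i < len(self.__bit):
            self.__bit[i] += val
            i += i & -i

    def __sum(self, i):                 # prefix sum of nums[0..i]
        total, i = 0, i + 1
        while i > 0:
            total += self.__bit[i]
            i -= i & -i
        return total

    def sumRange(self, i, j):
        return self.__sum(j) - self.__sum(i - 1)

assert NumArray([1, 3, 5]).sumRange(0, 2) == 9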
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py2/numpy/lib/mixins.py
python
_disables_array_ufunc
(obj)
True when __array_ufunc__ is set to None.
True when __array_ufunc__ is set to None.
[ "True", "when", "__array_ufunc__", "is", "set", "to", "None", "." ]
def _disables_array_ufunc(obj): """True when __array_ufunc__ is set to None.""" try: return obj.__array_ufunc__ is None except AttributeError: return False
[ "def", "_disables_array_ufunc", "(", "obj", ")", ":", "try", ":", "return", "obj", ".", "__array_ufunc__", "is", "None", "except", "AttributeError", ":", "return", "False" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/lib/mixins.py#L12-L17
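A tiny check of the opt-out convention _disables_array_ufunc tests for, assuming the helper above is in scope (it is private to numpy.lib.mixins):

import numpy as np

class Opaque(object):
    __array_ufunc__ = None               # class opts out of ufunc dispatch

assert _disables_array_ufunc(Opaque()) is True
assert _disables_array_ufunc(np.arange(3)) is False  # ndarray keeps the protocol
assert _disables_array_ufunc(object()) is False      # no attribute at all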
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2.py
python
xmlDoc.schemaValidateDoc
(self, ctxt)
return ret
Validate a document tree in memory.
Validate a document tree in memory.
[ "Validate", "a", "document", "tree", "in", "memory", "." ]
def schemaValidateDoc(self, ctxt): """Validate a document tree in memory. """ if ctxt is None: ctxt__o = None else: ctxt__o = ctxt._o ret = libxml2mod.xmlSchemaValidateDoc(ctxt__o, self._o) return ret
[ "def", "schemaValidateDoc", "(", "self", ",", "ctxt", ")", ":", "if", "ctxt", "is", "None", ":", "ctxt__o", "=", "None", "else", ":", "ctxt__o", "=", "ctxt", ".", "_o", "ret", "=", "libxml2mod", ".", "xmlSchemaValidateDoc", "(", "ctxt__o", ",", "self", ".", "_o", ")", "return", "ret" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L4858-L4863
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/scipy/signal/wavelets.py
python
morlet
(M, w=5.0, s=1.0, complete=True)
return output
Complex Morlet wavelet. Parameters ---------- M : int Length of the wavelet. w : float, optional Omega0. Default is 5 s : float, optional Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. complete : bool, optional Whether to use the complete or the standard version. Returns ------- morlet : (M,) ndarray See Also -------- scipy.signal.gausspulse Notes ----- The standard version:: pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) This commonly used wavelet is often referred to simply as the Morlet wavelet. Note that this simplified version can cause admissibility problems at low values of `w`. The complete version:: pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) This version has a correction term to improve admissibility. For `w` greater than 5, the correction term is negligible. Note that the energy of the return wavelet is not normalised according to `s`. The fundamental frequency of this wavelet in Hz is given by ``f = 2*s*w*r / M`` where `r` is the sampling rate. Note: This function was created before `cwt` and is not compatible with it.
Complex Morlet wavelet.
[ "Complex", "Morlet", "wavelet", "." ]
def morlet(M, w=5.0, s=1.0, complete=True): """ Complex Morlet wavelet. Parameters ---------- M : int Length of the wavelet. w : float, optional Omega0. Default is 5 s : float, optional Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. complete : bool, optional Whether to use the complete or the standard version. Returns ------- morlet : (M,) ndarray See Also -------- scipy.signal.gausspulse Notes ----- The standard version:: pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) This commonly used wavelet is often referred to simply as the Morlet wavelet. Note that this simplified version can cause admissibility problems at low values of `w`. The complete version:: pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) This version has a correction term to improve admissibility. For `w` greater than 5, the correction term is negligible. Note that the energy of the return wavelet is not normalised according to `s`. The fundamental frequency of this wavelet in Hz is given by ``f = 2*s*w*r / M`` where `r` is the sampling rate. Note: This function was created before `cwt` and is not compatible with it. """ x = linspace(-s * 2 * pi, s * 2 * pi, M) output = exp(1j * w * x) if complete: output -= exp(-0.5 * (w**2)) output *= exp(-0.5 * (x**2)) * pi**(-0.25) return output
[ "def", "morlet", "(", "M", ",", "w", "=", "5.0", ",", "s", "=", "1.0", ",", "complete", "=", "True", ")", ":", "x", "=", "linspace", "(", "-", "s", "*", "2", "*", "pi", ",", "s", "*", "2", "*", "pi", ",", "M", ")", "output", "=", "exp", "(", "1j", "*", "w", "*", "x", ")", "if", "complete", ":", "output", "-=", "exp", "(", "-", "0.5", "*", "(", "w", "**", "2", ")", ")", "output", "*=", "exp", "(", "-", "0.5", "*", "(", "x", "**", "2", ")", ")", "*", "pi", "**", "(", "-", "0.25", ")", "return", "output" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/signal/wavelets.py#L202-L261
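A quick call sketch for morlet, assuming a SciPy release that still ships it under scipy.signal (it is deprecated in recent versions):

import numpy as np
from scipy.signal import morlet

w = morlet(256, w=5.0, s=1.0, complete=True)
print(w.dtype, w.shape)                  # complex128 (256,)
print(np.argmax(np.abs(w)))              # envelope peaks near the midpoint (~128)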
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/pyfakefs/pyfakefs/fake_tempfile.py
python
FakeTempfileModule.FakeReturnedMktempValues
(self)
return self._mktemp_retvals
For validation purposes, mktemp()'s return values are stored.
For validation purposes, mktemp()'s return values are stored.
[ "For", "validation", "purposes", "mktemp", "()", "s", "return", "values", "are", "stored", "." ]
def FakeReturnedMktempValues(self): """For validation purposes, mktemp()'s return values are stored.""" return self._mktemp_retvals
[ "def", "FakeReturnedMktempValues", "(", "self", ")", ":", "return", "self", ".", "_mktemp_retvals" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/pyfakefs/pyfakefs/fake_tempfile.py#L307-L309
google/swiftshader
8ccc63f045d5975fb67f9dfd3d2b8235b0526990
third_party/SPIRV-Headers/tools/buildHeaders/bin/makeExtinstHeaders.py
python
mk_extinst
(name, grammar_file)
Generate one C header from a grammar
Generate one C header from a grammar
[ "Generate", "one", "C", "header", "from", "a", "grammar" ]
def mk_extinst(name, grammar_file): """Generate one C header from a grammar""" script = '../../../tools/buildHeaders/bin/generate_language_headers.py' subprocess.check_call(['python3', script, '--extinst-name=' + name, '--extinst-grammar=' + grammar_file, '--extinst-output-base=' + name]) subprocess.check_call(['dos2unix', name + '.h'])
[ "def", "mk_extinst", "(", "name", ",", "grammar_file", ")", ":", "script", "=", "'../../../tools/buildHeaders/bin/generate_language_headers.py'", "subprocess", ".", "check_call", "(", "[", "'python3'", ",", "script", ",", "'--extinst-name='", "+", "name", ",", "'--extinst-grammar='", "+", "grammar_file", ",", "'--extinst-output-base='", "+", "name", "]", ")", "subprocess", ".", "check_call", "(", "[", "'dos2unix'", ",", "name", "+", "'.h'", "]", ")" ]
https://github.com/google/swiftshader/blob/8ccc63f045d5975fb67f9dfd3d2b8235b0526990/third_party/SPIRV-Headers/tools/buildHeaders/bin/makeExtinstHeaders.py#L10-L18
wangkuiyi/mapreduce-lite
1bb92fe094dc47480ef9163c34070a3199feead6
src/mapreduce_lite/scheduler/worker.py
python
MapOnlyWorker.get_worker_cmd
(self)
return cmd_map_worker
Get commands for map workers
Get commands for map workers
[ "Get", "commands", "for", "map", "workers" ]
def get_worker_cmd(self): """ Get commands for map workers """ options = self.options rank = self.rank map_worker_id = rank task = options.all_tasks[rank] executable = '%s/%s %s' %(task['tmp_dir'], options.remote_executable, options.cmd_args) param = (executable, task['input_path'], task['output_path'], task['log_filebase'], options.num_map_worker, options.reduce_workers, map_worker_id, task['class'], task['input_format'], task['output_format']) cmd_map_worker = """ %s --mr_input_filepattern="%s" --mr_output_files="%s" --mr_log_filebase="%s" --mr_num_map_workers=%s --mr_reduce_workers=%s --mr_map_worker_id=%s --mr_map_only=true --mr_mapper_class=%s --mr_input_format=%s --mr_output_format=%s """ % param return cmd_map_worker
[ "def", "get_worker_cmd", "(", "self", ")", ":", "options", "=", "self", ".", "options", "rank", "=", "self", ".", "rank", "map_worker_id", "=", "rank", "task", "=", "options", ".", "all_tasks", "[", "rank", "]", "executable", "=", "'%s/%s %s'", "%", "(", "task", "[", "'tmp_dir'", "]", ",", "options", ".", "remote_executable", ",", "options", ".", "cmd_args", ")", "param", "=", "(", "executable", ",", "task", "[", "'input_path'", "]", ",", "task", "[", "'output_path'", "]", ",", "task", "[", "'log_filebase'", "]", ",", "options", ".", "num_map_worker", ",", "options", ".", "reduce_workers", ",", "map_worker_id", ",", "task", "[", "'class'", "]", ",", "task", "[", "'input_format'", "]", ",", "task", "[", "'output_format'", "]", ")", "cmd_map_worker", "=", "\"\"\" %s\n --mr_input_filepattern=\"%s\"\n --mr_output_files=\"%s\"\n --mr_log_filebase=\"%s\"\n --mr_num_map_workers=%s\n --mr_reduce_workers=%s\n --mr_map_worker_id=%s\n --mr_map_only=true\n --mr_mapper_class=%s\n --mr_input_format=%s\n --mr_output_format=%s\n \"\"\"", "%", "param", "return", "cmd_map_worker" ]
https://github.com/wangkuiyi/mapreduce-lite/blob/1bb92fe094dc47480ef9163c34070a3199feead6/src/mapreduce_lite/scheduler/worker.py#L191-L223
gtcasl/gpuocelot
fa63920ee7c5f9a86e264cd8acd4264657cbd190
ocelot/scripts/build_environment.py
python
getLibCXXPaths
()
return (inc_path, lib_path)
Determines libc++ path returns (inc_path, lib_path)
Determines libc++ path
[ "Determines", "libc", "++", "path" ]
def getLibCXXPaths(): """Determines libc++ path returns (inc_path, lib_path) """ # determine defaults if os.name == 'posix': inc_path = '/usr/include' lib_path = '/usr/lib/libc++.so' else: raise ValueError, 'Error: unknown OS. Where is libc++ installed?' # override with environment variables if 'LIBCXX_INC_PATH' in os.environ: inc_path = os.path.abspath(os.environ['LIBCXX_INC_PATH']) if 'LIBCXX_LIB_PATH' in os.environ: lib_path = os.path.abspath(os.environ['LIBCXX_LIB_PATH']) return (inc_path, lib_path)
[ "def", "getLibCXXPaths", "(", ")", ":", "# determine defaults", "if", "os", ".", "name", "==", "'posix'", ":", "inc_path", "=", "'/usr/include'", "lib_path", "=", "'/usr/lib/libc++.so'", "else", ":", "raise", "ValueError", ",", "'Error: unknown OS. Where is libc++ installed?'", "# override with environment variables", "if", "'LIBCXX_INC_PATH'", "in", "os", ".", "environ", ":", "inc_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "environ", "[", "'LIBCXX_INC_PATH'", "]", ")", "if", "'LIBCXX_LIB_PATH'", "in", "os", ".", "environ", ":", "lib_path", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "environ", "[", "'LIBCXX_LIB_PATH'", "]", ")", "return", "(", "inc_path", ",", "lib_path", ")" ]
https://github.com/gtcasl/gpuocelot/blob/fa63920ee7c5f9a86e264cd8acd4264657cbd190/ocelot/scripts/build_environment.py#L364-L383
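A hypothetical override run for getLibCXXPaths, assuming the function above is in scope; both environment variables win over the /usr defaults:

import os

os.environ['LIBCXX_INC_PATH'] = '/opt/libcxx/include'       # hypothetical locations
os.environ['LIBCXX_LIB_PATH'] = '/opt/libcxx/lib/libc++.so'
inc_path, lib_path = getLibCXXPaths()
print(inc_path, lib_path)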
PixarAnimationStudios/USD
faed18ce62c8736b02413635b584a2f637156bad
pxr/usdImaging/usdviewq/appController.py
python
AppController._isHUDVisible
(self)
return self._dataModel.viewSettings.showHUD and self._dataModel.viewSettings.showHUD_Info
Checks if the upper HUD is visible by looking at the global HUD visibility menu as well as the 'Subtree Info' menu
Checks if the upper HUD is visible by looking at the global HUD visibility menu as well as the 'Subtree Info' menu
[ "Checks", "if", "the", "upper", "HUD", "is", "visible", "by", "looking", "at", "the", "global", "HUD", "visibility", "menu", "as", "well", "as", "the", "Subtree", "Info", "menu" ]
def _isHUDVisible(self): """Checks if the upper HUD is visible by looking at the global HUD visibility menu as well as the 'Subtree Info' menu""" return self._dataModel.viewSettings.showHUD and self._dataModel.viewSettings.showHUD_Info
[ "def", "_isHUDVisible", "(", "self", ")", ":", "return", "self", ".", "_dataModel", ".", "viewSettings", ".", "showHUD", "and", "self", ".", "_dataModel", ".", "viewSettings", ".", "showHUD_Info" ]
https://github.com/PixarAnimationStudios/USD/blob/faed18ce62c8736b02413635b584a2f637156bad/pxr/usdImaging/usdviewq/appController.py#L4194-L4197
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/learn/python/learn/monitors.py
python
ValidationMonitor.__init__
(self, x=None, y=None, input_fn=None, batch_size=None, eval_steps=None, every_n_steps=100, metrics=None, early_stopping_rounds=None, early_stopping_metric="loss", early_stopping_metric_minimize=True, name=None)
Initializes a ValidationMonitor. Args: x: See `BaseEstimator.evaluate`. y: See `BaseEstimator.evaluate`. input_fn: See `BaseEstimator.evaluate`. batch_size: See `BaseEstimator.evaluate`. eval_steps: See `BaseEstimator.evaluate`. every_n_steps: Check for new checkpoints to evaluate every N steps. If a new checkpoint is found, it is evaluated. See `EveryN`. metrics: See `BaseEstimator.evaluate`. early_stopping_rounds: `int`. If the metric indicated by `early_stopping_metric` does not change according to `early_stopping_metric_minimize` for this many steps, then training will be stopped. early_stopping_metric: `string`, name of the metric to check for early stopping. early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is expected to decrease (thus early stopping occurs when this metric stops decreasing), False if `early_stopping_metric` is expected to increase. Typically, `early_stopping_metric_minimize` is True for loss metrics like mean squared error, and False for performance metrics like accuracy. name: See `BaseEstimator.evaluate`. Raises: ValueError: If both x and input_fn are provided.
Initializes a ValidationMonitor.
[ "Initializes", "a", "ValidationMonitor", "." ]
def __init__(self, x=None, y=None, input_fn=None, batch_size=None, eval_steps=None, every_n_steps=100, metrics=None, early_stopping_rounds=None, early_stopping_metric="loss", early_stopping_metric_minimize=True, name=None): """Initializes a ValidationMonitor. Args: x: See `BaseEstimator.evaluate`. y: See `BaseEstimator.evaluate`. input_fn: See `BaseEstimator.evaluate`. batch_size: See `BaseEstimator.evaluate`. eval_steps: See `BaseEstimator.evaluate`. every_n_steps: Check for new checkpoints to evaluate every N steps. If a new checkpoint is found, it is evaluated. See `EveryN`. metrics: See `BaseEstimator.evaluate`. early_stopping_rounds: `int`. If the metric indicated by `early_stopping_metric` does not change according to `early_stopping_metric_minimize` for this many steps, then training will be stopped. early_stopping_metric: `string`, name of the metric to check for early stopping. early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is expected to decrease (thus early stopping occurs when this metric stops decreasing), False if `early_stopping_metric` is expected to increase. Typically, `early_stopping_metric_minimize` is True for loss metrics like mean squared error, and False for performance metrics like accuracy. name: See `BaseEstimator.evaluate`. Raises: ValueError: If both x and input_fn are provided. """ super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps, first_n_steps=-1) # TODO(mdan): Checks like this are already done by evaluate. if x is None and input_fn is None: raise ValueError("Either x or input_fn should be provided.") self.x = x self.y = y self.input_fn = input_fn self.batch_size = batch_size self.eval_steps = eval_steps self.metrics = metrics self.early_stopping_rounds = early_stopping_rounds self.early_stopping_metric = early_stopping_metric self.early_stopping_metric_minimize = early_stopping_metric_minimize self.name = name self._best_value_step = None self._best_value = None self._early_stopped = False self._latest_path = None self._latest_path_step = None
[ "def", "__init__", "(", "self", ",", "x", "=", "None", ",", "y", "=", "None", ",", "input_fn", "=", "None", ",", "batch_size", "=", "None", ",", "eval_steps", "=", "None", ",", "every_n_steps", "=", "100", ",", "metrics", "=", "None", ",", "early_stopping_rounds", "=", "None", ",", "early_stopping_metric", "=", "\"loss\"", ",", "early_stopping_metric_minimize", "=", "True", ",", "name", "=", "None", ")", ":", "super", "(", "ValidationMonitor", ",", "self", ")", ".", "__init__", "(", "every_n_steps", "=", "every_n_steps", ",", "first_n_steps", "=", "-", "1", ")", "# TODO(mdan): Checks like this are already done by evaluate.", "if", "x", "is", "None", "and", "input_fn", "is", "None", ":", "raise", "ValueError", "(", "\"Either x or input_fn should be provided.\"", ")", "self", ".", "x", "=", "x", "self", ".", "y", "=", "y", "self", ".", "input_fn", "=", "input_fn", "self", ".", "batch_size", "=", "batch_size", "self", ".", "eval_steps", "=", "eval_steps", "self", ".", "metrics", "=", "metrics", "self", ".", "early_stopping_rounds", "=", "early_stopping_rounds", "self", ".", "early_stopping_metric", "=", "early_stopping_metric", "self", ".", "early_stopping_metric_minimize", "=", "early_stopping_metric_minimize", "self", ".", "name", "=", "name", "self", ".", "_best_value_step", "=", "None", "self", ".", "_best_value", "=", "None", "self", ".", "_early_stopped", "=", "False", "self", ".", "_latest_path", "=", "None", "self", ".", "_latest_path_step", "=", "None" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/learn/python/learn/monitors.py#L604-L656
sonyxperiadev/WebGL
0299b38196f78c6d5f74bcf6fa312a3daee6de60
Tools/Scripts/webkitpy/style/checkers/cpp.py
python
replaceable_check
(operator, macro, line)
return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
Determine whether a basic CHECK can be replaced with a more specific one. For example suggest using CHECK_EQ instead of CHECK(a == b) and similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE. Args: operator: The C++ operator used in the CHECK. macro: The CHECK or EXPECT macro being called. line: The current source line. Returns: True if the CHECK can be replaced with a more specific one.
Determine whether a basic CHECK can be replaced with a more specific one.
[ "Determine", "whether", "a", "basic", "CHECK", "can", "be", "replaced", "with", "a", "more", "specific", "one", "." ]
def replaceable_check(operator, macro, line): """Determine whether a basic CHECK can be replaced with a more specific one. For example suggest using CHECK_EQ instead of CHECK(a == b) and similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE. Args: operator: The C++ operator used in the CHECK. macro: The CHECK or EXPECT macro being called. line: The current source line. Returns: True if the CHECK can be replaced with a more specific one. """ # This matches decimal and hex integers, strings, and chars (in that order). match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')' # Expression to match two sides of the operator with something that # looks like a literal, since CHECK(x == iterator) won't compile. # This means we can't catch all the cases where a more specific # CHECK is possible, but it's less annoying than dealing with # extraneous warnings. match_this = (r'\s*' + macro + r'\((\s*' + match_constant + r'\s*' + operator + r'[^<>].*|' r'.*[^<>]' + operator + r'\s*' + match_constant + r'\s*\))') # Don't complain about CHECK(x == NULL) or similar because # CHECK_EQ(x, NULL) won't compile (requires a cast). # Also, don't complain about more complex boolean expressions # involving && or || such as CHECK(a == b || c == d). return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
[ "def", "replaceable_check", "(", "operator", ",", "macro", ",", "line", ")", ":", "# This matches decimal and hex integers, strings, and chars (in that order).", "match_constant", "=", "r'([-+]?(\\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|\".*\"|\\'.*\\')'", "# Expression to match two sides of the operator with something that", "# looks like a literal, since CHECK(x == iterator) won't compile.", "# This means we can't catch all the cases where a more specific", "# CHECK is possible, but it's less annoying than dealing with", "# extraneous warnings.", "match_this", "=", "(", "r'\\s*'", "+", "macro", "+", "r'\\((\\s*'", "+", "match_constant", "+", "r'\\s*'", "+", "operator", "+", "r'[^<>].*|'", "r'.*[^<>]'", "+", "operator", "+", "r'\\s*'", "+", "match_constant", "+", "r'\\s*\\))'", ")", "# Don't complain about CHECK(x == NULL) or similar because", "# CHECK_EQ(x, NULL) won't compile (requires a cast).", "# Also, don't complain about more complex boolean expressions", "# involving && or || such as CHECK(a == b || c == d).", "return", "match", "(", "match_this", ",", "line", ")", "and", "not", "search", "(", "r'NULL|&&|\\|\\|'", ",", "line", ")" ]
https://github.com/sonyxperiadev/WebGL/blob/0299b38196f78c6d5f74bcf6fa312a3daee6de60/Tools/Scripts/webkitpy/style/checkers/cpp.py#L2265-L2297
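replaceable_check depends on cpp.py's own match/search helpers; a self-contained demo with minimal stand-ins, assuming they are thin wrappers over re and that the function above is pasted into the same module:

import re

def match(pattern, s):                    # stand-in for cpp.py's match()
    return re.match(pattern, s)

def search(pattern, s):                   # stand-in for cpp.py's search()
    return re.search(pattern, s)

print(bool(replaceable_check('==', 'CHECK', 'CHECK(a == 42);')))    # True -> suggest CHECK_EQ
print(bool(replaceable_check('==', 'CHECK', 'CHECK(x == NULL);')))  # False: NULL is excluded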
Illumina/manta
75b5c38d4fcd2f6961197b28a41eb61856f2d976
scratch/util/largeIntrachromFilter.py
python
resolveRec
(recEqualSet, recList)
determine which of a set of vcf records presumed to refer to the same inversion are kept right now best is a record with PASS in the filter field, and secondarily the high quality
determine which of a set of vcf records presumed to refer to the same inversion are kept right now best is a record with PASS in the filter field, and secondarily the high quality
[ "determine", "which", "of", "a", "set", "of", "vcf", "records", "presumed", "to", "refer", "to", "the", "same", "inversion", "are", "kept", "right", "now", "best", "is", "a", "record", "with", "PASS", "in", "the", "filter", "field", "and", "secondarily", "the", "high", "quality" ]
def resolveRec(recEqualSet, recList) : """ determine which of a set of vcf records presumed to refer to the same inversion are kept right now best is a record with PASS in the filter field, and secondarily the high quality """ if not recEqualSet: return bestIndex=0 bestSS=0. bestPos=0 bestIsPass=False for (index,rec) in enumerate(recEqualSet) : assert rec.pos > 0 isNewPass=((not bestIsPass) and rec.isPass) isHighQual=((bestIsPass == rec.isPass) and (rec.pos < bestPos)) #(rec.ss > bestSS)) if (isNewPass or isHighQual) : bestIndex = index bestPos = rec.pos bestIsPass = rec.isPass # potentially could reward two non-pass inversion calls here: # if not bestIsPass and (len(recEqualSet) == 2) : # if (recEqualSet[0].isInv3 and reEqualSet[1].isInv5) or # recEqualSet[1].isInv3 and reEqualSet[0].isInv5)) : recList.append(recEqualSet[bestIndex])
[ "def", "resolveRec", "(", "recEqualSet", ",", "recList", ")", ":", "if", "not", "recEqualSet", ":", "return", "bestIndex", "=", "0", "bestSS", "=", "0.", "bestPos", "=", "0", "bestIsPass", "=", "False", "for", "(", "index", ",", "rec", ")", "in", "enumerate", "(", "recEqualSet", ")", ":", "assert", "rec", ".", "pos", ">", "0", "isNewPass", "=", "(", "(", "not", "bestIsPass", ")", "and", "rec", ".", "isPass", ")", "isHighQual", "=", "(", "(", "bestIsPass", "==", "rec", ".", "isPass", ")", "and", "(", "rec", ".", "pos", "<", "bestPos", ")", ")", "#(rec.ss > bestSS))", "if", "(", "isNewPass", "or", "isHighQual", ")", ":", "bestIndex", "=", "index", "bestPos", "=", "rec", ".", "pos", "bestIsPass", "=", "rec", ".", "isPass", "# potentially could reward two non-pass inversion calls here:", "# if not bestIsPass and (len(recEqualSet) == 2) :", "# if (recEqualSet[0].isInv3 and reEqualSet[1].isInv5) or", "# recEqualSet[1].isInv3 and reEqualSet[0].isInv5)) :", "recList", ".", "append", "(", "recEqualSet", "[", "bestIndex", "]", ")" ]
https://github.com/Illumina/manta/blob/75b5c38d4fcd2f6961197b28a41eb61856f2d976/scratch/util/largeIntrachromFilter.py#L127-L154
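resolveRec only reads .pos and .isPass in this version, so a namedtuple stand-in is enough to exercise the PASS-first preference (assuming the function above is in scope):

import collections

Rec = collections.namedtuple('Rec', ['pos', 'isPass'])
recList = []
resolveRec([Rec(pos=120, isPass=False), Rec(pos=80, isPass=True)], recList)
assert recList[0].isPass        # the PASS record is kept over the non-PASS one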
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/debug/cli/curses_ui.py
python
CursesUI._screen_create_command_textbox
(self, existing_command)
Create command textbox on screen. Args: existing_command: (str) A command string to put in the textbox right after its creation.
Create command textbox on screen.
[ "Create", "command", "textbox", "on", "screen", "." ]
def _screen_create_command_textbox(self, existing_command): """Create command textbox on screen. Args: existing_command: (str) A command string to put in the textbox right after its creation. """ # Display the tfdbg prompt. self._stdscr.addstr(self._max_y - self._command_textbox_height, 0, self.CLI_PROMPT, curses.A_BOLD) self._stdscr.refresh() self._command_window.clear() # Command text box. self._command_textbox = textpad.Textbox( self._command_window, insert_mode=True) # Enter existing command. self._auto_key_in(existing_command)
[ "def", "_screen_create_command_textbox", "(", "self", ",", "existing_command", ")", ":", "# Display the tfdbg prompt.", "self", ".", "_stdscr", ".", "addstr", "(", "self", ".", "_max_y", "-", "self", ".", "_command_textbox_height", ",", "0", ",", "self", ".", "CLI_PROMPT", ",", "curses", ".", "A_BOLD", ")", "self", ".", "_stdscr", ".", "refresh", "(", ")", "self", ".", "_command_window", ".", "clear", "(", ")", "# Command text box.", "self", ".", "_command_textbox", "=", "textpad", ".", "Textbox", "(", "self", ".", "_command_window", ",", "insert_mode", "=", "True", ")", "# Enter existing command.", "self", ".", "_auto_key_in", "(", "existing_command", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/debug/cli/curses_ui.py#L275-L295
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/help.py
python
info
()
return { 'platform': platform_info, 'implementation': implementation_info, 'system_ssl': system_ssl_info, 'using_pyopenssl': pyopenssl is not None, 'pyOpenSSL': pyopenssl_info, 'urllib3': urllib3_info, 'chardet': chardet_info, 'cryptography': cryptography_info, 'idna': idna_info, 'requests': { 'version': requests_version, }, }
Generate information for a bug report.
Generate information for a bug report.
[ "Generate", "information", "for", "a", "bug", "report", "." ]
def info(): """Generate information for a bug report.""" try: platform_info = { 'system': platform.system(), 'release': platform.release(), } except IOError: platform_info = { 'system': 'Unknown', 'release': 'Unknown', } implementation_info = _implementation() urllib3_info = {'version': urllib3.__version__} chardet_info = {'version': chardet.__version__} pyopenssl_info = { 'version': None, 'openssl_version': '', } if OpenSSL: pyopenssl_info = { 'version': OpenSSL.__version__, 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, } cryptography_info = { 'version': getattr(cryptography, '__version__', ''), } idna_info = { 'version': getattr(idna, '__version__', ''), } system_ssl = ssl.OPENSSL_VERSION_NUMBER system_ssl_info = { 'version': '%x' % system_ssl if system_ssl is not None else '' } return { 'platform': platform_info, 'implementation': implementation_info, 'system_ssl': system_ssl_info, 'using_pyopenssl': pyopenssl is not None, 'pyOpenSSL': pyopenssl_info, 'urllib3': urllib3_info, 'chardet': chardet_info, 'cryptography': cryptography_info, 'idna': idna_info, 'requests': { 'version': requests_version, }, }
[ "def", "info", "(", ")", ":", "try", ":", "platform_info", "=", "{", "'system'", ":", "platform", ".", "system", "(", ")", ",", "'release'", ":", "platform", ".", "release", "(", ")", ",", "}", "except", "IOError", ":", "platform_info", "=", "{", "'system'", ":", "'Unknown'", ",", "'release'", ":", "'Unknown'", ",", "}", "implementation_info", "=", "_implementation", "(", ")", "urllib3_info", "=", "{", "'version'", ":", "urllib3", ".", "__version__", "}", "chardet_info", "=", "{", "'version'", ":", "chardet", ".", "__version__", "}", "pyopenssl_info", "=", "{", "'version'", ":", "None", ",", "'openssl_version'", ":", "''", ",", "}", "if", "OpenSSL", ":", "pyopenssl_info", "=", "{", "'version'", ":", "OpenSSL", ".", "__version__", ",", "'openssl_version'", ":", "'%x'", "%", "OpenSSL", ".", "SSL", ".", "OPENSSL_VERSION_NUMBER", ",", "}", "cryptography_info", "=", "{", "'version'", ":", "getattr", "(", "cryptography", ",", "'__version__'", ",", "''", ")", ",", "}", "idna_info", "=", "{", "'version'", ":", "getattr", "(", "idna", ",", "'__version__'", ",", "''", ")", ",", "}", "system_ssl", "=", "ssl", ".", "OPENSSL_VERSION_NUMBER", "system_ssl_info", "=", "{", "'version'", ":", "'%x'", "%", "system_ssl", "if", "system_ssl", "is", "not", "None", "else", "''", "}", "return", "{", "'platform'", ":", "platform_info", ",", "'implementation'", ":", "implementation_info", ",", "'system_ssl'", ":", "system_ssl_info", ",", "'using_pyopenssl'", ":", "pyopenssl", "is", "not", "None", ",", "'pyOpenSSL'", ":", "pyopenssl_info", ",", "'urllib3'", ":", "urllib3_info", ",", "'chardet'", ":", "chardet_info", ",", "'cryptography'", ":", "cryptography_info", ",", "'idna'", ":", "idna_info", ",", "'requests'", ":", "{", "'version'", ":", "requests_version", ",", "}", ",", "}" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/help.py#L59-L110
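info() above is the public helper behind requests' bug-report template; a short usage sketch, assuming requests is installed:

import json
from requests.help import info

print(json.dumps(info(), indent=2))   # platform, implementation, urllib3, ...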
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/io/pytables.py
python
WORMTable.write
(self, **kwargs)
write in a format that we can search later on (but cannot append to): write out the indices and the values using _write_array (e.g. a CArray) create an indexing table so that we can search
write in a format that we can search later on (but cannot append to): write out the indices and the values using _write_array (e.g. a CArray) create an indexing table so that we can search
[ "write", "in", "a", "format", "that", "we", "can", "search", "later", "on", "(", "but", "cannot", "append", "to", ")", ":", "write", "out", "the", "indices", "and", "the", "values", "using", "_write_array", "(", "e", ".", "g", ".", "a", "CArray", ")", "create", "an", "indexing", "table", "so", "that", "we", "can", "search" ]
def write(self, **kwargs): """ write in a format that we can search later on (but cannot append to): write out the indices and the values using _write_array (e.g. a CArray) create an indexing table so that we can search """ raise NotImplementedError("WORMTable needs to implement write")
[ "def", "write", "(", "self", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "\"WORMTable needs to implement write\"", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/io/pytables.py#L3868-L3873
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/combo.py
python
ComboCtrl.SetPopupAnchor
(*args, **kwargs)
return _combo.ComboCtrl_SetPopupAnchor(*args, **kwargs)
SetPopupAnchor(self, int anchorSide) Set side of the control to which the popup will align itself. Valid values are wx.LEFT, wx.RIGHT and 0. The default value 0 means that the most appropriate side is used (which, currently, is always wx.LEFT).
SetPopupAnchor(self, int anchorSide)
[ "SetPopupAnchor", "(", "self", "int", "anchorSide", ")" ]
def SetPopupAnchor(*args, **kwargs): """ SetPopupAnchor(self, int anchorSide) Set side of the control to which the popup will align itself. Valid values are wx.LEFT, wx.RIGHT and 0. The default value 0 means that the most appropriate side is used (which, currently, is always wx.LEFT). """ return _combo.ComboCtrl_SetPopupAnchor(*args, **kwargs)
[ "def", "SetPopupAnchor", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_combo", ".", "ComboCtrl_SetPopupAnchor", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/combo.py#L316-L324
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/finding-pairs-with-a-certain-sum.py
python
FindSumPairs.count
(self, tot)
return sum(cnt * self.__count2[tot-x] for x, cnt in self.__count1.iteritems())
:type tot: int :rtype: int
:type tot: int :rtype: int
[ ":", "type", "tot", ":", "int", ":", "rtype", ":", "int" ]
def count(self, tot): """ :type tot: int :rtype: int """ return sum(cnt * self.__count2[tot-x] for x, cnt in self.__count1.iteritems())
[ "def", "count", "(", "self", ",", "tot", ")", ":", "return", "sum", "(", "cnt", "*", "self", ".", "__count2", "[", "tot", "-", "x", "]", "for", "x", ",", "cnt", "in", "self", ".", "__count1", ".", "iteritems", "(", ")", ")" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/finding-pairs-with-a-certain-sum.py#L31-L36
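count() above pairs element counts from two Counters; a compact sketch of the enclosing class under that assumption (ported to Python 3 items(), with the problem's add() mutator included for completeness):

import collections

class FindSumPairs(object):
    def __init__(self, nums1, nums2):
        self.__count1 = collections.Counter(nums1)
        self.__nums2 = nums2
        self.__count2 = collections.Counter(nums2)

    def add(self, index, val):
        self.__count2[self.__nums2[index]] -= 1
        self.__nums2[index] += val
        self.__count2[self.__nums2[index]] += 1

    def count(self, tot):
        return sum(cnt * self.__count2[tot - x]
                   for x, cnt in self.__count1.items())

fsp = FindSumPairs([1, 1, 2], [1, 4])
assert fsp.count(5) == 2     # both 1s pair with the single 4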
hpi-xnor/BMXNet-v2
af2b1859eafc5c721b1397cef02f946aaf2ce20d
python/mxnet/ndarray/utils.py
python
array
(source_array, ctx=None, dtype=None)
Creates an array from any object exposing the array interface. Parameters ---------- source_array : array_like An object exposing the array interface, an object whose `__array__` method returns an array, or any (nested) sequence. ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. The default dtype is ``source_array.dtype`` if `source_array` is an `NDArray`, `float32` otherwise. Returns ------- NDArray, RowSparseNDArray or CSRNDArray An array with the same contents as the `source_array`. Examples -------- >>> import numpy as np >>> mx.nd.array([1, 2, 3]) <NDArray 3 @cpu(0)> >>> mx.nd.array([[1, 2], [3, 4]]) <NDArray 2x2 @cpu(0)> >>> mx.nd.array(np.zeros((3, 2))) <NDArray 3x2 @cpu(0)> >>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0)) <NDArray 3x2 @gpu(0)> >>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse')) <RowSparseNDArray 3x2 @cpu(0)>
Creates an array from any object exposing the array interface.
[ "Creates", "an", "array", "from", "any", "object", "exposing", "the", "array", "interface", "." ]
def array(source_array, ctx=None, dtype=None): """Creates an array from any object exposing the array interface. Parameters ---------- source_array : array_like An object exposing the array interface, an object whose `__array__` method returns an array, or any (nested) sequence. ctx : Context, optional Device context (default is the current default context). dtype : str or numpy.dtype, optional The data type of the output array. The default dtype is ``source_array.dtype`` if `source_array` is an `NDArray`, `float32` otherwise. Returns ------- NDArray, RowSparseNDArray or CSRNDArray An array with the same contents as the `source_array`. Examples -------- >>> import numpy as np >>> mx.nd.array([1, 2, 3]) <NDArray 3 @cpu(0)> >>> mx.nd.array([[1, 2], [3, 4]]) <NDArray 2x2 @cpu(0)> >>> mx.nd.array(np.zeros((3, 2))) <NDArray 3x2 @cpu(0)> >>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0)) <NDArray 3x2 @gpu(0)> >>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse')) <RowSparseNDArray 3x2 @cpu(0)> """ if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix): return _sparse_array(source_array, ctx=ctx, dtype=dtype) elif isinstance(source_array, NDArray) and source_array.stype != 'default': return _sparse_array(source_array, ctx=ctx, dtype=dtype) else: return _array(source_array, ctx=ctx, dtype=dtype)
[ "def", "array", "(", "source_array", ",", "ctx", "=", "None", ",", "dtype", "=", "None", ")", ":", "if", "spsp", "is", "not", "None", "and", "isinstance", "(", "source_array", ",", "spsp", ".", "csr", ".", "csr_matrix", ")", ":", "return", "_sparse_array", "(", "source_array", ",", "ctx", "=", "ctx", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "source_array", ",", "NDArray", ")", "and", "source_array", ".", "stype", "!=", "'default'", ":", "return", "_sparse_array", "(", "source_array", ",", "ctx", "=", "ctx", ",", "dtype", "=", "dtype", ")", "else", ":", "return", "_array", "(", "source_array", ",", "ctx", "=", "ctx", ",", "dtype", "=", "dtype", ")" ]
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/ndarray/utils.py#L108-L146
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
media/webrtc/trunk/tools/gyp/pylib/gyp/xcode_emulation.py
python
XcodeSettings.GetProductType
(self)
Returns the PRODUCT_TYPE of this target.
Returns the PRODUCT_TYPE of this target.
[ "Returns", "the", "PRODUCT_TYPE", "of", "this", "target", "." ]
def GetProductType(self): """Returns the PRODUCT_TYPE of this target.""" if self._IsBundle(): return { 'executable': 'com.apple.product-type.application', 'loadable_module': 'com.apple.product-type.bundle', 'shared_library': 'com.apple.product-type.framework', }[self.spec['type']] else: return { 'executable': 'com.apple.product-type.tool', 'loadable_module': 'com.apple.product-type.library.dynamic', 'shared_library': 'com.apple.product-type.library.dynamic', 'static_library': 'com.apple.product-type.library.static', }[self.spec['type']]
[ "def", "GetProductType", "(", "self", ")", ":", "if", "self", ".", "_IsBundle", "(", ")", ":", "return", "{", "'executable'", ":", "'com.apple.product-type.application'", ",", "'loadable_module'", ":", "'com.apple.product-type.bundle'", ",", "'shared_library'", ":", "'com.apple.product-type.framework'", ",", "}", "[", "self", ".", "spec", "[", "'type'", "]", "]", "else", ":", "return", "{", "'executable'", ":", "'com.apple.product-type.tool'", ",", "'loadable_module'", ":", "'com.apple.product-type.library.dynamic'", ",", "'shared_library'", ":", "'com.apple.product-type.library.dynamic'", ",", "'static_library'", ":", "'com.apple.product-type.library.static'", ",", "}", "[", "self", ".", "spec", "[", "'type'", "]", "]" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/media/webrtc/trunk/tools/gyp/pylib/gyp/xcode_emulation.py#L128-L142
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/codecontext.py
python
CodeContext.__init__
(self, editwin)
Initialize settings for context block. editwin is the Editor window for the context block. self.text is the editor window text widget. self.context displays the code context text above the editor text. Initially None, it is toggled via <<toggle-code-context>>. self.topvisible is the number of the top text line displayed. self.info is a list of (line number, indent level, line text, block keyword) tuples for the block structure above topvisible. self.info[0] is initialized with a 'dummy' line which starts the toplevel 'block' of the module. self.t1 and self.t2 are two timer events on the editor text widget to monitor for changes to the context text or editor font.
Initialize settings for context block.
[ "Initialize", "settings", "for", "context", "block", "." ]
def __init__(self, editwin): """Initialize settings for context block. editwin is the Editor window for the context block. self.text is the editor window text widget. self.context displays the code context text above the editor text. Initially None, it is toggled via <<toggle-code-context>>. self.topvisible is the number of the top text line displayed. self.info is a list of (line number, indent level, line text, block keyword) tuples for the block structure above topvisible. self.info[0] is initialized with a 'dummy' line which starts the toplevel 'block' of the module. self.t1 and self.t2 are two timer events on the editor text widget to monitor for changes to the context text or editor font. """ self.editwin = editwin self.text = editwin.text self._reset()
[ "def", "__init__", "(", "self", ",", "editwin", ")", ":", "self", ".", "editwin", "=", "editwin", "self", ".", "text", "=", "editwin", ".", "text", "self", ".", "_reset", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/codecontext.py#L46-L65
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/parenmatch.py
python
ParenMatch.set_timeout_none
(self)
Highlight will remain until user input turns it off or the insert has moved
Highlight will remain until user input turns it off or the insert has moved
[ "Highlight", "will", "remain", "until", "user", "input", "turns", "it", "off", "or", "the", "insert", "has", "moved" ]
def set_timeout_none(self): """Highlight will remain until user input turns it off or the insert has moved""" # After CHECK_DELAY, call a function which disables the "paren" tag # if the event is for the most recent timer and the insert has changed, # or schedules another call for itself. self.counter += 1 def callme(callme, self=self, c=self.counter, index=self.text.index("insert")): if index != self.text.index("insert"): self.handle_restore_timer(c) else: self.editwin.text_frame.after(CHECK_DELAY, callme, callme) self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
[ "def", "set_timeout_none", "(", "self", ")", ":", "# After CHECK_DELAY, call a function which disables the \"paren\" tag", "# if the event is for the most recent timer and the insert has changed,", "# or schedules another call for itself.", "self", ".", "counter", "+=", "1", "def", "callme", "(", "callme", ",", "self", "=", "self", ",", "c", "=", "self", ".", "counter", ",", "index", "=", "self", ".", "text", ".", "index", "(", "\"insert\"", ")", ")", ":", "if", "index", "!=", "self", ".", "text", ".", "index", "(", "\"insert\"", ")", ":", "self", ".", "handle_restore_timer", "(", "c", ")", "else", ":", "self", ".", "editwin", ".", "text_frame", ".", "after", "(", "CHECK_DELAY", ",", "callme", ",", "callme", ")", "self", ".", "editwin", ".", "text_frame", ".", "after", "(", "CHECK_DELAY", ",", "callme", ",", "callme", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/idlelib/parenmatch.py#L153-L166
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
samples/ide/activegrid/tool/ProjectEditor.py
python
GetDocCallback
(filepath)
return doc, docModel
Get the Document used by the IDE and the in-memory document model used by runtime engine
Get the Document used by the IDE and the in-memory document model used by runtime engine
[ "Get", "the", "Document", "used", "by", "the", "IDE", "and", "the", "in", "-", "memory", "document", "model", "used", "by", "runtime", "engine" ]
def GetDocCallback(filepath): """ Get the Document used by the IDE and the in-memory document model used by runtime engine """ docMgr = wx.GetApp().GetDocumentManager() try: doc = docMgr.CreateDocument(filepath, docMgr.GetFlags()|wx.lib.docview.DOC_SILENT|wx.lib.docview.DOC_OPEN_ONCE|wx.lib.docview.DOC_NO_VIEW) if doc: AddProjectMapping(doc) else: # already open for d in docMgr.GetDocuments(): if os.path.normcase(d.GetFilename()) == os.path.normcase(filepath): doc = d break except Exception,e: doc = None aglogging.reportException(e, stacktrace=True) if doc and doc.GetDocumentTemplate().GetDocumentType() == WsdlAgEditor.WsdlAgDocument: # get referenced wsdl doc instead if doc.GetModel().filePath: if os.path.isabs(doc.GetModel().filePath): # if absolute path, leave it alone filepath = doc.GetModel().filePath else: filepath = doc.GetAppDocMgr().fullPath(doc.GetModel().filePath) # check relative to project homeDir if not os.path.isfile(filepath): filepath = os.path.normpath(os.path.join(os.path.dirname(doc.GetFilename()), doc.GetModel().filePath)) # check relative to wsdlag file if not os.path.isfile(filepath): filename = os.sep + os.path.basename(doc.GetModel().filePath) # check to see if in project file filePaths = findDocumentMgr(doc).filePaths for fp in filePaths: if fp.endswith(filename): filepath = fp break try: doc = docMgr.CreateDocument(filepath, docMgr.GetFlags()|wx.lib.docview.DOC_SILENT|wx.lib.docview.DOC_OPEN_ONCE|wx.lib.docview.DOC_NO_VIEW) except Exception,e: doc = None aglogging.reportException(e, stacktrace=True) if doc: AddProjectMapping(doc) else: # already open for d in docMgr.GetDocuments(): if os.path.normcase(d.GetFilename()) == os.path.normcase(filepath): doc = d break else: doc = None if doc: docModel = doc.GetModel() else: docModel = None return doc, docModel
[ "def", "GetDocCallback", "(", "filepath", ")", ":", "docMgr", "=", "wx", ".", "GetApp", "(", ")", ".", "GetDocumentManager", "(", ")", "try", ":", "doc", "=", "docMgr", ".", "CreateDocument", "(", "filepath", ",", "docMgr", ".", "GetFlags", "(", ")", "|", "wx", ".", "lib", ".", "docview", ".", "DOC_SILENT", "|", "wx", ".", "lib", ".", "docview", ".", "DOC_OPEN_ONCE", "|", "wx", ".", "lib", ".", "docview", ".", "DOC_NO_VIEW", ")", "if", "doc", ":", "AddProjectMapping", "(", "doc", ")", "else", ":", "# already open", "for", "d", "in", "docMgr", ".", "GetDocuments", "(", ")", ":", "if", "os", ".", "path", ".", "normcase", "(", "d", ".", "GetFilename", "(", ")", ")", "==", "os", ".", "path", ".", "normcase", "(", "filepath", ")", ":", "doc", "=", "d", "break", "except", "Exception", ",", "e", ":", "doc", "=", "None", "aglogging", ".", "reportException", "(", "e", ",", "stacktrace", "=", "True", ")", "if", "doc", "and", "doc", ".", "GetDocumentTemplate", "(", ")", ".", "GetDocumentType", "(", ")", "==", "WsdlAgEditor", ".", "WsdlAgDocument", ":", "# get referenced wsdl doc instead", "if", "doc", ".", "GetModel", "(", ")", ".", "filePath", ":", "if", "os", ".", "path", ".", "isabs", "(", "doc", ".", "GetModel", "(", ")", ".", "filePath", ")", ":", "# if absolute path, leave it alone", "filepath", "=", "doc", ".", "GetModel", "(", ")", ".", "filePath", "else", ":", "filepath", "=", "doc", ".", "GetAppDocMgr", "(", ")", ".", "fullPath", "(", "doc", ".", "GetModel", "(", ")", ".", "filePath", ")", "# check relative to project homeDir", "if", "not", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "filepath", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "doc", ".", "GetFilename", "(", ")", ")", ",", "doc", ".", "GetModel", "(", ")", ".", "filePath", ")", ")", "# check relative to wsdlag file", "if", "not", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "filename", "=", "os", ".", "sep", "+", "os", ".", "path", ".", "basename", "(", "doc", ".", "GetModel", "(", ")", ".", "filePath", ")", "# check to see if in project file", "filePaths", "=", "findDocumentMgr", "(", "doc", ")", ".", "filePaths", "for", "fp", "in", "filePaths", ":", "if", "fp", ".", "endswith", "(", "filename", ")", ":", "filepath", "=", "fp", "break", "try", ":", "doc", "=", "docMgr", ".", "CreateDocument", "(", "filepath", ",", "docMgr", ".", "GetFlags", "(", ")", "|", "wx", ".", "lib", ".", "docview", ".", "DOC_SILENT", "|", "wx", ".", "lib", ".", "docview", ".", "DOC_OPEN_ONCE", "|", "wx", ".", "lib", ".", "docview", ".", "DOC_NO_VIEW", ")", "except", "Exception", ",", "e", ":", "doc", "=", "None", "aglogging", ".", "reportException", "(", "e", ",", "stacktrace", "=", "True", ")", "if", "doc", ":", "AddProjectMapping", "(", "doc", ")", "else", ":", "# already open", "for", "d", "in", "docMgr", ".", "GetDocuments", "(", ")", ":", "if", "os", ".", "path", ".", "normcase", "(", "d", ".", "GetFilename", "(", ")", ")", "==", "os", ".", "path", ".", "normcase", "(", "filepath", ")", ":", "doc", "=", "d", "break", "else", ":", "doc", "=", "None", "if", "doc", ":", "docModel", "=", "doc", ".", "GetModel", "(", ")", "else", ":", "docModel", "=", "None", "return", "doc", ",", "docModel" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/ide/activegrid/tool/ProjectEditor.py#L112-L169
include-what-you-use/include-what-you-use
208fbfffa5d69364b9f78e427caa443441279283
mapgen/iwyu-mapgen-cpython.py
python
main
(pythonroot)
return 0
Entry point.
Entry point.
[ "Entry", "point", "." ]
def main(pythonroot): """ Entry point. """ # Collect all include names in the root. These are the private includes. included_names = [] for fname in iterfiles(pythonroot, '*.h'): included_names.extend(parse_include_names(fname)) # Discard duplicates and remove Python.h itself. included_names = set(included_names) included_names.remove('Python.h') # Print mappings from name -> Python.h. print('[') print(',\n'.join(generate_imp_lines(sorted(included_names)))) print(']') return 0
[ "def", "main", "(", "pythonroot", ")", ":", "# Collect all include names in the root. These are the private includes.", "included_names", "=", "[", "]", "for", "fname", "in", "iterfiles", "(", "pythonroot", ",", "'*.h'", ")", ":", "included_names", ".", "extend", "(", "parse_include_names", "(", "fname", ")", ")", "# Discard duplicates and remove Python.h itself.", "included_names", "=", "set", "(", "included_names", ")", "included_names", ".", "remove", "(", "'Python.h'", ")", "# Print mappings from name -> Python.h.", "print", "(", "'['", ")", "print", "(", "',\\n'", ".", "join", "(", "generate_imp_lines", "(", "sorted", "(", "included_names", ")", ")", ")", ")", "print", "(", "']'", ")", "return", "0" ]
https://github.com/include-what-you-use/include-what-you-use/blob/208fbfffa5d69364b9f78e427caa443441279283/mapgen/iwyu-mapgen-cpython.py#L67-L84
OpenGenus/cosmos
1a94e8880068e51d571543be179c323936bd0936
code/data_structures/src/list/singly_linked_list/operations/find/find.py
python
Node.__repr__
(self)
return self.data
Node representation as required
Node representation as required
[ "Node", "representation", "as", "required" ]
def __repr__(self): """ Node representation as required""" return self.data
[ "def", "__repr__", "(", "self", ")", ":", "return", "self", ".", "data" ]
https://github.com/OpenGenus/cosmos/blob/1a94e8880068e51d571543be179c323936bd0936/code/data_structures/src/list/singly_linked_list/operations/find/find.py#L17-L19
mysql/mysql-workbench
2f35f9034f015cbcd22139a60e1baa2e3e8e795c
modules/db.mysql/db_mysql_fe_grt.py
python
createScriptForCatalogObjects
(path, catalog, objectCreationParams)
return 1
Create a CREATE script with the catalog objects. The catalog must have been previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql attributes set with their respective SQL CREATE statements.
Create a CREATE script with the catalog objects. The catalog must have been previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql attributes set with their respective SQL CREATE statements.
[ "Create", "a", "CREATE", "script", "with", "the", "catalog", "objects", ".", "The", "catalog", "must", "have", "been", "previously", "processed", "with", "generateSQLCreateStatements", "()", "so", "that", "the", "objects", "have", "their", "temp_sql", "attributes", "set", "with", "their", "respective", "SQL", "CREATE", "statements", "." ]
def createScriptForCatalogObjects(path, catalog, objectCreationParams): """Create a CREATE script with the catalog objects. The catalog must have been previously processed with generateSQLCreateStatements(), so that the objects have their temp_sql attributes set with their respective SQL CREATE statements. """ def object_heading(type, name): text = """ -- ---------------------------------------------------------------------------- -- %s %s -- ---------------------------------------------------------------------------- """ % (type, name) return text import time file = open(path, "w+") file.write("""-- ---------------------------------------------------------------------------- -- MySQL Workbench Migration -- Migrated Schemata: %s -- Source Schemata: %s -- Created: %s -- Workbench Version: %s -- ---------------------------------------------------------------------------- """ % (", ".join([s.name for s in catalog.schemata]), ", ".join([s.oldName for s in catalog.schemata]), time.ctime(), Version.fromgrt(grt.root.wb.info.version))) preamble = catalog.customData["migration:preamble"] if preamble and preamble.temp_sql: #file.write(object_heading("Preamble script", "")) file.write(preamble.temp_sql+"\n") for schema in catalog.schemata: file.write(object_heading("Schema", schema.name)) file.write(schema.temp_sql+";\n") for table in schema.tables: file.write(object_heading("Table", "%s.%s" % (schema.name, table.name))) file.write(table.temp_sql+";\n") for view in schema.views: file.write(object_heading("View", "%s.%s" % (schema.name, view.name))) file.write(view.temp_sql+";\n") for routine in schema.routines: file.write(object_heading("Routine", "%s.%s" % (schema.name, routine.name))) file.write(routine.temp_sql) for table in schema.tables: for trigger in table.triggers: file.write(object_heading("Trigger", "%s.%s" % (schema.name, trigger.name))) file.write(trigger.temp_sql+";\n") postamble = catalog.customData["migration:postamble"] if postamble and postamble.temp_sql: #file.write(object_heading("Postamble script", "")) file.write(postamble.temp_sql+"\n") file.close() return 1
[ "def", "createScriptForCatalogObjects", "(", "path", ",", "catalog", ",", "objectCreationParams", ")", ":", "def", "object_heading", "(", "type", ",", "name", ")", ":", "text", "=", "\"\"\"\n-- ----------------------------------------------------------------------------\n-- %s %s\n-- ----------------------------------------------------------------------------\n\"\"\"", "%", "(", "type", ",", "name", ")", "return", "text", "import", "time", "file", "=", "open", "(", "path", ",", "\"w+\"", ")", "file", ".", "write", "(", "\"\"\"-- ----------------------------------------------------------------------------\n-- MySQL Workbench Migration\n-- Migrated Schemata: %s\n-- Source Schemata: %s\n-- Created: %s\n-- Workbench Version: %s\n-- ----------------------------------------------------------------------------\n\n\"\"\"", "%", "(", "\", \"", ".", "join", "(", "[", "s", ".", "name", "for", "s", "in", "catalog", ".", "schemata", "]", ")", ",", "\", \"", ".", "join", "(", "[", "s", ".", "oldName", "for", "s", "in", "catalog", ".", "schemata", "]", ")", ",", "time", ".", "ctime", "(", ")", ",", "Version", ".", "fromgrt", "(", "grt", ".", "root", ".", "wb", ".", "info", ".", "version", ")", ")", ")", "preamble", "=", "catalog", ".", "customData", "[", "\"migration:preamble\"", "]", "if", "preamble", "and", "preamble", ".", "temp_sql", ":", "#file.write(object_heading(\"Preamble script\", \"\"))", "file", ".", "write", "(", "preamble", ".", "temp_sql", "+", "\"\\n\"", ")", "for", "schema", "in", "catalog", ".", "schemata", ":", "file", ".", "write", "(", "object_heading", "(", "\"Schema\"", ",", "schema", ".", "name", ")", ")", "file", ".", "write", "(", "schema", ".", "temp_sql", "+", "\";\\n\"", ")", "for", "table", "in", "schema", ".", "tables", ":", "file", ".", "write", "(", "object_heading", "(", "\"Table\"", ",", "\"%s.%s\"", "%", "(", "schema", ".", "name", ",", "table", ".", "name", ")", ")", ")", "file", ".", "write", "(", "table", ".", "temp_sql", "+", "\";\\n\"", ")", "for", "view", "in", "schema", ".", "views", ":", "file", ".", "write", "(", "object_heading", "(", "\"View\"", ",", "\"%s.%s\"", "%", "(", "schema", ".", "name", ",", "view", ".", "name", ")", ")", ")", "file", ".", "write", "(", "view", ".", "temp_sql", "+", "\";\\n\"", ")", "for", "routine", "in", "schema", ".", "routines", ":", "file", ".", "write", "(", "object_heading", "(", "\"Routine\"", ",", "\"%s.%s\"", "%", "(", "schema", ".", "name", ",", "routine", ".", "name", ")", ")", ")", "file", ".", "write", "(", "routine", ".", "temp_sql", ")", "for", "table", "in", "schema", ".", "tables", ":", "for", "trigger", "in", "table", ".", "triggers", ":", "file", ".", "write", "(", "object_heading", "(", "\"Trigger\"", ",", "\"%s.%s\"", "%", "(", "schema", ".", "name", ",", "trigger", ".", "name", ")", ")", ")", "file", ".", "write", "(", "trigger", ".", "temp_sql", "+", "\";\\n\"", ")", "postamble", "=", "catalog", ".", "customData", "[", "\"migration:postamble\"", "]", "if", "postamble", "and", "postamble", ".", "temp_sql", ":", "#file.write(object_heading(\"Postamble script\", \"\"))", "file", ".", "write", "(", "postamble", ".", "temp_sql", "+", "\"\\n\"", ")", "file", ".", "close", "(", ")", "return", "1" ]
https://github.com/mysql/mysql-workbench/blob/2f35f9034f015cbcd22139a60e1baa2e3e8e795c/modules/db.mysql/db_mysql_fe_grt.py#L230-L290
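The record's function only runs inside MySQL Workbench's scripting environment (it references `grt.root` and `Version.fromgrt`), so the sketch below reproduces just its heading/section-writing pattern with hypothetical stand-in objects; every name here is illustrative, not part of the Workbench API:

from types import SimpleNamespace

def object_heading(type, name):
    # Same banner format the record's inner helper produces.
    text = """
-- ----------------------------------------------------------------------------
-- %s %s
-- ----------------------------------------------------------------------------
""" % (type, name)
    return text

# Hypothetical catalog fragment: in Workbench, temp_sql would be filled in
# beforehand by generateSQLCreateStatements().
table = SimpleNamespace(name="t1", temp_sql="CREATE TABLE `s1`.`t1` (id INT)")
schema = SimpleNamespace(name="s1", temp_sql="CREATE SCHEMA IF NOT EXISTS `s1`",
                         tables=[table])

with open("migration_demo.sql", "w") as out:
    out.write(object_heading("Schema", schema.name))
    out.write(schema.temp_sql + ";\n")
    for t in schema.tables:
        out.write(object_heading("Table", "%s.%s" % (schema.name, t.name)))
        out.write(t.temp_sql + ";\n")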
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
models/AI-Model-Zoo/caffe-xilinx/examples/pycaffe/layers/pascal_multilabel_datalayers.py
python
check_params
(params)
A utility function to check the parameters for the data layers.
A utility function to check the parameters for the data layers.
[ "A", "utility", "function", "to", "check", "the", "parameters", "for", "the", "data", "layers", "." ]
def check_params(params):
    """
    A utility function to check the parameters for the data layers.
    """
    assert 'split' in params.keys(), 'Params must include split (train, val, or test).'

    required = ['batch_size', 'pascal_root', 'im_shape']
    for r in required:
        assert r in params.keys(), 'Params must include {}'.format(r)
[ "def", "check_params", "(", "params", ")", ":", "assert", "'split'", "in", "params", ".", "keys", "(", ")", ",", "'Params must include split (train, val, or test).'", "required", "=", "[", "'batch_size'", ",", "'pascal_root'", ",", "'im_shape'", "]", "for", "r", "in", "required", ":", "assert", "r", "in", "params", ".", "keys", "(", ")", ",", "'Params must include {}'", ".", "format", "(", "r", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/models/AI-Model-Zoo/caffe-xilinx/examples/pycaffe/layers/pascal_multilabel_datalayers.py#L196-L205
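A usage sketch for the record above; it re-declares check_params so the snippet runs on its own. The dict values (path, batch size, shape) are made-up placeholders — only the required key names come from the function's own checks:

def check_params(params):
    """
    A utility function to check the parameters for the data layers.
    """
    assert 'split' in params.keys(), 'Params must include split (train, val, or test).'
    required = ['batch_size', 'pascal_root', 'im_shape']
    for r in required:
        assert r in params.keys(), 'Params must include {}'.format(r)

# Hypothetical parameter dict; values are placeholders, not from the record.
params = dict(split='train', batch_size=128,
              pascal_root='/data/VOCdevkit/VOC2007',  # placeholder path
              im_shape=(227, 227))
check_params(params)  # passes silently

try:
    check_params(dict(split='val', batch_size=128))
except AssertionError as e:
    print(e)  # -> Params must include pascal_root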
arvidn/libtorrent
717fc489aa1d3380b7f5f7e5167ba076a60035e6
tools/benchmark_checking.py
python
rm_file_or_dir
(path)
Attempt to remove file or directory at path
Attempt to remove file or directory at path
[ "Attempt", "to", "remove", "file", "or", "directory", "at", "path" ]
def rm_file_or_dir(path):
    """ Attempt to remove file or directory at path """
    try:
        shutil.rmtree(path)
    except Exception:
        pass
    try:
        os.remove(path)
    except Exception:
        pass
[ "def", "rm_file_or_dir", "(", "path", ")", ":", "try", ":", "shutil", ".", "rmtree", "(", "path", ")", "except", "Exception", ":", "pass", "try", ":", "os", ".", "remove", "(", "path", ")", "except", "Exception", ":", "pass" ]
https://github.com/arvidn/libtorrent/blob/717fc489aa1d3380b7f5f7e5167ba076a60035e6/tools/benchmark_checking.py#L101-L112
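A self-contained usage sketch for the record above; it re-declares rm_file_or_dir verbatim (with the imports it needs) and exercises both the file and directory branches:

import os
import shutil
import tempfile

def rm_file_or_dir(path):
    """ Attempt to remove file or directory at path """
    try:
        shutil.rmtree(path)
    except Exception:
        pass
    try:
        os.remove(path)
    except Exception:
        pass

# Build a throwaway directory and file, then remove both with the helper.
tmp = tempfile.mkdtemp()
f = os.path.join(tmp, "payload.bin")
open(f, "w").close()

rm_file_or_dir(f)    # rmtree fails on a plain file; os.remove succeeds
rm_file_or_dir(tmp)  # rmtree removes the directory; os.remove then no-ops
assert not os.path.exists(tmp)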