Dataset schema (one record per Python function; column name, type, and observed value lengths):

  nwo                 string, 5–86 chars
  sha                 string, 40 chars
  path                string, 4–189 chars
  language            string, 1 distinct value
  identifier          string, 1–94 chars
  parameters          string, 2–4.03k chars
  argument_list       string, 1 distinct value
  return_statement    string, 0–11.5k chars
  docstring           string, 1–33.2k chars
  docstring_summary   string, 0–5.15k chars
  docstring_tokens    list
  function            string, 34–151k chars
  function_tokens     list
  url                 string, 90–278 chars
bigartm/bigartm
47e37f982de87aa67bfd475ff1f39da696b181b3
3rdparty/protobuf-3.0.0/python/google/protobuf/internal/python_message.py
python
_AddPropertiesForNonRepeatedScalarField
(field, cls)
Adds a public property for a nonrepeated, scalar protocol message field. Clients can use this property to get and directly set the value of the field. Note that when the client sets the value of a field by using this property, all necessary "has" bits are set as a side-effect, and we also perform type-checking. Args: field: A FieldDescriptor for this field. cls: The class we're constructing.
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.
  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  type_checker = type_checkers.GetTypeChecker(field)
  default_value = field.default_value
  valid_values = set()
  is_proto3 = field.containing_type.syntax == "proto3"

  def getter(self):
    # TODO(protobuf-team): This may be broken since there may not be
    # default_value.  Combine with has_default_value somehow.
    return self._fields.get(field, default_value)
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  clear_when_set_to_default = is_proto3 and not field.containing_oneof

  def field_setter(self, new_value):
    # pylint: disable=protected-access
    # Testing the value for truthiness captures all of the proto3 defaults
    # (0, 0.0, enum 0, and False).
    new_value = type_checker.CheckValue(new_value)
    if clear_when_set_to_default and not new_value:
      self._fields.pop(field, None)
    else:
      self._fields[field] = new_value
    # Check _cached_byte_size_dirty inline to improve performance, since scalar
    # setters are called frequently.
    if not self._cached_byte_size_dirty:
      self._Modified()

  if field.containing_oneof:
    def setter(self, new_value):
      field_setter(self, new_value)
      self._UpdateOneofState(field)
  else:
    setter = field_setter

  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name

  # Add a property to encapsulate the getter/setter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
[ "def", "_AddPropertiesForNonRepeatedScalarField", "(", "field", ",", "cls", ")", ":", "proto_field_name", "=", "field", ".", "name", "property_name", "=", "_PropertyName", "(", "proto_field_name", ")", "type_checker", "=", "type_checkers", ".", "GetTypeChecker", "(", "field", ")", "default_value", "=", "field", ".", "default_value", "valid_values", "=", "set", "(", ")", "is_proto3", "=", "field", ".", "containing_type", ".", "syntax", "==", "\"proto3\"", "def", "getter", "(", "self", ")", ":", "# TODO(protobuf-team): This may be broken since there may not be", "# default_value. Combine with has_default_value somehow.", "return", "self", ".", "_fields", ".", "get", "(", "field", ",", "default_value", ")", "getter", ".", "__module__", "=", "None", "getter", ".", "__doc__", "=", "'Getter for %s.'", "%", "proto_field_name", "clear_when_set_to_default", "=", "is_proto3", "and", "not", "field", ".", "containing_oneof", "def", "field_setter", "(", "self", ",", "new_value", ")", ":", "# pylint: disable=protected-access", "# Testing the value for truthiness captures all of the proto3 defaults", "# (0, 0.0, enum 0, and False).", "new_value", "=", "type_checker", ".", "CheckValue", "(", "new_value", ")", "if", "clear_when_set_to_default", "and", "not", "new_value", ":", "self", ".", "_fields", ".", "pop", "(", "field", ",", "None", ")", "else", ":", "self", ".", "_fields", "[", "field", "]", "=", "new_value", "# Check _cached_byte_size_dirty inline to improve performance, since scalar", "# setters are called frequently.", "if", "not", "self", ".", "_cached_byte_size_dirty", ":", "self", ".", "_Modified", "(", ")", "if", "field", ".", "containing_oneof", ":", "def", "setter", "(", "self", ",", "new_value", ")", ":", "field_setter", "(", "self", ",", "new_value", ")", "self", ".", "_UpdateOneofState", "(", "field", ")", "else", ":", "setter", "=", "field_setter", "setter", ".", "__module__", "=", "None", "setter", ".", "__doc__", "=", "'Setter for %s.'", "%", "proto_field_name", "# Add a property to encapsulate the getter/setter.", "doc", "=", "'Magic attribute generated for \"%s\" proto field.'", "%", "proto_field_name", "setattr", "(", "cls", ",", "property_name", ",", "property", "(", "getter", ",", "setter", ",", "doc", "=", "doc", ")", ")" ]
https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/3rdparty/protobuf-3.0.0/python/google/protobuf/internal/python_message.py#L638-L691
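The property generated by this helper behaves like a plain attribute on message instances. A minimal sketch of that behavior, assuming a hypothetical generated module example_pb2 with a proto3 message Person that has an int32 field "age" (that class is an illustration, not part of the record above):

from example_pb2 import Person  # hypothetical generated message class

msg = Person()
msg.age = 42           # setter: type-checked, then stored in msg._fields
print(msg.age)         # getter: 42

try:
    msg.age = "forty"  # wrong type: CheckValue raises TypeError
except TypeError as e:
    print("rejected:", e)

msg.age = 0            # proto3 default for a non-oneof field: the entry is dropped again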
garbear/kodi-steamlink
3f8e5970b01607cdb3c2688fbaa78e08f2d9c561
tools/EventClients/lib/python/zeroconf.py
python
Browser.add_service
( self, type, handler = None )
Add a service that the browser should watch for
def add_service( self, type, handler = None ):
    """
    Add a service that the browser should watch for
    """
    self.sbrowser = dbus.Interface(
        self.bus.get_object(
            avahi.DBUS_NAME,
            self.server.ServiceBrowserNew(
                avahi.IF_UNSPEC,
                avahi.PROTO_UNSPEC,
                type,
                'local',
                dbus.UInt32(0))),
        avahi.DBUS_INTERFACE_SERVICE_BROWSER)
    self.handlers[ type ] = handler
    self.sbrowser.connect_to_signal("ItemNew", self._new_item_handler)
    self.sbrowser.connect_to_signal("ItemRemove", self._remove_item_handler)
[ "def", "add_service", "(", "self", ",", "type", ",", "handler", "=", "None", ")", ":", "self", ".", "sbrowser", "=", "dbus", ".", "Interface", "(", "self", ".", "bus", ".", "get_object", "(", "avahi", ".", "DBUS_NAME", ",", "self", ".", "server", ".", "ServiceBrowserNew", "(", "avahi", ".", "IF_UNSPEC", ",", "avahi", ".", "PROTO_UNSPEC", ",", "type", ",", "'local'", ",", "dbus", ".", "UInt32", "(", "0", ")", ")", ")", ",", "avahi", ".", "DBUS_INTERFACE_SERVICE_BROWSER", ")", "self", ".", "handlers", "[", "type", "]", "=", "handler", "self", ".", "sbrowser", ".", "connect_to_signal", "(", "\"ItemNew\"", ",", "self", ".", "_new_item_handler", ")", "self", ".", "sbrowser", ".", "connect_to_signal", "(", "\"ItemRemove\"", ",", "self", ".", "_remove_item_handler", ")" ]
https://github.com/garbear/kodi-steamlink/blob/3f8e5970b01607cdb3c2688fbaa78e08f2d9c561/tools/EventClients/lib/python/zeroconf.py#L58-L76
mitmedialab/Junkyard-Jumbotron
7e32ecc8a01ea5a578fea6ea54f1f44c7f8f546e
python/calibrate.py
python
_get_change_basis_xform
(center, up, normal)
return xform
Return a matrix that changes from the given basis to the x-y-z basis. Basically, this 'straightens' out the plane described by the basis. The 'right' axis is calculated from the up and normal.
def _get_change_basis_xform(center, up, normal):
    """Return a matrix that changes from the given basis to the x-y-z basis.
    Basically, this 'straightens' out the plane described by the basis.
    The 'right' axis is calculated from the up and normal."""
    normal = normal.normalized()
    right = up.cross(normal).normalize()
    up = normal.cross(right).normalize()
    xform = Mat4.new_translate(*center)
    xform *= Mat4.new_change_basis(right, up, normal, center).inverse()
    return xform
[ "def", "_get_change_basis_xform", "(", "center", ",", "up", ",", "normal", ")", ":", "normal", "=", "normal", ".", "normalized", "(", ")", "right", "=", "up", ".", "cross", "(", "normal", ")", ".", "normalize", "(", ")", "up", "=", "normal", ".", "cross", "(", "right", ")", ".", "normalize", "(", ")", "xform", "=", "Mat4", ".", "new_translate", "(", "*", "center", ")", "xform", "*=", "Mat4", ".", "new_change_basis", "(", "right", ",", "up", ",", "normal", ",", "center", ")", ".", "inverse", "(", ")", "return", "xform" ]
https://github.com/mitmedialab/Junkyard-Jumbotron/blob/7e32ecc8a01ea5a578fea6ea54f1f44c7f8f546e/python/calibrate.py#L29-L39
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/python/turicreate/data_structures/sarray.py
python
SArray.dict_values
(self)
Create an SArray that contains all the values from each dictionary element as a list. Fails on SArrays whose data type is not ``dict``. Returns ------- out : SArray A SArray of list type, where each element is a list of values from the input SArray element. See Also -------- dict_keys Examples -------- >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_values() dtype: list Rows: 2 [[1, 5, 7], [2, 1, 5]]
def dict_values(self):
    """
    Create an SArray that contains all the values from each dictionary
    element as a list. Fails on SArrays whose data type is not ``dict``.

    Returns
    -------
    out : SArray
        A SArray of list type, where each element is a list of values
        from the input SArray element.

    See Also
    --------
    dict_keys

    Examples
    --------
    >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
    ...                         {"this": 2, "are": 1, "cat": 5}])
    >>> sa.dict_values()
    dtype: list
    Rows: 2
    [[1, 5, 7], [2, 1, 5]]
    """
    with cython_context():
        return SArray(_proxy=self.__proxy__.dict_values())
[ "def", "dict_values", "(", "self", ")", ":", "with", "cython_context", "(", ")", ":", "return", "SArray", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "dict_values", "(", ")", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/data_structures/sarray.py#L1748-L1775
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/xmlrpc/client.py
python
loads
(data, use_datetime=False, use_builtin_types=False)
return u.close(), u.getmethodname()
data -> unmarshalled data, method name Convert an XML-RPC packet to unmarshalled data plus a method name (None if not present). If the XML-RPC packet represents a fault condition, this function raises a Fault exception.
def loads(data, use_datetime=False, use_builtin_types=False):
    """data -> unmarshalled data, method name

    Convert an XML-RPC packet to unmarshalled data plus a method
    name (None if not present).

    If the XML-RPC packet represents a fault condition, this function
    raises a Fault exception.
    """
    p, u = getparser(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
    p.feed(data)
    p.close()
    return u.close(), u.getmethodname()
[ "def", "loads", "(", "data", ",", "use_datetime", "=", "False", ",", "use_builtin_types", "=", "False", ")", ":", "p", ",", "u", "=", "getparser", "(", "use_datetime", "=", "use_datetime", ",", "use_builtin_types", "=", "use_builtin_types", ")", "p", ".", "feed", "(", "data", ")", "p", ".", "close", "(", ")", "return", "u", ".", "close", "(", ")", ",", "u", ".", "getmethodname", "(", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/xmlrpc/client.py#L1019-L1031
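A quick round trip through the standard library shows what loads returns; dumps is the matching marshalling helper from the same xmlrpc.client module:

import xmlrpc.client

# Marshal a call to a method named "add" with two integer parameters.
packet = xmlrpc.client.dumps((2, 3), methodname="add")

# Unmarshal it again: loads returns (params, methodname).
params, method = xmlrpc.client.loads(packet)
print(method, params)   # -> add (2, 3)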
google/mozc
7329757e1ad30e327c1ae823a8302c79482d6b9c
src/build_tools/copy_dll_and_symbol.py
python
ParseOption
()
return opts
Parse command line options.
def ParseOption():
  """Parse command line options."""
  parser = optparse.OptionParser()
  MSG = ' you can use %s as path separator' % os.pathsep
  parser.add_option('--dll_paths', dest='dll_paths', default='',
                    help='Search paths for DLLs.' + MSG)
  parser.add_option('--pdb_paths', dest='pdb_paths', default='',
                    help='Search paths for PDB files.' + MSG)
  parser.add_option('--target_dir', dest='target_dir', default='',
                    help='Deploy target directory.')
  parser.add_option('--basenames', dest='basenames', default='',
                    help='The basenames of DLL and/or PDB.' + MSG)
  (opts, _) = parser.parse_args()
  return opts
[ "def", "ParseOption", "(", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "MSG", "=", "' you can use %s as path separator'", "%", "os", ".", "pathsep", "parser", ".", "add_option", "(", "'--dll_paths'", ",", "dest", "=", "'dll_paths'", ",", "default", "=", "''", ",", "help", "=", "'Search paths for DLLs.'", "+", "MSG", ")", "parser", ".", "add_option", "(", "'--pdb_paths'", ",", "dest", "=", "'pdb_paths'", ",", "default", "=", "''", ",", "help", "=", "'Search paths for PDB files.'", "+", "MSG", ")", "parser", ".", "add_option", "(", "'--target_dir'", ",", "dest", "=", "'target_dir'", ",", "default", "=", "''", ",", "help", "=", "'Deploy target directory.'", ")", "parser", ".", "add_option", "(", "'--basenames'", ",", "dest", "=", "'basenames'", ",", "default", "=", "''", ",", "help", "=", "'The basenames of DLL and/or PDB.'", "+", "MSG", ")", "(", "opts", ",", "_", ")", "=", "parser", ".", "parse_args", "(", ")", "return", "opts" ]
https://github.com/google/mozc/blob/7329757e1ad30e327c1ae823a8302c79482d6b9c/src/build_tools/copy_dll_and_symbol.py#L44-L59
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
src/icebox/icebox_py/__init__.py
python
Counter.read
(self)
return self.count
Read current counter value.
def read(self):
    """Read current counter value."""
    return self.count
[ "def", "read", "(", "self", ")", ":", "return", "self", ".", "count" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/src/icebox/icebox_py/__init__.py#L739-L741
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
scripts/Python/static-binding/lldb.py
python
SBType.IsVectorType
(self)
return _lldb.SBType_IsVectorType(self)
IsVectorType(SBType self) -> bool
def IsVectorType(self):
    """IsVectorType(SBType self) -> bool"""
    return _lldb.SBType_IsVectorType(self)
[ "def", "IsVectorType", "(", "self", ")", ":", "return", "_lldb", ".", "SBType_IsVectorType", "(", "self", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L12652-L12654
SOUI2/soui
774e5566b2d3254a94f4b3efd55b982e7c665434
third-part/jsoncpp/amalgamate.py
python
amalgamate_source
(source_top_dir=None, target_source_path=None, header_include_path=None)
Produces amalgamated source. Parameters: source_top_dir: top-directory target_source_path: output .cpp path header_include_path: generated header path relative to target_source_path.
def amalgamate_source(source_top_dir=None,
                      target_source_path=None,
                      header_include_path=None):
    """Produces amalgamated source.
       Parameters:
           source_top_dir: top-directory
           target_source_path: output .cpp path
           header_include_path: generated header path relative to target_source_path.
    """
    print("Amalgamating header...")
    header = AmalgamationFile(source_top_dir)
    header.add_text("/// Json-cpp amalgamated header (http://jsoncpp.sourceforge.net/).")
    header.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
    header.add_file("LICENSE", wrap_in_comment=True)
    header.add_text("#ifndef JSON_AMALGAMATED_H_INCLUDED")
    header.add_text("# define JSON_AMALGAMATED_H_INCLUDED")
    header.add_text("/// If defined, indicates that the source file is amalgamated")
    header.add_text("/// to prevent private header inclusion.")
    header.add_text("#define JSON_IS_AMALGAMATION")
    header.add_file("include/json/version.h")
    #header.add_file("include/json/allocator.h") # Not available here.
    header.add_file("include/json/config.h")
    header.add_file("include/json/forwards.h")
    header.add_file("include/json/features.h")
    header.add_file("include/json/value.h")
    header.add_file("include/json/reader.h")
    header.add_file("include/json/writer.h")
    header.add_file("include/json/assertions.h")
    header.add_text("#endif //ifndef JSON_AMALGAMATED_H_INCLUDED")
    target_header_path = os.path.join(os.path.dirname(target_source_path), header_include_path)
    print("Writing amalgamated header to %r" % target_header_path)
    header.write_to(target_header_path)

    base, ext = os.path.splitext(header_include_path)
    forward_header_include_path = base + "-forwards" + ext
    print("Amalgamating forward header...")
    header = AmalgamationFile(source_top_dir)
    header.add_text("/// Json-cpp amalgamated forward header (http://jsoncpp.sourceforge.net/).")
    header.add_text('/// It is intended to be used with #include "%s"' % forward_header_include_path)
    header.add_text("/// This header provides forward declaration for all JsonCpp types.")
    header.add_file("LICENSE", wrap_in_comment=True)
    header.add_text("#ifndef JSON_FORWARD_AMALGAMATED_H_INCLUDED")
    header.add_text("# define JSON_FORWARD_AMALGAMATED_H_INCLUDED")
    header.add_text("/// If defined, indicates that the source file is amalgamated")
    header.add_text("/// to prevent private header inclusion.")
    header.add_text("#define JSON_IS_AMALGAMATION")
    header.add_file("include/json/config.h")
    header.add_file("include/json/forwards.h")
    header.add_text("#endif //ifndef JSON_FORWARD_AMALGAMATED_H_INCLUDED")
    target_forward_header_path = os.path.join(os.path.dirname(target_source_path),
                                              forward_header_include_path)
    print("Writing amalgamated forward header to %r" % target_forward_header_path)
    header.write_to(target_forward_header_path)

    print("Amalgamating source...")
    source = AmalgamationFile(source_top_dir)
    source.add_text("/// Json-cpp amalgamated source (http://jsoncpp.sourceforge.net/).")
    source.add_text('/// It is intended to be used with #include "%s"' % header_include_path)
    source.add_file("LICENSE", wrap_in_comment=True)
    source.add_text("")
    source.add_text('#include "%s"' % header_include_path)
    source.add_text("""
#ifndef JSON_IS_AMALGAMATION
#error "Compile with -I PATH_TO_JSON_DIRECTORY"
#endif
""")
    source.add_text("")
    lib_json = "src/lib_json"
    source.add_file(os.path.join(lib_json, "json_tool.h"))
    source.add_file(os.path.join(lib_json, "json_reader.cpp"))
    source.add_file(os.path.join(lib_json, "json_valueiterator.inl"))
    source.add_file(os.path.join(lib_json, "json_value.cpp"))
    source.add_file(os.path.join(lib_json, "json_writer.cpp"))
    print("Writing amalgamated source to %r" % target_source_path)
    source.write_to(target_source_path)
[ "def", "amalgamate_source", "(", "source_top_dir", "=", "None", ",", "target_source_path", "=", "None", ",", "header_include_path", "=", "None", ")", ":", "print", "(", "\"Amalgamating header...\"", ")", "header", "=", "AmalgamationFile", "(", "source_top_dir", ")", "header", ".", "add_text", "(", "\"/// Json-cpp amalgamated header (http://jsoncpp.sourceforge.net/).\"", ")", "header", ".", "add_text", "(", "'/// It is intended to be used with #include \"%s\"'", "%", "header_include_path", ")", "header", ".", "add_file", "(", "\"LICENSE\"", ",", "wrap_in_comment", "=", "True", ")", "header", ".", "add_text", "(", "\"#ifndef JSON_AMALGAMATED_H_INCLUDED\"", ")", "header", ".", "add_text", "(", "\"# define JSON_AMALGAMATED_H_INCLUDED\"", ")", "header", ".", "add_text", "(", "\"/// If defined, indicates that the source file is amalgamated\"", ")", "header", ".", "add_text", "(", "\"/// to prevent private header inclusion.\"", ")", "header", ".", "add_text", "(", "\"#define JSON_IS_AMALGAMATION\"", ")", "header", ".", "add_file", "(", "\"include/json/version.h\"", ")", "#header.add_file(\"include/json/allocator.h\") # Not available here.", "header", ".", "add_file", "(", "\"include/json/config.h\"", ")", "header", ".", "add_file", "(", "\"include/json/forwards.h\"", ")", "header", ".", "add_file", "(", "\"include/json/features.h\"", ")", "header", ".", "add_file", "(", "\"include/json/value.h\"", ")", "header", ".", "add_file", "(", "\"include/json/reader.h\"", ")", "header", ".", "add_file", "(", "\"include/json/writer.h\"", ")", "header", ".", "add_file", "(", "\"include/json/assertions.h\"", ")", "header", ".", "add_text", "(", "\"#endif //ifndef JSON_AMALGAMATED_H_INCLUDED\"", ")", "target_header_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "target_source_path", ")", ",", "header_include_path", ")", "print", "(", "\"Writing amalgamated header to %r\"", "%", "target_header_path", ")", "header", ".", "write_to", "(", "target_header_path", ")", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "header_include_path", ")", "forward_header_include_path", "=", "base", "+", "\"-forwards\"", "+", "ext", "print", "(", "\"Amalgamating forward header...\"", ")", "header", "=", "AmalgamationFile", "(", "source_top_dir", ")", "header", ".", "add_text", "(", "\"/// Json-cpp amalgamated forward header (http://jsoncpp.sourceforge.net/).\"", ")", "header", ".", "add_text", "(", "'/// It is intended to be used with #include \"%s\"'", "%", "forward_header_include_path", ")", "header", ".", "add_text", "(", "\"/// This header provides forward declaration for all JsonCpp types.\"", ")", "header", ".", "add_file", "(", "\"LICENSE\"", ",", "wrap_in_comment", "=", "True", ")", "header", ".", "add_text", "(", "\"#ifndef JSON_FORWARD_AMALGAMATED_H_INCLUDED\"", ")", "header", ".", "add_text", "(", "\"# define JSON_FORWARD_AMALGAMATED_H_INCLUDED\"", ")", "header", ".", "add_text", "(", "\"/// If defined, indicates that the source file is amalgamated\"", ")", "header", ".", "add_text", "(", "\"/// to prevent private header inclusion.\"", ")", "header", ".", "add_text", "(", "\"#define JSON_IS_AMALGAMATION\"", ")", "header", ".", "add_file", "(", "\"include/json/config.h\"", ")", "header", ".", "add_file", "(", "\"include/json/forwards.h\"", ")", "header", ".", "add_text", "(", "\"#endif //ifndef JSON_FORWARD_AMALGAMATED_H_INCLUDED\"", ")", "target_forward_header_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", 
"target_source_path", ")", ",", "forward_header_include_path", ")", "print", "(", "\"Writing amalgamated forward header to %r\"", "%", "target_forward_header_path", ")", "header", ".", "write_to", "(", "target_forward_header_path", ")", "print", "(", "\"Amalgamating source...\"", ")", "source", "=", "AmalgamationFile", "(", "source_top_dir", ")", "source", ".", "add_text", "(", "\"/// Json-cpp amalgamated source (http://jsoncpp.sourceforge.net/).\"", ")", "source", ".", "add_text", "(", "'/// It is intended to be used with #include \"%s\"'", "%", "header_include_path", ")", "source", ".", "add_file", "(", "\"LICENSE\"", ",", "wrap_in_comment", "=", "True", ")", "source", ".", "add_text", "(", "\"\"", ")", "source", ".", "add_text", "(", "'#include \"%s\"'", "%", "header_include_path", ")", "source", ".", "add_text", "(", "\"\"\"\n#ifndef JSON_IS_AMALGAMATION\n#error \"Compile with -I PATH_TO_JSON_DIRECTORY\"\n#endif\n\"\"\"", ")", "source", ".", "add_text", "(", "\"\"", ")", "lib_json", "=", "\"src/lib_json\"", "source", ".", "add_file", "(", "os", ".", "path", ".", "join", "(", "lib_json", ",", "\"json_tool.h\"", ")", ")", "source", ".", "add_file", "(", "os", ".", "path", ".", "join", "(", "lib_json", ",", "\"json_reader.cpp\"", ")", ")", "source", ".", "add_file", "(", "os", ".", "path", ".", "join", "(", "lib_json", ",", "\"json_valueiterator.inl\"", ")", ")", "source", ".", "add_file", "(", "os", ".", "path", ".", "join", "(", "lib_json", ",", "\"json_value.cpp\"", ")", ")", "source", ".", "add_file", "(", "os", ".", "path", ".", "join", "(", "lib_json", ",", "\"json_writer.cpp\"", ")", ")", "print", "(", "\"Writing amalgamated source to %r\"", "%", "target_source_path", ")", "source", ".", "write_to", "(", "target_source_path", ")" ]
https://github.com/SOUI2/soui/blob/774e5566b2d3254a94f4b3efd55b982e7c665434/third-part/jsoncpp/amalgamate.py#L50-L127
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
media/tools/constrained_network_server/traffic_control.py
python
_AddSubQdisc
(config)
Adds a qdisc attached to the class identified by the config port. Args: config: Constraint configuration dictionary, format: port: Port to constrain (integer 1-65535). interface: Network interface name (string). latency: Delay added on each packet sent (integer in ms). loss: Percentage of packets to drop (integer 0-100).
def _AddSubQdisc(config):
  """Adds a qdisc attached to the class identified by the config port.

  Args:
    config: Constraint configuration dictionary, format:
      port: Port to constrain (integer 1-65535).
      interface: Network interface name (string).
      latency: Delay added on each packet sent (integer in ms).
      loss: Percentage of packets to drop (integer 0-100).
  """
  port_hex = '%x' % config['port']
  class_id = '1:%x' % config['port']
  command = ['sudo', 'tc', 'qdisc', 'add', 'dev', config['interface'],
             'parent', class_id, 'handle', port_hex + ':0', 'netem']

  # Check if packet-loss is set in the configuration.
  if 'loss' in config.keys() and config['loss']:
    loss = '%d%%' % config['loss']
    command.extend(['loss', loss])
  # Check if latency is set in the configuration.
  if 'latency' in config.keys() and config['latency']:
    latency = '%dms' % config['latency']
    command.extend(['delay', latency])

  _Exec(command, msg='Could not attach qdisc to class ID %s.' % class_id)
[ "def", "_AddSubQdisc", "(", "config", ")", ":", "port_hex", "=", "'%x'", "%", "config", "[", "'port'", "]", "class_id", "=", "'1:%x'", "%", "config", "[", "'port'", "]", "command", "=", "[", "'sudo'", ",", "'tc'", ",", "'qdisc'", ",", "'add'", ",", "'dev'", ",", "config", "[", "'interface'", "]", ",", "'parent'", ",", "class_id", ",", "'handle'", ",", "port_hex", "+", "':0'", ",", "'netem'", "]", "# Check if packet-loss is set in the configuration.", "if", "'loss'", "in", "config", ".", "keys", "(", ")", "and", "config", "[", "'loss'", "]", ":", "loss", "=", "'%d%%'", "%", "config", "[", "'loss'", "]", "command", ".", "extend", "(", "[", "'loss'", ",", "loss", "]", ")", "# Check if latency is set in the configuration.", "if", "'latency'", "in", "config", ".", "keys", "(", ")", "and", "config", "[", "'latency'", "]", ":", "latency", "=", "'%dms'", "%", "config", "[", "'latency'", "]", "command", ".", "extend", "(", "[", "'delay'", ",", "latency", "]", ")", "_Exec", "(", "command", ",", "msg", "=", "'Could not attach qdisc to class ID %s.'", "%", "class_id", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/media/tools/constrained_network_server/traffic_control.py#L210-L234
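For a concrete sense of what this helper builds, here is the command list it would produce for a sample config (values chosen for illustration only; nothing is executed):

# Sample constraint configuration (illustrative values).
config = {'port': 8080, 'interface': 'eth0', 'latency': 100, 'loss': 5}

port_hex = '%x' % config['port']      # '1f90'
class_id = '1:%x' % config['port']    # '1:1f90'
command = ['sudo', 'tc', 'qdisc', 'add', 'dev', config['interface'],
           'parent', class_id, 'handle', port_hex + ':0', 'netem',
           'loss', '%d%%' % config['loss'],
           'delay', '%dms' % config['latency']]
print(' '.join(command))
# sudo tc qdisc add dev eth0 parent 1:1f90 handle 1f90:0 netem loss 5% delay 100ms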
microsoft/TSS.MSR
0f2516fca2cd9929c31d5450e39301c9bde43688
TSS.Py/src/TpmTypes.py
python
PCR_ReadResponse.__init__
(self, pcrUpdateCounter = 0, pcrSelectionOut = None, pcrValues = None)
This command returns the values of all PCR specified in pcrSelectionIn. Attributes: pcrUpdateCounter (int): The current value of the PCR update counter pcrSelectionOut (TPMS_PCR_SELECTION[]): The PCR in the returned list pcrValues (TPM2B_DIGEST[]): The contents of the PCR indicated in pcrSelectOut-˃ pcrSelection[] as tagged digests
def __init__(self, pcrUpdateCounter = 0, pcrSelectionOut = None, pcrValues = None):
    """ This command returns the values of all PCR specified in pcrSelectionIn.

    Attributes:
        pcrUpdateCounter (int): The current value of the PCR update counter
        pcrSelectionOut (TPMS_PCR_SELECTION[]): The PCR in the returned list
        pcrValues (TPM2B_DIGEST[]): The contents of the PCR indicated in
            pcrSelectOut-˃ pcrSelection[] as tagged digests
    """
    self.pcrUpdateCounter = pcrUpdateCounter
    self.pcrSelectionOut = pcrSelectionOut
    self.pcrValues = pcrValues
[ "def", "__init__", "(", "self", ",", "pcrUpdateCounter", "=", "0", ",", "pcrSelectionOut", "=", "None", ",", "pcrValues", "=", "None", ")", ":", "self", ".", "pcrUpdateCounter", "=", "pcrUpdateCounter", "self", ".", "pcrSelectionOut", "=", "pcrSelectionOut", "self", ".", "pcrValues", "=", "pcrValues" ]
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L13851-L13862
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/thumbnailctrl.py
python
ScrolledThumbnail.ScrollToSelected
(self)
Scrolls the :class:`ScrolledWindow` to the selected thumbnail.
def ScrollToSelected(self):
    """ Scrolls the :class:`ScrolledWindow` to the selected thumbnail. """

    if self.GetSelection() == -1:
        return

    # get row
    row = self.GetSelection()/self._cols

    # calc position to scroll view
    paintRect = self.GetPaintRect()
    y1 = row*(self._tHeight + self._tBorder) + self.GetCaptionHeight(0, row)
    y2 = y1 + self._tBorder + self._tHeight + self.GetCaptionHeight(row)

    if y1 < paintRect.GetTop():
        sy = y1 # scroll top
    elif y2 > paintRect.GetBottom():
        sy = y2 - paintRect.height # scroll bottom
    else:
        return

    # scroll view
    xu, yu = self.GetScrollPixelsPerUnit()
    sy = sy/yu + (sy%yu and [1] or [0])[0] # convert sy to scroll units
    x, y = self.GetViewStart()
    self.Scroll(x,sy)
[ "def", "ScrollToSelected", "(", "self", ")", ":", "if", "self", ".", "GetSelection", "(", ")", "==", "-", "1", ":", "return", "# get row", "row", "=", "self", ".", "GetSelection", "(", ")", "/", "self", ".", "_cols", "# calc position to scroll view", "paintRect", "=", "self", ".", "GetPaintRect", "(", ")", "y1", "=", "row", "*", "(", "self", ".", "_tHeight", "+", "self", ".", "_tBorder", ")", "+", "self", ".", "GetCaptionHeight", "(", "0", ",", "row", ")", "y2", "=", "y1", "+", "self", ".", "_tBorder", "+", "self", ".", "_tHeight", "+", "self", ".", "GetCaptionHeight", "(", "row", ")", "if", "y1", "<", "paintRect", ".", "GetTop", "(", ")", ":", "sy", "=", "y1", "# scroll top", "elif", "y2", ">", "paintRect", ".", "GetBottom", "(", ")", ":", "sy", "=", "y2", "-", "paintRect", ".", "height", "# scroll bottom", "else", ":", "return", "# scroll view", "xu", ",", "yu", "=", "self", ".", "GetScrollPixelsPerUnit", "(", ")", "sy", "=", "sy", "/", "yu", "+", "(", "sy", "%", "yu", "and", "[", "1", "]", "or", "[", "0", "]", ")", "[", "0", "]", "# convert sy to scroll units", "x", ",", "y", "=", "self", ".", "GetViewStart", "(", ")", "self", ".", "Scroll", "(", "x", ",", "sy", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/thumbnailctrl.py#L1946-L1972
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_windows.py
python
FontData.__init__
(self, *args, **kwargs)
__init__(self) -> FontData This class holds a variety of information related to font dialogs and is used to transfer settings to and results from a `wx.FontDialog`.
def __init__(self, *args, **kwargs):
    """
    __init__(self) -> FontData

    This class holds a variety of information related to font dialogs and
    is used to transfer settings to and results from a `wx.FontDialog`.
    """
    _windows_.FontData_swiginit(self,_windows_.new_FontData(*args, **kwargs))
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_windows_", ".", "FontData_swiginit", "(", "self", ",", "_windows_", ".", "new_FontData", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L3446-L3453
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/src/robotsim.py
python
WorldModel.makeRobot
(self, name: "char const *")
return _robotsim.WorldModel_makeRobot(self, name)
r""" makeRobot(WorldModel self, char const * name) -> RobotModel Creates a new empty robot. (Not terribly useful now since you can't resize the number of links yet)
r""" makeRobot(WorldModel self, char const * name) -> RobotModel
[ "r", "makeRobot", "(", "WorldModel", "self", "char", "const", "*", "name", ")", "-", ">", "RobotModel" ]
def makeRobot(self, name: "char const *") -> "RobotModel":
    r"""
    makeRobot(WorldModel self, char const * name) -> RobotModel

    Creates a new empty robot. (Not terribly useful now since you can't
    resize the number of links yet)
    """
    return _robotsim.WorldModel_makeRobot(self, name)
[ "def", "makeRobot", "(", "self", ",", "name", ":", "\"char const *\"", ")", "->", "\"RobotModel\"", ":", "return", "_robotsim", ".", "WorldModel_makeRobot", "(", "self", ",", "name", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/src/robotsim.py#L6071-L6080
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/websocket-client/websocket.py
python
WebSocket.recv_frame
(self)
return ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
recieve data as frame from server. return value: ABNF frame object.
def recv_frame(self):
    """
    recieve data as frame from server.

    return value: ABNF frame object.
    """
    # Header
    if self._frame_header is None:
        self._frame_header = self._recv_strict(2)
    b1 = ord(self._frame_header[0])
    fin = b1 >> 7 & 1
    rsv1 = b1 >> 6 & 1
    rsv2 = b1 >> 5 & 1
    rsv3 = b1 >> 4 & 1
    opcode = b1 & 0xf
    b2 = ord(self._frame_header[1])
    has_mask = b2 >> 7 & 1
    # Frame length
    if self._frame_length is None:
        length_bits = b2 & 0x7f
        if length_bits == 0x7e:
            length_data = self._recv_strict(2)
            self._frame_length = struct.unpack("!H", length_data)[0]
        elif length_bits == 0x7f:
            length_data = self._recv_strict(8)
            self._frame_length = struct.unpack("!Q", length_data)[0]
        else:
            self._frame_length = length_bits
    # Mask
    if self._frame_mask is None:
        self._frame_mask = self._recv_strict(4) if has_mask else ""
    # Payload
    payload = self._recv_strict(self._frame_length)
    if has_mask:
        payload = ABNF.mask(self._frame_mask, payload)
    # Reset for next frame
    self._frame_header = None
    self._frame_length = None
    self._frame_mask = None
    return ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
[ "def", "recv_frame", "(", "self", ")", ":", "# Header", "if", "self", ".", "_frame_header", "is", "None", ":", "self", ".", "_frame_header", "=", "self", ".", "_recv_strict", "(", "2", ")", "b1", "=", "ord", "(", "self", ".", "_frame_header", "[", "0", "]", ")", "fin", "=", "b1", ">>", "7", "&", "1", "rsv1", "=", "b1", ">>", "6", "&", "1", "rsv2", "=", "b1", ">>", "5", "&", "1", "rsv3", "=", "b1", ">>", "4", "&", "1", "opcode", "=", "b1", "&", "0xf", "b2", "=", "ord", "(", "self", ".", "_frame_header", "[", "1", "]", ")", "has_mask", "=", "b2", ">>", "7", "&", "1", "# Frame length", "if", "self", ".", "_frame_length", "is", "None", ":", "length_bits", "=", "b2", "&", "0x7f", "if", "length_bits", "==", "0x7e", ":", "length_data", "=", "self", ".", "_recv_strict", "(", "2", ")", "self", ".", "_frame_length", "=", "struct", ".", "unpack", "(", "\"!H\"", ",", "length_data", ")", "[", "0", "]", "elif", "length_bits", "==", "0x7f", ":", "length_data", "=", "self", ".", "_recv_strict", "(", "8", ")", "self", ".", "_frame_length", "=", "struct", ".", "unpack", "(", "\"!Q\"", ",", "length_data", ")", "[", "0", "]", "else", ":", "self", ".", "_frame_length", "=", "length_bits", "# Mask", "if", "self", ".", "_frame_mask", "is", "None", ":", "self", ".", "_frame_mask", "=", "self", ".", "_recv_strict", "(", "4", ")", "if", "has_mask", "else", "\"\"", "# Payload", "payload", "=", "self", ".", "_recv_strict", "(", "self", ".", "_frame_length", ")", "if", "has_mask", ":", "payload", "=", "ABNF", ".", "mask", "(", "self", ".", "_frame_mask", ",", "payload", ")", "# Reset for next frame", "self", ".", "_frame_header", "=", "None", "self", ".", "_frame_length", "=", "None", "self", ".", "_frame_mask", "=", "None", "return", "ABNF", "(", "fin", ",", "rsv1", ",", "rsv2", ",", "rsv3", ",", "opcode", ",", "has_mask", ",", "payload", ")" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/websocket-client/websocket.py#L629-L668
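The header bit-twiddling above follows RFC 6455. A standalone sketch of the same two-byte header parse, applied to the canonical unmasked "Hello" text frame from the RFC:

# First two bytes of an unmasked text frame carrying "Hello" (RFC 6455 example).
header = b"\x81\x05"

b1, b2 = header[0], header[1]
fin = b1 >> 7 & 1          # 1: final fragment
opcode = b1 & 0xf          # 0x1: text frame
has_mask = b2 >> 7 & 1     # 0: server-to-client frames are unmasked
length = b2 & 0x7f         # 5: payload length fits in 7 bits

print(fin, hex(opcode), has_mask, length)   # 1 0x1 0 5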
google/shaka-packager
e1b0c7c45431327fd3ce193514a5407d07b39b22
packager/third_party/protobuf/python/google/protobuf/internal/python_message.py
python
_AddMessageMethods
(message_descriptor, cls)
Adds implementations of all Message methods to cls.
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  if message_descriptor.is_extendable:
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddReprMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
  _AddReduceMethod(cls)
  # Adds methods which do not depend on cls.
  cls.Clear = _Clear
  cls.DiscardUnknownFields = _DiscardUnknownFields
  cls._SetListener = _SetListener
[ "def", "_AddMessageMethods", "(", "message_descriptor", ",", "cls", ")", ":", "_AddListFieldsMethod", "(", "message_descriptor", ",", "cls", ")", "_AddHasFieldMethod", "(", "message_descriptor", ",", "cls", ")", "_AddClearFieldMethod", "(", "message_descriptor", ",", "cls", ")", "if", "message_descriptor", ".", "is_extendable", ":", "_AddClearExtensionMethod", "(", "cls", ")", "_AddHasExtensionMethod", "(", "cls", ")", "_AddEqualsMethod", "(", "message_descriptor", ",", "cls", ")", "_AddStrMethod", "(", "message_descriptor", ",", "cls", ")", "_AddReprMethod", "(", "message_descriptor", ",", "cls", ")", "_AddUnicodeMethod", "(", "message_descriptor", ",", "cls", ")", "_AddByteSizeMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSerializeToStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddSerializePartialToStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddMergeFromStringMethod", "(", "message_descriptor", ",", "cls", ")", "_AddIsInitializedMethod", "(", "message_descriptor", ",", "cls", ")", "_AddMergeFromMethod", "(", "cls", ")", "_AddWhichOneofMethod", "(", "message_descriptor", ",", "cls", ")", "_AddReduceMethod", "(", "cls", ")", "# Adds methods which do not depend on cls.", "cls", ".", "Clear", "=", "_Clear", "cls", ".", "DiscardUnknownFields", "=", "_DiscardUnknownFields", "cls", ".", "_SetListener", "=", "_SetListener" ]
https://github.com/google/shaka-packager/blob/e1b0c7c45431327fd3ce193514a5407d07b39b22/packager/third_party/protobuf/python/google/protobuf/internal/python_message.py#L1295-L1318
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/Tkinter.py
python
mainloop
(n=0)
Run the main loop of Tcl.
def mainloop(n=0):
    """Run the main loop of Tcl."""
    _default_root.tk.mainloop(n)
[ "def", "mainloop", "(", "n", "=", "0", ")", ":", "_default_root", ".", "tk", ".", "mainloop", "(", "n", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib-tk/Tkinter.py#L359-L361
nickgillian/grt
4d4cab1999a349b00d8924da769ff3f0c29d3176
build/python/examples/PreProcessingModulesExamples/double_moving_average_example.py
python
main
()
GRT DoubleMovingAverageFilter Example This example demonstrates how to create and use the GRT DoubleMovingAverageFilter PreProcessing Module. The DoubleMovingAverageFilter implements a low pass double moving average filter. In this example we create an instance of a DoubleMovingAverageFilter and use this to filter some dummy data, generated from a sine wave + random noise. The test signal and filtered signals are then printed to std::cout. This example shows you how to: - Create a new DoubleMovingAverageFilter instance with a specific window size for a 1 dimensional signal - Filter some data using the DoubleMovingAverageFilter - Save the DoubleMovingAverageFilter settings to a file - Load the DoubleMovingAverageFilter settings from a file
def main():
    """GRT DoubleMovingAverageFilter Example

    This example demonstrates how to create and use the GRT DoubleMovingAverageFilter
    PreProcessing Module.

    The DoubleMovingAverageFilter implements a low pass double moving average filter.

    In this example we create an instance of a DoubleMovingAverageFilter and use this to
    filter some dummy data, generated from a sine wave + random noise. The test signal and
    filtered signals are then printed to std::cout.

    This example shows you how to:
    - Create a new DoubleMovingAverageFilter instance with a specific window size for a 1 dimensional signal
    - Filter some data using the DoubleMovingAverageFilter
    - Save the DoubleMovingAverageFilter settings to a file
    - Load the DoubleMovingAverageFilter settings from a file"""

    # Create a new instance of a double moving average filter with a window size of 5 for a 1 dimensional signal
    dma_filter = GRT.DoubleMovingAverageFilter(5, 1)

    # Generate some data (sine wave + noise) and filter it
    x = 0
    for i in range(1000):
        signal = math.sin(x) + np.random.uniform(-0.2, 0.2)
        filtered_value = dma_filter.filter(signal)
        print("%.3f %.3f" % (signal, filtered_value))
        x += math.tau / 1000.0 * 10.0

    # Save the filter settings to a file
    dma_filter.save("DoubleMovingAverageFilterSettings.grt")

    # We can then load the settings later if needed
    dma_filter.load("DoubleMovingAverageFilterSettings.grt")
[ "def", "main", "(", ")", ":", "# Create a new instance of a double moving average filter with a window size of 5 for a 1 dimensional signal", "dma_filter", "=", "GRT", ".", "DoubleMovingAverageFilter", "(", "5", ",", "1", ")", "# Generate some data (sine wave + noise) and filter it", "x", "=", "0", "for", "i", "in", "range", "(", "1000", ")", ":", "signal", "=", "math", ".", "sin", "(", "x", ")", "+", "np", ".", "random", ".", "uniform", "(", "-", "0.2", ",", "0.2", ")", "filtered_value", "=", "dma_filter", ".", "filter", "(", "signal", ")", "print", "(", "\"%.3f %.3f\"", "%", "(", "signal", ",", "filtered_value", ")", ")", "x", "+=", "math", ".", "tau", "/", "1000.0", "*", "10.0", "# Save the filter settings to a file", "dma_filter", ".", "save", "(", "\"DoubleMovingAverageFilterSettings.grt\"", ")", "# We can then load the settings later if needed", "dma_filter", ".", "load", "(", "\"DoubleMovingAverageFilterSettings.grt\"", ")" ]
https://github.com/nickgillian/grt/blob/4d4cab1999a349b00d8924da769ff3f0c29d3176/build/python/examples/PreProcessingModulesExamples/double_moving_average_example.py#L7-L41
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/keras/python/keras/backend.py
python
conv2d
(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
return _postprocess_conv2d_output(x, data_format)
2D convolution. Arguments: x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, `"same"` or `"valid"`. data_format: `"channels_last"` or `"channels_first"`. Whether to use Theano or TensorFlow data format for inputs/kernels/outputs. dilation_rate: tuple of 2 integers. Returns: A tensor, result of 2D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`.
def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.

  Arguments:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: `"channels_last"` or `"channels_first"`.
          Whether to use Theano or TensorFlow data format
          for inputs/kernels/outputs.
      dilation_rate: tuple of 2 integers.

  Returns:
      A tensor, result of 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  # With 4d inputs, nn.convolution only supports
  # data_format NHWC, so we transpose the inputs
  # in case we are in data_format channels_first.
  x = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format='NHWC')
  return _postprocess_conv2d_output(x, data_format)
[ "def", "conv2d", "(", "x", ",", "kernel", ",", "strides", "=", "(", "1", ",", "1", ")", ",", "padding", "=", "'valid'", ",", "data_format", "=", "None", ",", "dilation_rate", "=", "(", "1", ",", "1", ")", ")", ":", "if", "data_format", "is", "None", ":", "data_format", "=", "image_data_format", "(", ")", "if", "data_format", "not", "in", "{", "'channels_first'", ",", "'channels_last'", "}", ":", "raise", "ValueError", "(", "'Unknown data_format '", "+", "str", "(", "data_format", ")", ")", "# With 4d inputs, nn.convolution only supports", "# data_format NHWC, so we transpose the inputs", "# in case we are in data_format channels_first.", "x", "=", "_preprocess_conv2d_input", "(", "x", ",", "data_format", ")", "padding", "=", "_preprocess_padding", "(", "padding", ")", "x", "=", "nn", ".", "convolution", "(", "input", "=", "x", ",", "filter", "=", "kernel", ",", "dilation_rate", "=", "dilation_rate", ",", "strides", "=", "strides", ",", "padding", "=", "padding", ",", "data_format", "=", "'NHWC'", ")", "return", "_postprocess_conv2d_output", "(", "x", ",", "data_format", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/keras/python/keras/backend.py#L3253-L3295
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/autograd/profiler.py
python
record_function._call_end_callbacks_on_future
(self, fut: Future[Any])
return profiled_future
_call_end_callbacks_on_future is meant to be used for profiling async calls that return a future. Calling this function will extend recording beyond this scope, until the future is satisfied. It is useful for profiling the end to end time of asynchronous calls. This function should only be called once to attach the callback onto the future, and will throw if called multiple times. Args: fut: (torch._C.Future): future for which to schedule callback for. Returns: A future that completes with the value of the passed in future when the profiling callbacks have ran.
def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
    """
    _call_end_callbacks_on_future is meant to be used for profiling async
    calls that return a future. Calling this function will extend recording
    beyond this scope, until the future is satisfied. It is useful for profiling
    the end to end time of asynchronous calls. This function should only be called
    once to attach the callback onto the future, and will throw if called multiple
    times.

    Args:
        fut: (torch._C.Future): future for which to schedule
        callback for.

    Returns:
        A future that completes with the value of the passed in future when
        the profiling callbacks have ran.
    """
    # Throw if we have already attached a callback onto the future.
    if not self.run_callbacks_on_exit:
        raise RuntimeError("_call_end_callbacks_on_future can only be called once.")

    # We are scheduling to run this RecordFunction's end callbacks when the
    # passed in future completes, so don't run end callbacks on exit.
    self.run_callbacks_on_exit = False
    profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(self.handle, fut)
    return profiled_future
[ "def", "_call_end_callbacks_on_future", "(", "self", ",", "fut", ":", "Future", "[", "Any", "]", ")", "->", "Future", "[", "Any", "]", ":", "# Throw if we have already attached a callback onto the future.", "if", "not", "self", ".", "run_callbacks_on_exit", ":", "raise", "RuntimeError", "(", "\"_call_end_callbacks_on_future can only be called once.\"", ")", "# We are scheduling to run this RecordFunction's end callbacks when the", "# passed in future completes, so don't run end callbacks on exit.", "self", ".", "run_callbacks_on_exit", "=", "False", "profiled_future", "=", "torch", ".", "ops", ".", "profiler", ".", "_call_end_callbacks_on_jit_fut", "(", "self", ".", "handle", ",", "fut", ")", "return", "profiled_future" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/autograd/profiler.py#L443-L469
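The usage pattern the docstring describes pairs this with an asynchronous call made inside a record_function scope. A rough sketch, assuming the RPC framework is already initialized; the worker name and RPC target are placeholders:

import torch
import torch.distributed.rpc as rpc
from torch.autograd.profiler import profile, record_function

with profile() as prof:
    with record_function("my_async_block") as rf:
        # Placeholder async call; "worker1" is assumed to exist.
        fut = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 1))
        # Extend the "my_async_block" range until the future completes.
        fut = rf._call_end_callbacks_on_future(fut)
    result = fut.wait()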
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/python/ops/logging_ops.py
python
histogram_summary
(tag, values, collections=None, name=None)
return val
Outputs a `Summary` protocol buffer with a histogram. The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: tag: A `string` `Tensor`. 0-D. Tag to use for the summary value. values: A real numeric `Tensor`. Any shape. Values to use to build the histogram. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. name: A name for the operation (optional). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer.
def histogram_summary(tag, values, collections=None, name=None):
  """Outputs a `Summary` protocol buffer with a histogram.

  The generated
  [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  has one summary value containing a histogram for `values`.

  This op reports an `InvalidArgument` error if any value is not finite.

  Args:
    tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
    values: A real numeric `Tensor`. Any shape. Values to use to
      build the histogram.
    collections: Optional list of graph collections keys. The new summary op is
      added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
    name: A name for the operation (optional).

  Returns:
    A scalar `Tensor` of type `string`. The serialized `Summary` protocol
    buffer.
  """
  with ops.op_scope([tag, values], name, "HistogramSummary") as scope:
    val = gen_logging_ops._histogram_summary(
        tag=tag, values=values, name=scope)
    _Collect(val, collections, [ops.GraphKeys.SUMMARIES])
  return val
[ "def", "histogram_summary", "(", "tag", ",", "values", ",", "collections", "=", "None", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "tag", ",", "values", "]", ",", "name", ",", "\"HistogramSummary\"", ")", "as", "scope", ":", "val", "=", "gen_logging_ops", ".", "_histogram_summary", "(", "tag", "=", "tag", ",", "values", "=", "values", ",", "name", "=", "scope", ")", "_Collect", "(", "val", ",", "collections", ",", "[", "ops", ".", "GraphKeys", ".", "SUMMARIES", "]", ")", "return", "val" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/logging_ops.py#L102-L127
google/mysql-protobuf
467cda676afaa49e762c5c9164a43f6ad31a1fbf
protobuf/python/google/protobuf/descriptor.py
python
_NestedDescriptorBase.GetTopLevelContainingType
(self)
return desc
Returns the root if this is a nested type, or itself if its the root.
Returns the root if this is a nested type, or itself if its the root.
[ "Returns", "the", "root", "if", "this", "is", "a", "nested", "type", "or", "itself", "if", "its", "the", "root", "." ]
def GetTopLevelContainingType(self): """Returns the root if this is a nested type, or itself if its the root.""" desc = self while desc.containing_type is not None: desc = desc.containing_type return desc
[ "def", "GetTopLevelContainingType", "(", "self", ")", ":", "desc", "=", "self", "while", "desc", ".", "containing_type", "is", "not", "None", ":", "desc", "=", "desc", ".", "containing_type", "return", "desc" ]
https://github.com/google/mysql-protobuf/blob/467cda676afaa49e762c5c9164a43f6ad31a1fbf/protobuf/python/google/protobuf/descriptor.py#L178-L183
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
packages/Python/lldbsuite/support/fs.py
python
_find_file_in_paths
(paths, exe_basename)
return None
Returns the full exe path for the first path match. @params paths the list of directories to search for the exe_basename executable @params exe_basename the name of the file for which to search. e.g. "swig" or "swig.exe". @return the full path to the executable if found in one of the given paths; otherwise, returns None.
Returns the full exe path for the first path match.
[ "Returns", "the", "full", "exe", "path", "for", "the", "first", "path", "match", "." ]
def _find_file_in_paths(paths, exe_basename): """Returns the full exe path for the first path match. @params paths the list of directories to search for the exe_basename executable @params exe_basename the name of the file for which to search. e.g. "swig" or "swig.exe". @return the full path to the executable if found in one of the given paths; otherwise, returns None. """ for path in paths: trial_exe_path = os.path.join(path, exe_basename) if os.path.exists(trial_exe_path): return os.path.normcase(trial_exe_path) return None
[ "def", "_find_file_in_paths", "(", "paths", ",", "exe_basename", ")", ":", "for", "path", "in", "paths", ":", "trial_exe_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "exe_basename", ")", "if", "os", ".", "path", ".", "exists", "(", "trial_exe_path", ")", ":", "return", "os", ".", "path", ".", "normcase", "(", "trial_exe_path", ")", "return", "None" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/packages/Python/lldbsuite/support/fs.py#L16-L31
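A minimal usage sketch for the record above, assuming `_find_file_in_paths` from that snippet has been pasted into scope; the `swig` basename is only an illustration of how lldbsuite typically drives this helper.

```python
import os

# Hedged illustration: search every directory on the current PATH for a
# `swig` binary, the way the lldbsuite helper above is normally used.
search_paths = os.environ.get("PATH", "").split(os.pathsep)

swig_path = _find_file_in_paths(search_paths, "swig")  # function from the record above
if swig_path is None:
    print("swig was not found on PATH")
else:
    print("found swig at", swig_path)
```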
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/linalg/linalg.py
python
transpose
(a)
return swapaxes(a, -1, -2)
Transpose each matrix in a stack of matrices. Unlike np.transpose, this only swaps the last two axes, rather than all of them Parameters ---------- a : (...,M,N) array_like Returns ------- aT : (...,N,M) ndarray
Transpose each matrix in a stack of matrices.
[ "Transpose", "each", "matrix", "in", "a", "stack", "of", "matrices", "." ]
def transpose(a): """ Transpose each matrix in a stack of matrices. Unlike np.transpose, this only swaps the last two axes, rather than all of them Parameters ---------- a : (...,M,N) array_like Returns ------- aT : (...,N,M) ndarray """ return swapaxes(a, -1, -2)
[ "def", "transpose", "(", "a", ")", ":", "return", "swapaxes", "(", "a", ",", "-", "1", ",", "-", "2", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/linalg/linalg.py#L215-L230
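For the stacked `transpose` helper above, a short self-contained sketch of the behaviour it documents: only the last two axes are swapped, unlike plain `np.transpose`.

```python
import numpy as np

# A stack of two 3x4 matrices.
a = np.arange(24).reshape(2, 3, 4)

# Same operation as the helper above: swap only the last two axes.
stack_t = np.swapaxes(a, -1, -2)
print(stack_t.shape)            # (2, 4, 3)

# Plain np.transpose reverses *all* axes by default, which is why the
# helper exists for stacks of matrices.
print(np.transpose(a).shape)    # (4, 3, 2)
```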
kushview/Element
1cc16380caa2ab79461246ba758b9de1f46db2a5
waflib/extras/wafcache.py
python
loop
(service)
This function is run when this file is run as a standalone python script, it assumes a parent process that will communicate the commands to it as pickled-encoded tuples (one line per command) The commands are to copy files to the cache or copy files from the cache to a target destination
This function is run when this file is run as a standalone python script, it assumes a parent process that will communicate the commands to it as pickled-encoded tuples (one line per command)
[ "This", "function", "is", "run", "when", "this", "file", "is", "run", "as", "a", "standalone", "python", "script", "it", "assumes", "a", "parent", "process", "that", "will", "communicate", "the", "commands", "to", "it", "as", "pickled", "-", "encoded", "tuples", "(", "one", "line", "per", "command", ")" ]
def loop(service): """ This function is run when this file is run as a standalone python script, it assumes a parent process that will communicate the commands to it as pickled-encoded tuples (one line per command) The commands are to copy files to the cache or copy files from the cache to a target destination """ # one operation is performed at a single time by a single process # therefore stdin never has more than one line txt = sys.stdin.readline().strip() if not txt: # parent process probably ended sys.exit(1) ret = OK [sig, files_from, files_to] = cPickle.loads(base64.b64decode(txt)) if files_from: # TODO return early when pushing files upstream ret = service.copy_to_cache(sig, files_from, files_to) elif files_to: # the build process waits for workers to (possibly) obtain files from the cache ret = service.copy_from_cache(sig, files_from, files_to) else: ret = "Invalid command" obj = base64.b64encode(cPickle.dumps(ret)) sys.stdout.write(obj.decode()) sys.stdout.write('\n') sys.stdout.flush()
[ "def", "loop", "(", "service", ")", ":", "# one operation is performed at a single time by a single process", "# therefore stdin never has more than one line", "txt", "=", "sys", ".", "stdin", ".", "readline", "(", ")", ".", "strip", "(", ")", "if", "not", "txt", ":", "# parent process probably ended", "sys", ".", "exit", "(", "1", ")", "ret", "=", "OK", "[", "sig", ",", "files_from", ",", "files_to", "]", "=", "cPickle", ".", "loads", "(", "base64", ".", "b64decode", "(", "txt", ")", ")", "if", "files_from", ":", "# TODO return early when pushing files upstream", "ret", "=", "service", ".", "copy_to_cache", "(", "sig", ",", "files_from", ",", "files_to", ")", "elif", "files_to", ":", "# the build process waits for workers to (possibly) obtain files from the cache", "ret", "=", "service", ".", "copy_from_cache", "(", "sig", ",", "files_from", ",", "files_to", ")", "else", ":", "ret", "=", "\"Invalid command\"", "obj", "=", "base64", ".", "b64encode", "(", "cPickle", ".", "dumps", "(", "ret", ")", ")", "sys", ".", "stdout", ".", "write", "(", "obj", ".", "decode", "(", ")", ")", "sys", ".", "stdout", ".", "write", "(", "'\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
https://github.com/kushview/Element/blob/1cc16380caa2ab79461246ba758b9de1f46db2a5/waflib/extras/wafcache.py#L539-L569
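The `loop` worker above reads one base64-encoded pickle per line from stdin. A small sketch of that framing, with a made-up signature and file lists, shows what travels over the pipe in each direction.

```python
import base64
import pickle

# Hypothetical command: [task signature, files to push, files to pull].
command = ["deadbeefcafebabe", ["build/foo.o"], []]

# What the parent build process writes to the worker's stdin (one line).
line = base64.b64encode(pickle.dumps(command)).decode() + "\n"

# What loop() recovers on the other end before dispatching to the cache service.
sig, files_from, files_to = pickle.loads(base64.b64decode(line.strip()))
print(sig, files_from, files_to)
```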
google/earthenterprise
0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
earth_enterprise/src/server/wsgi/common/utils.py
python
UrlOpener.__init__
(self)
Creates URL opener.
Creates URL opener.
[ "Creates", "URL", "opener", "." ]
def __init__(self): """Creates URL opener.""" proxy_handler = urllib2.ProxyHandler({}) self._opener = urllib2.build_opener(proxy_handler)
[ "def", "__init__", "(", "self", ")", ":", "proxy_handler", "=", "urllib2", ".", "ProxyHandler", "(", "{", "}", ")", "self", ".", "_opener", "=", "urllib2", ".", "build_opener", "(", "proxy_handler", ")" ]
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/server/wsgi/common/utils.py#L41-L44
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/compiler-rt/lib/sanitizer_common/scripts/cpplint.py
python
FileInfo.Split
(self)
return (project,) + os.path.splitext(rest)
Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension).
Splits the file into the directory, basename, and extension.
[ "Splits", "the", "file", "into", "the", "directory", "basename", "and", "extension", "." ]
def Split(self): """Splits the file into the directory, basename, and extension. For 'chrome/browser/browser.cc', Split() would return ('chrome/browser', 'browser', '.cc') Returns: A tuple of (directory, basename, extension). """ googlename = self.RepositoryName() project, rest = os.path.split(googlename) return (project,) + os.path.splitext(rest)
[ "def", "Split", "(", "self", ")", ":", "googlename", "=", "self", ".", "RepositoryName", "(", ")", "project", ",", "rest", "=", "os", ".", "path", ".", "split", "(", "googlename", ")", "return", "(", "project", ",", ")", "+", "os", ".", "path", ".", "splitext", "(", "rest", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/compiler-rt/lib/sanitizer_common/scripts/cpplint.py#L794-L806
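The `Split` record above can be illustrated with the standard library alone, using the example path from its own docstring instead of the repository-relative name.

```python
import os.path

# Mirror of cpplint's Split() logic on the docstring's example path.
name = "chrome/browser/browser.cc"

project, rest = os.path.split(name)
parts = (project,) + os.path.splitext(rest)
print(parts)    # ('chrome/browser', 'browser', '.cc')
```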
aimerykong/Low-Rank-Bilinear-Pooling
487eb2c857fd9c95357a5166b0c15ad0fe135b28
caffe-20160312/python/caffe/detector.py
python
Detector.detect_windows
(self, images_windows)
return detections
Do windowed detection over given images and windows. Windows are extracted then warped to the input dimensions of the net. Parameters ---------- images_windows: (image filename, window list) iterable. context_crop: size of context border to crop in pixels. Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts.
Do windowed detection over given images and windows. Windows are extracted then warped to the input dimensions of the net.
[ "Do", "windowed", "detection", "over", "given", "images", "and", "windows", ".", "Windows", "are", "extracted", "then", "warped", "to", "the", "input", "dimensions", "of", "the", "net", "." ]
def detect_windows(self, images_windows): """ Do windowed detection over given images and windows. Windows are extracted then warped to the input dimensions of the net. Parameters ---------- images_windows: (image filename, window list) iterable. context_crop: size of context border to crop in pixels. Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts. """ # Extract windows. window_inputs = [] for image_fname, windows in images_windows: image = caffe.io.load_image(image_fname).astype(np.float32) for window in windows: window_inputs.append(self.crop(image, window)) # Run through the net (warping windows to input dimensions). in_ = self.inputs[0] caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2]) + self.blobs[in_].data.shape[2:], dtype=np.float32) for ix, window_in in enumerate(window_inputs): caffe_in[ix] = self.transformer.preprocess(in_, window_in) out = self.forward_all(**{in_: caffe_in}) predictions = out[self.outputs[0]].squeeze(axis=(2, 3)) # Package predictions with images and windows. detections = [] ix = 0 for image_fname, windows in images_windows: for window in windows: detections.append({ 'window': window, 'prediction': predictions[ix], 'filename': image_fname }) ix += 1 return detections
[ "def", "detect_windows", "(", "self", ",", "images_windows", ")", ":", "# Extract windows.", "window_inputs", "=", "[", "]", "for", "image_fname", ",", "windows", "in", "images_windows", ":", "image", "=", "caffe", ".", "io", ".", "load_image", "(", "image_fname", ")", ".", "astype", "(", "np", ".", "float32", ")", "for", "window", "in", "windows", ":", "window_inputs", ".", "append", "(", "self", ".", "crop", "(", "image", ",", "window", ")", ")", "# Run through the net (warping windows to input dimensions).", "in_", "=", "self", ".", "inputs", "[", "0", "]", "caffe_in", "=", "np", ".", "zeros", "(", "(", "len", "(", "window_inputs", ")", ",", "window_inputs", "[", "0", "]", ".", "shape", "[", "2", "]", ")", "+", "self", ".", "blobs", "[", "in_", "]", ".", "data", ".", "shape", "[", "2", ":", "]", ",", "dtype", "=", "np", ".", "float32", ")", "for", "ix", ",", "window_in", "in", "enumerate", "(", "window_inputs", ")", ":", "caffe_in", "[", "ix", "]", "=", "self", ".", "transformer", ".", "preprocess", "(", "in_", ",", "window_in", ")", "out", "=", "self", ".", "forward_all", "(", "*", "*", "{", "in_", ":", "caffe_in", "}", ")", "predictions", "=", "out", "[", "self", ".", "outputs", "[", "0", "]", "]", ".", "squeeze", "(", "axis", "=", "(", "2", ",", "3", ")", ")", "# Package predictions with images and windows.", "detections", "=", "[", "]", "ix", "=", "0", "for", "image_fname", ",", "windows", "in", "images_windows", ":", "for", "window", "in", "windows", ":", "detections", ".", "append", "(", "{", "'window'", ":", "window", ",", "'prediction'", ":", "predictions", "[", "ix", "]", ",", "'filename'", ":", "image_fname", "}", ")", "ix", "+=", "1", "return", "detections" ]
https://github.com/aimerykong/Low-Rank-Bilinear-Pooling/blob/487eb2c857fd9c95357a5166b0c15ad0fe135b28/caffe-20160312/python/caffe/detector.py#L56-L99
Project-OSRM/osrm-backend
f2e284623e25b5570dd2a5e6985abcb3790fd348
third_party/flatbuffers/conanfile.py
python
FlatbuffersConan.configure_cmake
(self)
return cmake
Create CMake instance and execute configure step
Create CMake instance and execute configure step
[ "Create", "CMake", "instance", "and", "execute", "configure", "step" ]
def configure_cmake(self): """Create CMake instance and execute configure step """ cmake = CMake(self) cmake.definitions["FLATBUFFERS_BUILD_TESTS"] = False cmake.definitions["FLATBUFFERS_BUILD_SHAREDLIB"] = self.options.shared cmake.definitions["FLATBUFFERS_BUILD_FLATLIB"] = not self.options.shared cmake.configure() return cmake
[ "def", "configure_cmake", "(", "self", ")", ":", "cmake", "=", "CMake", "(", "self", ")", "cmake", ".", "definitions", "[", "\"FLATBUFFERS_BUILD_TESTS\"", "]", "=", "False", "cmake", ".", "definitions", "[", "\"FLATBUFFERS_BUILD_SHAREDLIB\"", "]", "=", "self", ".", "options", ".", "shared", "cmake", ".", "definitions", "[", "\"FLATBUFFERS_BUILD_FLATLIB\"", "]", "=", "not", "self", ".", "options", ".", "shared", "cmake", ".", "configure", "(", ")", "return", "cmake" ]
https://github.com/Project-OSRM/osrm-backend/blob/f2e284623e25b5570dd2a5e6985abcb3790fd348/third_party/flatbuffers/conanfile.py#L38-L46
infinit/memo
3a8394d0f647efe03ccb8bfe885a7279cb8be8a6
elle/drake/src/drake/__init__.py
python
BaseNode.__lt__
(self, rhs)
return self.name_absolute() < rhs.name_absolute()
Arbitrary global order on nodes, to enable sorting/indexing.
Arbitrary global order on nodes, to enable sorting/indexing.
[ "Arbitrary", "global", "order", "on", "nodes", "to", "enable", "sorting", "/", "indexing", "." ]
def __lt__(self, rhs): """Arbitrary global order on nodes, to enable sorting/indexing.""" return self.name_absolute() < rhs.name_absolute()
[ "def", "__lt__", "(", "self", ",", "rhs", ")", ":", "return", "self", ".", "name_absolute", "(", ")", "<", "rhs", ".", "name_absolute", "(", ")" ]
https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L1535-L1538
PaddlePaddle/Paddle-Lite
75fa072dca1c54d8b4ce4fb9e5491edc787e6300
lite/tools/cmake_tools/ast.py
python
SyntaxParser.eat_left_parentheses
(self)
(
(
[ "(" ]
def eat_left_parentheses(self): ''' ( ''' self.assert_is('(') self.token = '(' self.forward()
[ "def", "eat_left_parentheses", "(", "self", ")", ":", "self", ".", "assert_is", "(", "'('", ")", "self", ".", "token", "=", "'('", "self", ".", "forward", "(", ")" ]
https://github.com/PaddlePaddle/Paddle-Lite/blob/75fa072dca1c54d8b4ce4fb9e5491edc787e6300/lite/tools/cmake_tools/ast.py#L61-L67
Harick1/caffe-yolo
eea92bf3ddfe4d0ff6b0b3ba9b15c029a83ed9a3
scripts/cpp_lint.py
python
_NestingState.InNamespaceBody
(self)
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise.
Check if we are currently one level inside a namespace body.
[ "Check", "if", "we", "are", "currently", "one", "level", "inside", "a", "namespace", "body", "." ]
def InNamespaceBody(self): """Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
[ "def", "InNamespaceBody", "(", "self", ")", ":", "return", "self", ".", "stack", "and", "isinstance", "(", "self", ".", "stack", "[", "-", "1", "]", ",", "_NamespaceInfo", ")" ]
https://github.com/Harick1/caffe-yolo/blob/eea92bf3ddfe4d0ff6b0b3ba9b15c029a83ed9a3/scripts/cpp_lint.py#L1940-L1946
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/http/cookiejar.py
python
FileCookieJar.load
(self, filename=None, ignore_discard=False, ignore_expires=False)
Load cookies from a file.
Load cookies from a file.
[ "Load", "cookies", "from", "a", "file", "." ]
def load(self, filename=None, ignore_discard=False, ignore_expires=False): """Load cookies from a file.""" if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) with open(filename) as f: self._really_load(f, filename, ignore_discard, ignore_expires)
[ "def", "load", "(", "self", ",", "filename", "=", "None", ",", "ignore_discard", "=", "False", ",", "ignore_expires", "=", "False", ")", ":", "if", "filename", "is", "None", ":", "if", "self", ".", "filename", "is", "not", "None", ":", "filename", "=", "self", ".", "filename", "else", ":", "raise", "ValueError", "(", "MISSING_FILENAME_TEXT", ")", "with", "open", "(", "filename", ")", "as", "f", ":", "self", ".", "_really_load", "(", "f", ",", "filename", ",", "ignore_discard", ",", "ignore_expires", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/http/cookiejar.py#L1792-L1799
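`FileCookieJar.load` is normally reached through a concrete subclass. Below is a hedged sketch with the standard library's `MozillaCookieJar`; the `cookies.txt` path is a placeholder and must already exist in Netscape format, otherwise `load()` raises.

```python
import http.cookiejar

jar = http.cookiejar.MozillaCookieJar("cookies.txt")   # placeholder filename
jar.load(ignore_discard=True, ignore_expires=True)     # raises if the file is absent or malformed

for cookie in jar:
    print(cookie.domain, cookie.name, cookie.value)
```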
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py
python
ParserElement.setWhitespaceChars
( self, chars )
return self
Overrides the default whitespace chars
[]
def setWhitespaceChars( self, chars ): """ Overrides the default whitespace chars """ self.skipWhitespace = True self.whiteChars = chars self.copyDefaultWhiteChars = False return self
[ "def", "setWhitespaceChars", "(", "self", ",", "chars", ")", ":", "self", ".", "skipWhitespace", "=", "True", "self", ".", "whiteChars", "=", "chars", "self", ".", "copyDefaultWhiteChars", "=", "False", "return", "self" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py#L4121-L4135
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/ops/partitioned_variables.py
python
variable_axis_size_partitioner
( max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None)
return _partitioner
Get a partitioner for VariableScope to keep shards below `max_shard_bytes`. This partitioner will shard a Variable along one axis, attempting to keep the maximum shard size below `max_shard_bytes`. In practice, this is not always possible when sharding along only one axis. When this happens, this axis is sharded as much as possible (i.e., every dimension becomes a separate shard). If the partitioner hits the `max_shards` limit, then each shard may end up larger than `max_shard_bytes`. By default `max_shards` equals `None` and no limit on the number of shards is enforced. One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost `64MB`, to keep below the protobuf byte limit. Args: max_shard_bytes: The maximum size any given shard is allowed to be. axis: The axis to partition along. Default: outermost axis. bytes_per_string_element: If the `Variable` is of type string, this provides an estimate of how large each scalar in the `Variable` is. max_shards: The maximum number of shards in int created taking precedence over `max_shard_bytes`. Returns: A partition function usable as the `partitioner` argument to `variable_scope`, `get_variable`, and `get_partitioned_variable_list`. Raises: ValueError: If any of the byte counts are non-positive.
Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.
[ "Get", "a", "partitioner", "for", "VariableScope", "to", "keep", "shards", "below", "max_shard_bytes", "." ]
def variable_axis_size_partitioner( max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None): """Get a partitioner for VariableScope to keep shards below `max_shard_bytes`. This partitioner will shard a Variable along one axis, attempting to keep the maximum shard size below `max_shard_bytes`. In practice, this is not always possible when sharding along only one axis. When this happens, this axis is sharded as much as possible (i.e., every dimension becomes a separate shard). If the partitioner hits the `max_shards` limit, then each shard may end up larger than `max_shard_bytes`. By default `max_shards` equals `None` and no limit on the number of shards is enforced. One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost `64MB`, to keep below the protobuf byte limit. Args: max_shard_bytes: The maximum size any given shard is allowed to be. axis: The axis to partition along. Default: outermost axis. bytes_per_string_element: If the `Variable` is of type string, this provides an estimate of how large each scalar in the `Variable` is. max_shards: The maximum number of shards in int created taking precedence over `max_shard_bytes`. Returns: A partition function usable as the `partitioner` argument to `variable_scope`, `get_variable`, and `get_partitioned_variable_list`. Raises: ValueError: If any of the byte counts are non-positive. """ if max_shard_bytes < 1 or bytes_per_string_element < 1: raise ValueError( "Both max_shard_bytes and bytes_per_string_element must be positive.") if max_shards and max_shards < 1: raise ValueError( "max_shards must be positive.") def _partitioner(shape, dtype): """Partitioner that partitions shards to have max_shard_bytes total size. Args: shape: A `TensorShape`. dtype: A `DType`. Returns: A tuple representing how much to slice each axis in shape. Raises: ValueError: If shape is not a fully defined `TensorShape` or dtype is not a `DType`. """ if not isinstance(shape, tensor_shape.TensorShape): raise ValueError("shape is not a TensorShape: %s" % shape) if not shape.is_fully_defined(): raise ValueError("shape is not fully defined: %s" % shape) if not isinstance(dtype, dtypes.DType): raise ValueError("dtype is not a DType: %s" % dtype) if dtype.base_dtype == dtypes.string: element_size = bytes_per_string_element else: element_size = dtype.size partitions = [1] * shape.ndims bytes_per_slice = 1.0 * ( shape.num_elements() / shape[axis].value) * element_size # How many slices can we fit on one shard of size at most max_shard_bytes? # At least one slice is required. slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice)) # How many shards do we need for axis given that each shard fits # slices_per_shard slices from a total of shape[axis].value slices? axis_shards = int(math.ceil(1.0 * shape[axis].value / slices_per_shard)) if max_shards: axis_shards = min(max_shards, axis_shards) partitions[axis] = axis_shards return partitions return _partitioner
[ "def", "variable_axis_size_partitioner", "(", "max_shard_bytes", ",", "axis", "=", "0", ",", "bytes_per_string_element", "=", "16", ",", "max_shards", "=", "None", ")", ":", "if", "max_shard_bytes", "<", "1", "or", "bytes_per_string_element", "<", "1", ":", "raise", "ValueError", "(", "\"Both max_shard_bytes and bytes_per_string_element must be positive.\"", ")", "if", "max_shards", "and", "max_shards", "<", "1", ":", "raise", "ValueError", "(", "\"max_shards must be positive.\"", ")", "def", "_partitioner", "(", "shape", ",", "dtype", ")", ":", "\"\"\"Partitioner that partitions shards to have max_shard_bytes total size.\n\n Args:\n shape: A `TensorShape`.\n dtype: A `DType`.\n\n Returns:\n A tuple representing how much to slice each axis in shape.\n\n Raises:\n ValueError: If shape is not a fully defined `TensorShape` or dtype is not\n a `DType`.\n \"\"\"", "if", "not", "isinstance", "(", "shape", ",", "tensor_shape", ".", "TensorShape", ")", ":", "raise", "ValueError", "(", "\"shape is not a TensorShape: %s\"", "%", "shape", ")", "if", "not", "shape", ".", "is_fully_defined", "(", ")", ":", "raise", "ValueError", "(", "\"shape is not fully defined: %s\"", "%", "shape", ")", "if", "not", "isinstance", "(", "dtype", ",", "dtypes", ".", "DType", ")", ":", "raise", "ValueError", "(", "\"dtype is not a DType: %s\"", "%", "dtype", ")", "if", "dtype", ".", "base_dtype", "==", "dtypes", ".", "string", ":", "element_size", "=", "bytes_per_string_element", "else", ":", "element_size", "=", "dtype", ".", "size", "partitions", "=", "[", "1", "]", "*", "shape", ".", "ndims", "bytes_per_slice", "=", "1.0", "*", "(", "shape", ".", "num_elements", "(", ")", "/", "shape", "[", "axis", "]", ".", "value", ")", "*", "element_size", "# How many slices can we fit on one shard of size at most max_shard_bytes?", "# At least one slice is required.", "slices_per_shard", "=", "max", "(", "1", ",", "math", ".", "floor", "(", "max_shard_bytes", "/", "bytes_per_slice", ")", ")", "# How many shards do we need for axis given that each shard fits", "# slices_per_shard slices from a total of shape[axis].value slices?", "axis_shards", "=", "int", "(", "math", ".", "ceil", "(", "1.0", "*", "shape", "[", "axis", "]", ".", "value", "/", "slices_per_shard", ")", ")", "if", "max_shards", ":", "axis_shards", "=", "min", "(", "max_shards", ",", "axis_shards", ")", "partitions", "[", "axis", "]", "=", "axis_shards", "return", "partitions", "return", "_partitioner" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/ops/partitioned_variables.py#L70-L151
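The shard count chosen by `variable_axis_size_partitioner` follows directly from the arithmetic in its inner `_partitioner`. The sketch below reproduces only that arithmetic in plain Python (no TensorFlow calls); the shape, dtype size, and 1 MiB limit are illustrative values, not taken from the record.

```python
import math

shape = [10000, 256]        # illustrative float32 variable
axis = 0
element_size = 4            # bytes per float32 element
max_shard_bytes = 1 << 20   # 1 MiB per shard, chosen for the example

num_elements = shape[0] * shape[1]
bytes_per_slice = (num_elements / shape[axis]) * element_size             # 1024.0
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))  # 1024
axis_shards = int(math.ceil(shape[axis] / slices_per_shard))              # 10

partitions = [1] * len(shape)
partitions[axis] = axis_shards
print(partitions)           # [10, 1] -> split the 10000-row axis into 10 shards
```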
glotzerlab/hoomd-blue
f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a
hoomd/md/methods/methods.py
python
Brownian._add
(self, simulation)
Add the operation to a simulation. Brownian uses RNGs. Warn the user if they did not set the seed.
Add the operation to a simulation.
[ "Add", "the", "operation", "to", "a", "simulation", "." ]
def _add(self, simulation): """Add the operation to a simulation. Brownian uses RNGs. Warn the user if they did not set the seed. """ if isinstance(simulation, hoomd.Simulation): simulation._warn_if_seed_unset() super()._add(simulation)
[ "def", "_add", "(", "self", ",", "simulation", ")", ":", "if", "isinstance", "(", "simulation", ",", "hoomd", ".", "Simulation", ")", ":", "simulation", ".", "_warn_if_seed_unset", "(", ")", "super", "(", ")", ".", "_add", "(", "simulation", ")" ]
https://github.com/glotzerlab/hoomd-blue/blob/f7f97abfa3fcc2522fa8d458d65d0aeca7ba781a/hoomd/md/methods/methods.py#L1008-L1016
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/optimize/_tstutils.py
python
aps09_f
(x, n)
return (1 + (1 - n)**4) * x - (1 - n * x)**4
r"""Upside down quartic with parametrizable height
r"""Upside down quartic with parametrizable height
[ "r", "Upside", "down", "quartic", "with", "parametrizable", "height" ]
def aps09_f(x, n): r"""Upside down quartic with parametrizable height""" return (1 + (1 - n)**4) * x - (1 - n * x)**4
[ "def", "aps09_f", "(", "x", ",", "n", ")", ":", "return", "(", "1", "+", "(", "1", "-", "n", ")", "**", "4", ")", "*", "x", "-", "(", "1", "-", "n", "*", "x", ")", "**", "4" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/optimize/_tstutils.py#L268-L270
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/third_party/web-page-replay/third_party/dns/message.py
python
Message.rcode
(self)
return dns.rcode.from_flags(self.flags, self.ednsflags)
Return the rcode. @rtype: int
Return the rcode.
[ "Return", "the", "rcode", "." ]
def rcode(self): """Return the rcode. @rtype: int """ return dns.rcode.from_flags(self.flags, self.ednsflags)
[ "def", "rcode", "(", "self", ")", ":", "return", "dns", ".", "rcode", ".", "from_flags", "(", "self", ".", "flags", ",", "self", ".", "ednsflags", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/web-page-replay/third_party/dns/message.py#L521-L525
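A hedged sketch against the public dnspython API that this vendored `dns.message` module mirrors; a freshly built query carries rcode 0 (NOERROR).

```python
import dns.message
import dns.rcode

# Build a query and read back its rcode.
query = dns.message.make_query("example.com", "A")
print(query.rcode())                      # 0
print(dns.rcode.to_text(query.rcode()))   # 'NOERROR'
```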
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
native_client_sdk/src/build_tools/manifest_util.py
python
Archive.__getattr__
(self, name)
return self.__getitem__(name)
Retrieve values from this dict using attributes. This allows for foo.bar instead of foo['bar']. Args: name: the name of the key, 'bar' in the example above. Returns: The value associated with that key.
Retrieve values from this dict using attributes.
[ "Retrieve", "values", "from", "this", "dict", "using", "attributes", "." ]
def __getattr__(self, name): """Retrieve values from this dict using attributes. This allows for foo.bar instead of foo['bar']. Args: name: the name of the key, 'bar' in the example above. Returns: The value associated with that key.""" if name not in self: raise AttributeError(name) # special case, self.checksum returns the sha1, not the checksum dict. if name == 'checksum': return self.GetChecksum() return self.__getitem__(name)
[ "def", "__getattr__", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ":", "raise", "AttributeError", "(", "name", ")", "# special case, self.checksum returns the sha1, not the checksum dict.", "if", "name", "==", "'checksum'", ":", "return", "self", ".", "GetChecksum", "(", ")", "return", "self", ".", "__getitem__", "(", "name", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/native_client_sdk/src/build_tools/manifest_util.py#L169-L183
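The `__getattr__` record above implements the common attribute-access-over-dict pattern. Here is a minimal standalone sketch of the same idea; `AttrDict` is a hypothetical name used only for this illustration.

```python
class AttrDict(dict):
    """Dict whose keys can also be read as attributes (foo.bar == foo['bar'])."""

    def __getattr__(self, name):
        if name not in self:
            raise AttributeError(name)
        return self[name]

archive = AttrDict(url="https://example.invalid/sdk.tar.bz2", size=1024)
print(archive.url)    # same value as archive['url']
print(archive.size)
```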
coinapi/coinapi-sdk
854f21e7f69ea8599ae35c5403565cf299d8b795
oeml-sdk/python/openapi_client/model/ord_status.py
python
OrdStatus.__init__
(self, *args, **kwargs)
OrdStatus - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. Args: args[0] (str): Order statuses and the lifecycle are documented in the separate section: <a href=\"#oeml-order-lifecycle\">OEML / Starter Guide / Order Lifecycle</a> ., must be one of ["RECEIVED", "ROUTING", "ROUTED", "NEW", "PENDING_CANCEL", "PARTIALLY_FILLED", "FILLED", "CANCELED", "REJECTED", ] # noqa: E501 Keyword Args: value (str): Order statuses and the lifecycle are documented in the separate section: <a href=\"#oeml-order-lifecycle\">OEML / Starter Guide / Order Lifecycle</a> ., must be one of ["RECEIVED", "ROUTING", "ROUTED", "NEW", "PENDING_CANCEL", "PARTIALLY_FILLED", "FILLED", "CANCELED", "REJECTED", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,)
OrdStatus - a model defined in OpenAPI
[ "OrdStatus", "-", "a", "model", "defined", "in", "OpenAPI" ]
def __init__(self, *args, **kwargs): """OrdStatus - a model defined in OpenAPI Note that value can be passed either in args or in kwargs, but not in both. Args: args[0] (str): Order statuses and the lifecycle are documented in the separate section: <a href=\"#oeml-order-lifecycle\">OEML / Starter Guide / Order Lifecycle</a> ., must be one of ["RECEIVED", "ROUTING", "ROUTED", "NEW", "PENDING_CANCEL", "PARTIALLY_FILLED", "FILLED", "CANCELED", "REJECTED", ] # noqa: E501 Keyword Args: value (str): Order statuses and the lifecycle are documented in the separate section: <a href=\"#oeml-order-lifecycle\">OEML / Starter Guide / Order Lifecycle</a> ., must be one of ["RECEIVED", "ROUTING", "ROUTED", "NEW", "PENDING_CANCEL", "PARTIALLY_FILLED", "FILLED", "CANCELED", "REJECTED", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ # required up here when default value is not given _path_to_item = kwargs.pop('_path_to_item', ()) if 'value' in kwargs: value = kwargs.pop('value') elif args: args = list(args) value = args.pop(0) else: raise ApiTypeError( "value is required, but not passed in args or kwargs and doesn't have default", path_to_item=_path_to_item, valid_classes=(self.__class__,), ) _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value if kwargs: raise ApiTypeError( "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( kwargs, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), )
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# required up here when default value is not given", "_path_to_item", "=", "kwargs", ".", "pop", "(", "'_path_to_item'", ",", "(", ")", ")", "if", "'value'", "in", "kwargs", ":", "value", "=", "kwargs", ".", "pop", "(", "'value'", ")", "elif", "args", ":", "args", "=", "list", "(", "args", ")", "value", "=", "args", ".", "pop", "(", "0", ")", "else", ":", "raise", "ApiTypeError", "(", "\"value is required, but not passed in args or kwargs and doesn't have default\"", ",", "path_to_item", "=", "_path_to_item", ",", "valid_classes", "=", "(", "self", ".", "__class__", ",", ")", ",", ")", "_check_type", "=", "kwargs", ".", "pop", "(", "'_check_type'", ",", "True", ")", "_spec_property_naming", "=", "kwargs", ".", "pop", "(", "'_spec_property_naming'", ",", "False", ")", "_configuration", "=", "kwargs", ".", "pop", "(", "'_configuration'", ",", "None", ")", "_visited_composed_classes", "=", "kwargs", ".", "pop", "(", "'_visited_composed_classes'", ",", "(", ")", ")", "if", "args", ":", "raise", "ApiTypeError", "(", "\"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\"", "%", "(", "args", ",", "self", ".", "__class__", ".", "__name__", ",", ")", ",", "path_to_item", "=", "_path_to_item", ",", "valid_classes", "=", "(", "self", ".", "__class__", ",", ")", ",", ")", "self", ".", "_data_store", "=", "{", "}", "self", ".", "_check_type", "=", "_check_type", "self", ".", "_spec_property_naming", "=", "_spec_property_naming", "self", ".", "_path_to_item", "=", "_path_to_item", "self", ".", "_configuration", "=", "_configuration", "self", ".", "_visited_composed_classes", "=", "_visited_composed_classes", "+", "(", "self", ".", "__class__", ",", ")", "self", ".", "value", "=", "value", "if", "kwargs", ":", "raise", "ApiTypeError", "(", "\"Invalid named arguments=%s passed to %s. Remove those invalid named arguments.\"", "%", "(", "kwargs", ",", "self", ".", "__class__", ".", "__name__", ",", ")", ",", "path_to_item", "=", "_path_to_item", ",", "valid_classes", "=", "(", "self", ".", "__class__", ",", ")", ",", ")" ]
https://github.com/coinapi/coinapi-sdk/blob/854f21e7f69ea8599ae35c5403565cf299d8b795/oeml-sdk/python/openapi_client/model/ord_status.py#L110-L196
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/requests/sessions.py
python
Session.merge_environment_settings
(self, url, proxies, stream, verify, cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert}
Check the environment and merge it with some settings. :rtype: dict
[]
def merge_environment_settings(self, url, proxies, stream, verify, cert): """ Check the environment and merge it with some settings. :rtype: dict """ # Gather clues from the surrounding environment. if self.trust_env: # Set environment's proxies. no_proxy = proxies.get('no_proxy') if proxies is not None else None env_proxies = get_environ_proxies(url, no_proxy=no_proxy) for (k, v) in env_proxies.items(): proxies.setdefault(k, v) # Look for requests environment configuration and be compatible # with cURL. if verify is True or verify is None: verify = (os.environ.get('REQUESTS_CA_BUNDLE') or os.environ.get('CURL_CA_BUNDLE')) # Merge all the kwargs. proxies = merge_setting(proxies, self.proxies) stream = merge_setting(stream, self.stream) verify = merge_setting(verify, self.verify) cert = merge_setting(cert, self.cert) return {'verify': verify, 'proxies': proxies, 'stream': stream, 'cert': cert}
[ "def", "merge_environment_settings", "(", "self", ",", "url", ",", "proxies", ",", "stream", ",", "verify", ",", "cert", ")", ":", "# Gather clues from the surrounding environment.", "if", "self", ".", "trust_env", ":", "# Set environment's proxies.", "no_proxy", "=", "proxies", ".", "get", "(", "'no_proxy'", ")", "if", "proxies", "is", "not", "None", "else", "None", "env_proxies", "=", "get_environ_proxies", "(", "url", ",", "no_proxy", "=", "no_proxy", ")", "for", "(", "k", ",", "v", ")", "in", "env_proxies", ".", "items", "(", ")", ":", "proxies", ".", "setdefault", "(", "k", ",", "v", ")", "# Look for requests environment configuration and be compatible", "# with cURL.", "if", "verify", "is", "True", "or", "verify", "is", "None", ":", "verify", "=", "(", "os", ".", "environ", ".", "get", "(", "'REQUESTS_CA_BUNDLE'", ")", "or", "os", ".", "environ", ".", "get", "(", "'CURL_CA_BUNDLE'", ")", ")", "# Merge all the kwargs.", "proxies", "=", "merge_setting", "(", "proxies", ",", "self", ".", "proxies", ")", "stream", "=", "merge_setting", "(", "stream", ",", "self", ".", "stream", ")", "verify", "=", "merge_setting", "(", "verify", ",", "self", ".", "verify", ")", "cert", "=", "merge_setting", "(", "cert", ",", "self", ".", "cert", ")", "return", "{", "'verify'", ":", "verify", ",", "'proxies'", ":", "proxies", ",", "'stream'", ":", "stream", ",", "'cert'", ":", "cert", "}" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/requests/sessions.py#L1401-L1455
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/fluid/contrib/sparsity/asp.py
python
ASPHelper._create_mask_variables
(cls, main_program, startup_program, params_and_grads)
r""" Create sparse mask Tensors according to supported layers in :attr:`main_program`. This function is called in second step of `ASPHelper._minimize` Args: main_program (Program): Program with model definition and its parameters. startup_program (Program): Program for initializing parameters. params_and_grads (list): Variable pairs of parameters and their gradients.
r""" Create sparse mask Tensors according to supported layers in :attr:`main_program`. This function is called in second step of `ASPHelper._minimize`
[ "r", "Create", "sparse", "mask", "Tensors", "according", "to", "supported", "layers", "in", ":", "attr", ":", "main_program", ".", "This", "function", "is", "called", "in", "second", "step", "of", "ASPHelper", ".", "_minimize" ]
def _create_mask_variables(cls, main_program, startup_program, params_and_grads): r""" Create sparse mask Tensors according to supported layers in :attr:`main_program`. This function is called in second step of `ASPHelper._minimize` Args: main_program (Program): Program with model definition and its parameters. startup_program (Program): Program for initializing parameters. params_and_grads (list): Variable pairs of parameters and their gradients. """ asp_info = cls._get_program_asp_info(main_program) with program_guard(main_program, startup_program): for param_and_grad in params_and_grads: if ASPHelper._is_supported_layer(main_program, param_and_grad[0].name): mask_param = layers.create_parameter( name=param_and_grad[0].name + ASPHelper.MASK_APPENDDED_NAME, shape=param_and_grad[0].shape, dtype=param_and_grad[0].dtype, default_initializer=ConstantInitializer(value=1.0)) mask_param.stop_gradient = True mask_param.trainable = False asp_info.update_mask_vars(param_and_grad[0].name, mask_param)
[ "def", "_create_mask_variables", "(", "cls", ",", "main_program", ",", "startup_program", ",", "params_and_grads", ")", ":", "asp_info", "=", "cls", ".", "_get_program_asp_info", "(", "main_program", ")", "with", "program_guard", "(", "main_program", ",", "startup_program", ")", ":", "for", "param_and_grad", "in", "params_and_grads", ":", "if", "ASPHelper", ".", "_is_supported_layer", "(", "main_program", ",", "param_and_grad", "[", "0", "]", ".", "name", ")", ":", "mask_param", "=", "layers", ".", "create_parameter", "(", "name", "=", "param_and_grad", "[", "0", "]", ".", "name", "+", "ASPHelper", ".", "MASK_APPENDDED_NAME", ",", "shape", "=", "param_and_grad", "[", "0", "]", ".", "shape", ",", "dtype", "=", "param_and_grad", "[", "0", "]", ".", "dtype", ",", "default_initializer", "=", "ConstantInitializer", "(", "value", "=", "1.0", ")", ")", "mask_param", ".", "stop_gradient", "=", "True", "mask_param", ".", "trainable", "=", "False", "asp_info", ".", "update_mask_vars", "(", "param_and_grad", "[", "0", "]", ".", "name", ",", "mask_param", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/contrib/sparsity/asp.py#L494-L519
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_core.py
python
Image.GetHandlers
(*args, **kwargs)
return _core_.Image_GetHandlers(*args, **kwargs)
GetHandlers() -> PyObject
GetHandlers() -> PyObject
[ "GetHandlers", "()", "-", ">", "PyObject" ]
def GetHandlers(*args, **kwargs): """GetHandlers() -> PyObject""" return _core_.Image_GetHandlers(*args, **kwargs)
[ "def", "GetHandlers", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Image_GetHandlers", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L3628-L3630
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/rsa/rsa/_version133.py
python
jacobi
(a, b)
return result
Calculates the value of the Jacobi symbol (a/b)
Calculates the value of the Jacobi symbol (a/b)
[ "Calculates", "the", "value", "of", "the", "Jacobi", "symbol", "(", "a", "/", "b", ")" ]
def jacobi(a, b): """Calculates the value of the Jacobi symbol (a/b) """ if a % b == 0: return 0 result = 1 while a > 1: if a & 1: if ((a-1)*(b-1) >> 2) & 1: result = -result b, a = a, b % a else: if ((b ** 2 - 1) >> 3) & 1: result = -result a = a >> 1 return result
[ "def", "jacobi", "(", "a", ",", "b", ")", ":", "if", "a", "%", "b", "==", "0", ":", "return", "0", "result", "=", "1", "while", "a", ">", "1", ":", "if", "a", "&", "1", ":", "if", "(", "(", "a", "-", "1", ")", "*", "(", "b", "-", "1", ")", ">>", "2", ")", "&", "1", ":", "result", "=", "-", "result", "b", ",", "a", "=", "a", ",", "b", "%", "a", "else", ":", "if", "(", "(", "b", "**", "2", "-", "1", ")", ">>", "3", ")", "&", "1", ":", "result", "=", "-", "result", "a", "=", "a", ">>", "1", "return", "result" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/rsa/rsa/_version133.py#L142-L158
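The `jacobi` routine above can be cross-checked against Euler's criterion: for an odd prime p and gcd(a, p) = 1, the Jacobi symbol (a/p) equals a^((p-1)/2) mod p mapped to +1 or -1. The sketch copies the routine verbatim from the record so the block runs on its own; p = 23 is an arbitrary small prime.

```python
def jacobi(a, b):
    """Copied verbatim from the record above (python-rsa's _version133.py)."""
    if a % b == 0:
        return 0
    result = 1
    while a > 1:
        if a & 1:
            if ((a - 1) * (b - 1) >> 2) & 1:
                result = -result
            b, a = a, b % a
        else:
            if ((b ** 2 - 1) >> 3) & 1:
                result = -result
            a = a >> 1
    return result

p = 23  # arbitrary odd prime for the check
for a in range(1, p):
    euler = pow(a, (p - 1) // 2, p)          # Euler's criterion
    expected = 1 if euler == 1 else -1       # euler is either 1 or p-1 here
    assert jacobi(a, p) == expected, (a, p)
print("jacobi() agrees with Euler's criterion for all a in 1..%d" % (p - 1))
```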
flexflow/FlexFlow
581fad8ba8d10a16a3102ee2b406b0319586df24
python/flexflow/keras/utils/data_utils.py
python
get_file
(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None)
return fpath
Downloads a file from a URL if it not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. # Arguments fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of 'extract'. boolean, whether the file should be decompressed md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for verification file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. hash_algorithm: Select the hash algorithm to verify the file. options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored). # Returns Path to the downloaded file
Downloads a file from a URL if it not already in the cache.
[ "Downloads", "a", "file", "from", "a", "URL", "if", "it", "not", "already", "in", "the", "cache", "." ]
def get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None): """Downloads a file from a URL if it not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. # Arguments fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of 'extract'. boolean, whether the file should be decompressed md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for verification file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. hash_algorithm: Select the hash algorithm to verify the file. options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored). # Returns Path to the downloaded file """ # noqa if cache_dir is None: if 'KERAS_HOME' in os.environ: cache_dir = os.environ.get('KERAS_HOME') else: cache_dir = os.path.join(os.path.expanduser('~'), '.keras') if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = 'md5' datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = os.path.join('/tmp', '.keras') datadir = os.path.join(datadir_base, cache_subdir) if not os.path.exists(datadir): os.makedirs(datadir) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + '.tar.gz' else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): # File found; verify integrity if a hash was provided. if file_hash is not None: if not validate_file(fpath, file_hash, algorithm=hash_algorithm): print('A local file was found, but it seems to be incomplete' ' or outdated because the {} file hash does not match ' 'the original value of {} so we will re-download the ' 'data.'.format(hash_algorithm, file_hash)) download = True else: download = True if download: print('Downloading data from', origin) class ProgressTracker(object): # Maintain progbar for the lifetime of download. # This design was chosen for Python 2.7 compatibility. progbar = None def dl_progress(count, block_size, total_size): if ProgressTracker.progbar is None: if total_size == -1: total_size = None ProgressTracker.progbar = Progbar(total_size) else: ProgressTracker.progbar.update(count * block_size) error_msg = 'URL fetch failure on {} : {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except (Exception, KeyboardInterrupt): if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None if untar: if not os.path.exists(untar_fpath): _extract_archive(fpath, datadir, archive_format='tar') return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath
[ "def", "get_file", "(", "fname", ",", "origin", ",", "untar", "=", "False", ",", "md5_hash", "=", "None", ",", "file_hash", "=", "None", ",", "cache_subdir", "=", "'datasets'", ",", "hash_algorithm", "=", "'auto'", ",", "extract", "=", "False", ",", "archive_format", "=", "'auto'", ",", "cache_dir", "=", "None", ")", ":", "# noqa", "if", "cache_dir", "is", "None", ":", "if", "'KERAS_HOME'", "in", "os", ".", "environ", ":", "cache_dir", "=", "os", ".", "environ", ".", "get", "(", "'KERAS_HOME'", ")", "else", ":", "cache_dir", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.keras'", ")", "if", "md5_hash", "is", "not", "None", "and", "file_hash", "is", "None", ":", "file_hash", "=", "md5_hash", "hash_algorithm", "=", "'md5'", "datadir_base", "=", "os", ".", "path", ".", "expanduser", "(", "cache_dir", ")", "if", "not", "os", ".", "access", "(", "datadir_base", ",", "os", ".", "W_OK", ")", ":", "datadir_base", "=", "os", ".", "path", ".", "join", "(", "'/tmp'", ",", "'.keras'", ")", "datadir", "=", "os", ".", "path", ".", "join", "(", "datadir_base", ",", "cache_subdir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "datadir", ")", ":", "os", ".", "makedirs", "(", "datadir", ")", "if", "untar", ":", "untar_fpath", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "fname", ")", "fpath", "=", "untar_fpath", "+", "'.tar.gz'", "else", ":", "fpath", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "fname", ")", "download", "=", "False", "if", "os", ".", "path", ".", "exists", "(", "fpath", ")", ":", "# File found; verify integrity if a hash was provided.", "if", "file_hash", "is", "not", "None", ":", "if", "not", "validate_file", "(", "fpath", ",", "file_hash", ",", "algorithm", "=", "hash_algorithm", ")", ":", "print", "(", "'A local file was found, but it seems to be incomplete'", "' or outdated because the {} file hash does not match '", "'the original value of {} so we will re-download the '", "'data.'", ".", "format", "(", "hash_algorithm", ",", "file_hash", ")", ")", "download", "=", "True", "else", ":", "download", "=", "True", "if", "download", ":", "print", "(", "'Downloading data from'", ",", "origin", ")", "class", "ProgressTracker", "(", "object", ")", ":", "# Maintain progbar for the lifetime of download.", "# This design was chosen for Python 2.7 compatibility.", "progbar", "=", "None", "def", "dl_progress", "(", "count", ",", "block_size", ",", "total_size", ")", ":", "if", "ProgressTracker", ".", "progbar", "is", "None", ":", "if", "total_size", "==", "-", "1", ":", "total_size", "=", "None", "ProgressTracker", ".", "progbar", "=", "Progbar", "(", "total_size", ")", "else", ":", "ProgressTracker", ".", "progbar", ".", "update", "(", "count", "*", "block_size", ")", "error_msg", "=", "'URL fetch failure on {} : {} -- {}'", "try", ":", "try", ":", "urlretrieve", "(", "origin", ",", "fpath", ",", "dl_progress", ")", "except", "HTTPError", "as", "e", ":", "raise", "Exception", "(", "error_msg", ".", "format", "(", "origin", ",", "e", ".", "code", ",", "e", ".", "msg", ")", ")", "except", "URLError", "as", "e", ":", "raise", "Exception", "(", "error_msg", ".", "format", "(", "origin", ",", "e", ".", "errno", ",", "e", ".", "reason", ")", ")", "except", "(", "Exception", ",", "KeyboardInterrupt", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "fpath", ")", ":", "os", ".", "remove", "(", "fpath", ")", "raise", "ProgressTracker", ".", "progbar", "=", "None", "if", "untar", ":", "if", "not", "os", 
".", "path", ".", "exists", "(", "untar_fpath", ")", ":", "_extract_archive", "(", "fpath", ",", "datadir", ",", "archive_format", "=", "'tar'", ")", "return", "untar_fpath", "if", "extract", ":", "_extract_archive", "(", "fpath", ",", "datadir", ",", "archive_format", ")", "return", "fpath" ]
https://github.com/flexflow/FlexFlow/blob/581fad8ba8d10a16a3102ee2b406b0319586df24/python/flexflow/keras/utils/data_utils.py#L123-L244
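A hedged usage sketch for `get_file`; the import path, URL, and cache settings below are placeholders chosen for illustration, not values taken from the FlexFlow sources.

```python
from flexflow.keras.utils.data_utils import get_file   # assumed import path

path = get_file(
    fname="example.tar.gz",
    origin="https://example.invalid/data/example.tar.gz",  # placeholder URL
    extract=True,                # also unpack the archive next to the cached file
    cache_subdir="datasets",     # lands under ~/.keras/datasets/ by default
)
print("cached at", path)
```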
apache/incubator-mxnet
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
python/mxnet/symbol/symbol.py
python
histogram
(a, bins=10, range=None, **kwargs)
Compute the histogram of the input data. Parameters ---------- a : NDArray Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. range : (float, float), required if bins is an integer The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()). Values outside the range are ignored. The first element of the range must be less than or equal to the second. range affects the automatic bin computation as well, the range will be equally divided by the number of bins. Returns ------- out : Symbol The created Symbol
Compute the histogram of the input data.
[ "Compute", "the", "histogram", "of", "the", "input", "data", "." ]
def histogram(a, bins=10, range=None, **kwargs): """Compute the histogram of the input data. Parameters ---------- a : NDArray Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. range : (float, float), required if bins is an integer The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()). Values outside the range are ignored. The first element of the range must be less than or equal to the second. range affects the automatic bin computation as well, the range will be equally divided by the number of bins. Returns ------- out : Symbol The created Symbol """ if isinstance(bins, Symbol): return _internal._histogram(data=a, bins=bins, **kwargs) elif isinstance(bins, integer_types): if range is None: raise ValueError("null range is not supported in symbol mode") return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs) raise ValueError("bins argument should be either an integer or an NDArray")
[ "def", "histogram", "(", "a", ",", "bins", "=", "10", ",", "range", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "bins", ",", "Symbol", ")", ":", "return", "_internal", ".", "_histogram", "(", "data", "=", "a", ",", "bins", "=", "bins", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "bins", ",", "integer_types", ")", ":", "if", "range", "is", "None", ":", "raise", "ValueError", "(", "\"null range is not supported in symbol mode\"", ")", "return", "_internal", ".", "_histogram", "(", "data", "=", "a", ",", "bin_cnt", "=", "bins", ",", "range", "=", "range", ",", "*", "*", "kwargs", ")", "raise", "ValueError", "(", "\"bins argument should be either an integer or an NDArray\"", ")" ]
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/symbol/symbol.py#L3243-L3271
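A short sketch of the two accepted bins forms in symbol mode, assuming the function is exposed as mx.sym.histogram; the variable names are illustrative:

import mxnet as mx

data = mx.sym.Variable('data')
# an integer bin count requires an explicit range in symbol mode
hist_fixed = mx.sym.histogram(data, bins=5, range=(0.0, 1.0))
# precomputed (possibly non-uniform) bin edges are passed as a Symbol instead
edges = mx.sym.Variable('edges')
hist_edges = mx.sym.histogram(data, bins=edges)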
tkn-tub/ns3-gym
19bfe0a583e641142609939a090a09dfc63a095f
bindings/python/rad_util.py
python
reverse
(input_string)
return ''.join(str_list)
Reverse a string. Useful for strings of binary numbers. >>> reverse('abc') 'cba'
Reverse a string. Useful for strings of binary numbers.
[ "Reverse", "a", "string", ".", "Useful", "for", "strings", "of", "binary", "numbers", "." ]
def reverse(input_string): """Reverse a string. Useful for strings of binary numbers. >>> reverse('abc') 'cba' """ str_list = list(input_string) str_list.reverse() return ''.join(str_list)
[ "def", "reverse", "(", "input_string", ")", ":", "str_list", "=", "list", "(", "input_string", ")", "str_list", ".", "reverse", "(", ")", "return", "''", ".", "join", "(", "str_list", ")" ]
https://github.com/tkn-tub/ns3-gym/blob/19bfe0a583e641142609939a090a09dfc63a095f/bindings/python/rad_util.py#L133-L142
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/boto/boto/dynamodb2/results.py
python
ResultSet._reset
(self)
Resets the internal state of the ``ResultSet``. This prevents results from being cached long-term & consuming excess memory. Largely internal.
Resets the internal state of the ``ResultSet``.
[ "Resets", "the", "internal", "state", "of", "the", "ResultSet", "." ]
def _reset(self): """ Resets the internal state of the ``ResultSet``. This prevents results from being cached long-term & consuming excess memory. Largely internal. """ self._results = [] self._offset = 0
[ "def", "_reset", "(", "self", ")", ":", "self", ".", "_results", "=", "[", "]", "self", ".", "_offset", "=", "0" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/boto/boto/dynamodb2/results.py#L40-L50
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/arch/isa_parser/isa_parser.py
python
ISAParser.p_keyword_arg_list_1
(self, t)
keyword_arg_list : keyword_arg_list COMMA keyword_arg
keyword_arg_list : keyword_arg_list COMMA keyword_arg
[ "keyword_arg_list", ":", "keyword_arg_list", "COMMA", "keyword_arg" ]
def p_keyword_arg_list_1(self, t): 'keyword_arg_list : keyword_arg_list COMMA keyword_arg' t[0] = t[1] t[0].update(t[3])
[ "def", "p_keyword_arg_list_1", "(", "self", ",", "t", ")", ":", "t", "[", "0", "]", "=", "t", "[", "1", "]", "t", "[", "0", "]", ".", "update", "(", "t", "[", "3", "]", ")" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/arch/isa_parser/isa_parser.py#L1375-L1378
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/_backport/tarfile.py
python
TarInfo.create_pax_header
(self, info, encoding)
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information.
Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information.
[ "Return", "the", "object", "as", "a", "ustar", "header", "block", ".", "If", "it", "cannot", "be", "represented", "this", "way", "prepend", "a", "pax", "extended", "header", "sequence", "with", "supplement", "information", "." ]
def create_pax_header(self, info, encoding): """Return the object as a ustar header block. If it cannot be represented this way, prepend a pax extended header sequence with supplement information. """ info["magic"] = POSIX_MAGIC pax_headers = self.pax_headers.copy() # Test string fields for values that exceed the field length or cannot # be represented in ASCII encoding. for name, hname, length in ( ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), ("uname", "uname", 32), ("gname", "gname", 32)): if hname in pax_headers: # The pax header has priority. continue # Try to encode the string as ASCII. try: info[name].encode("ascii", "strict") except UnicodeEncodeError: pax_headers[hname] = info[name] continue if len(info[name]) > length: pax_headers[hname] = info[name] # Test number fields for values that exceed the field limit or values # that like to be stored as float. for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): if name in pax_headers: # The pax header has priority. Avoid overflow. info[name] = 0 continue val = info[name] if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): pax_headers[name] = str(val) info[name] = 0 # Create a pax extended header if necessary. if pax_headers: buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) else: buf = b"" return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
[ "def", "create_pax_header", "(", "self", ",", "info", ",", "encoding", ")", ":", "info", "[", "\"magic\"", "]", "=", "POSIX_MAGIC", "pax_headers", "=", "self", ".", "pax_headers", ".", "copy", "(", ")", "# Test string fields for values that exceed the field length or cannot", "# be represented in ASCII encoding.", "for", "name", ",", "hname", ",", "length", "in", "(", "(", "\"name\"", ",", "\"path\"", ",", "LENGTH_NAME", ")", ",", "(", "\"linkname\"", ",", "\"linkpath\"", ",", "LENGTH_LINK", ")", ",", "(", "\"uname\"", ",", "\"uname\"", ",", "32", ")", ",", "(", "\"gname\"", ",", "\"gname\"", ",", "32", ")", ")", ":", "if", "hname", "in", "pax_headers", ":", "# The pax header has priority.", "continue", "# Try to encode the string as ASCII.", "try", ":", "info", "[", "name", "]", ".", "encode", "(", "\"ascii\"", ",", "\"strict\"", ")", "except", "UnicodeEncodeError", ":", "pax_headers", "[", "hname", "]", "=", "info", "[", "name", "]", "continue", "if", "len", "(", "info", "[", "name", "]", ")", ">", "length", ":", "pax_headers", "[", "hname", "]", "=", "info", "[", "name", "]", "# Test number fields for values that exceed the field limit or values", "# that like to be stored as float.", "for", "name", ",", "digits", "in", "(", "(", "\"uid\"", ",", "8", ")", ",", "(", "\"gid\"", ",", "8", ")", ",", "(", "\"size\"", ",", "12", ")", ",", "(", "\"mtime\"", ",", "12", ")", ")", ":", "if", "name", "in", "pax_headers", ":", "# The pax header has priority. Avoid overflow.", "info", "[", "name", "]", "=", "0", "continue", "val", "=", "info", "[", "name", "]", "if", "not", "0", "<=", "val", "<", "8", "**", "(", "digits", "-", "1", ")", "or", "isinstance", "(", "val", ",", "float", ")", ":", "pax_headers", "[", "name", "]", "=", "str", "(", "val", ")", "info", "[", "name", "]", "=", "0", "# Create a pax extended header if necessary.", "if", "pax_headers", ":", "buf", "=", "self", ".", "_create_pax_generic_header", "(", "pax_headers", ",", "XHDTYPE", ",", "encoding", ")", "else", ":", "buf", "=", "b\"\"", "return", "buf", "+", "self", ".", "_create_header", "(", "info", ",", "USTAR_FORMAT", ",", "\"ascii\"", ",", "\"replace\"", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/_backport/tarfile.py#L1043-L1090
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/stc.py
python
StyledTextCtrl.ScrollToStart
(*args, **kwargs)
return _stc.StyledTextCtrl_ScrollToStart(*args, **kwargs)
ScrollToStart(self)
ScrollToStart(self)
[ "ScrollToStart", "(", "self", ")" ]
def ScrollToStart(*args, **kwargs): """ScrollToStart(self)""" return _stc.StyledTextCtrl_ScrollToStart(*args, **kwargs)
[ "def", "ScrollToStart", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_ScrollToStart", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L6377-L6379
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/profiler/internal/flops_registry.py
python
_squared_difference_flops
(graph, node)
return _binary_per_element_op_flops(graph, node, ops_per_element=2)
Compute flops for SquaredDifference operation.
Compute flops for SquaredDifference operation.
[ "Compute", "flops", "for", "SquaredDifference", "operation", "." ]
def _squared_difference_flops(graph, node): """Compute flops for SquaredDifference operation.""" return _binary_per_element_op_flops(graph, node, ops_per_element=2)
[ "def", "_squared_difference_flops", "(", "graph", ",", "node", ")", ":", "return", "_binary_per_element_op_flops", "(", "graph", ",", "node", ",", "ops_per_element", "=", "2", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/profiler/internal/flops_registry.py#L232-L234
InsightSoftwareConsortium/ITK
87acfce9a93d928311c38bc371b666b515b9f19d
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/scopedef.py
python
find_declaration
( declarations, decl_type=None, name=None, parent=None, recursive=True, fullname=None)
Returns single declaration that match criteria, defined by developer. If more the one declaration was found None will be returned. For more information about arguments see :class:`match_declaration_t` class. :rtype: matched declaration :class:`declaration_t` or None
Returns single declaration that match criteria, defined by developer. If more the one declaration was found None will be returned.
[ "Returns", "single", "declaration", "that", "match", "criteria", "defined", "by", "developer", ".", "If", "more", "the", "one", "declaration", "was", "found", "None", "will", "be", "returned", "." ]
def find_declaration( declarations, decl_type=None, name=None, parent=None, recursive=True, fullname=None): """ Returns single declaration that match criteria, defined by developer. If more the one declaration was found None will be returned. For more information about arguments see :class:`match_declaration_t` class. :rtype: matched declaration :class:`declaration_t` or None """ decl = find_all_declarations( declarations, decl_type=decl_type, name=name, parent=parent, recursive=recursive, fullname=fullname) if len(decl) == 1: return decl[0]
[ "def", "find_declaration", "(", "declarations", ",", "decl_type", "=", "None", ",", "name", "=", "None", ",", "parent", "=", "None", ",", "recursive", "=", "True", ",", "fullname", "=", "None", ")", ":", "decl", "=", "find_all_declarations", "(", "declarations", ",", "decl_type", "=", "decl_type", ",", "name", "=", "name", ",", "parent", "=", "parent", ",", "recursive", "=", "recursive", ",", "fullname", "=", "fullname", ")", "if", "len", "(", "decl", ")", "==", "1", ":", "return", "decl", "[", "0", "]" ]
https://github.com/InsightSoftwareConsortium/ITK/blob/87acfce9a93d928311c38bc371b666b515b9f19d/Modules/ThirdParty/pygccxml/src/pygccxml/declarations/scopedef.py#L1122-L1148
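A rough usage sketch, assuming a project has already been parsed with pygccxml; global_ns and the class name are hypothetical placeholders, so this is not runnable as-is:

from pygccxml import declarations

cls = find_declaration(
    global_ns.declarations,           # hypothetical: declarations of a previously parsed global namespace
    decl_type=declarations.class_t,
    name='widget_t',                  # hypothetical class name
    recursive=True)
if cls is None:
    print('no unique match for widget_t')   # zero or multiple matches both yield None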
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/compiler.py
python
AutoJitCUDAKernel.specialize
(self, *args)
return kernel
Compile and bind to the current context a version of this kernel specialized for the given *args*.
Compile and bind to the current context a version of this kernel specialized for the given *args*.
[ "Compile", "and", "bind", "to", "the", "current", "context", "a", "version", "of", "this", "kernel", "specialized", "for", "the", "given", "*", "args", "*", "." ]
def specialize(self, *args): ''' Compile and bind to the current context a version of this kernel specialized for the given *args*. ''' argtypes = tuple( [self.typingctx.resolve_argument_type(a) for a in args]) kernel = self.compile(argtypes) return kernel
[ "def", "specialize", "(", "self", ",", "*", "args", ")", ":", "argtypes", "=", "tuple", "(", "[", "self", ".", "typingctx", ".", "resolve_argument_type", "(", "a", ")", "for", "a", "in", "args", "]", ")", "kernel", "=", "self", ".", "compile", "(", "argtypes", ")", "return", "kernel" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/cuda/compiler.py#L808-L816
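A rough sketch of eager specialization with numba.cuda; it needs a CUDA device, and the kernel body is purely illustrative:

import numpy as np
from numba import cuda

@cuda.jit
def add_one(arr):
    i = cuda.grid(1)
    if i < arr.size:
        arr[i] += 1.0

data = np.zeros(32, dtype=np.float32)
# compile and bind a float32[:] specialization up front instead of at first launch
kernel = add_one.specialize(data)
kernel[1, 32](data)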
simsong/bulk_extractor
738911df22b7066ca9e1662f4131fb44090a4196
python/bulk_extractor_reader.py
python
BulkReport.version
(self)
return self.xmldoc.getElementsByTagName("version")[0].firstChild.wholeText
Returns the version of bulk_extractor that made the file.
Returns the version of bulk_extractor that made the file.
[ "Returns", "the", "version", "of", "bulk_extractor", "that", "made", "the", "file", "." ]
def version(self): """Returns the version of bulk_extractor that made the file.""" return self.xmldoc.getElementsByTagName("version")[0].firstChild.wholeText
[ "def", "version", "(", "self", ")", ":", "return", "self", ".", "xmldoc", ".", "getElementsByTagName", "(", "\"version\"", ")", "[", "0", "]", ".", "firstChild", ".", "wholeText" ]
https://github.com/simsong/bulk_extractor/blob/738911df22b7066ca9e1662f4131fb44090a4196/python/bulk_extractor_reader.py#L227-L229
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/Jinja2/py2/jinja2/utils.py
python
select_autoescape
( enabled_extensions=("html", "htm", "xml"), disabled_extensions=(), default_for_string=True, default=False, )
return autoescape
Intelligently sets the initial value of autoescaping based on the filename of the template. This is the recommended way to configure autoescaping if you do not want to write a custom function yourself. If you want to enable it for all templates created from strings or for all templates with `.html` and `.xml` extensions:: from jinja2 import Environment, select_autoescape env = Environment(autoescape=select_autoescape( enabled_extensions=('html', 'xml'), default_for_string=True, )) Example configuration to turn it on at all times except if the template ends with `.txt`:: from jinja2 import Environment, select_autoescape env = Environment(autoescape=select_autoescape( disabled_extensions=('txt',), default_for_string=True, default=True, )) The `enabled_extensions` is an iterable of all the extensions that autoescaping should be enabled for. Likewise `disabled_extensions` is a list of all templates it should be disabled for. If a template is loaded from a string then the default from `default_for_string` is used. If nothing matches then the initial value of autoescaping is set to the value of `default`. For security reasons this function operates case insensitive. .. versionadded:: 2.9
Intelligently sets the initial value of autoescaping based on the filename of the template. This is the recommended way to configure autoescaping if you do not want to write a custom function yourself.
[ "Intelligently", "sets", "the", "initial", "value", "of", "autoescaping", "based", "on", "the", "filename", "of", "the", "template", ".", "This", "is", "the", "recommended", "way", "to", "configure", "autoescaping", "if", "you", "do", "not", "want", "to", "write", "a", "custom", "function", "yourself", "." ]
def select_autoescape( enabled_extensions=("html", "htm", "xml"), disabled_extensions=(), default_for_string=True, default=False, ): """Intelligently sets the initial value of autoescaping based on the filename of the template. This is the recommended way to configure autoescaping if you do not want to write a custom function yourself. If you want to enable it for all templates created from strings or for all templates with `.html` and `.xml` extensions:: from jinja2 import Environment, select_autoescape env = Environment(autoescape=select_autoescape( enabled_extensions=('html', 'xml'), default_for_string=True, )) Example configuration to turn it on at all times except if the template ends with `.txt`:: from jinja2 import Environment, select_autoescape env = Environment(autoescape=select_autoescape( disabled_extensions=('txt',), default_for_string=True, default=True, )) The `enabled_extensions` is an iterable of all the extensions that autoescaping should be enabled for. Likewise `disabled_extensions` is a list of all templates it should be disabled for. If a template is loaded from a string then the default from `default_for_string` is used. If nothing matches then the initial value of autoescaping is set to the value of `default`. For security reasons this function operates case insensitive. .. versionadded:: 2.9 """ enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions) disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions) def autoescape(template_name): if template_name is None: return default_for_string template_name = template_name.lower() if template_name.endswith(enabled_patterns): return True if template_name.endswith(disabled_patterns): return False return default return autoescape
[ "def", "select_autoescape", "(", "enabled_extensions", "=", "(", "\"html\"", ",", "\"htm\"", ",", "\"xml\"", ")", ",", "disabled_extensions", "=", "(", ")", ",", "default_for_string", "=", "True", ",", "default", "=", "False", ",", ")", ":", "enabled_patterns", "=", "tuple", "(", "\".\"", "+", "x", ".", "lstrip", "(", "\".\"", ")", ".", "lower", "(", ")", "for", "x", "in", "enabled_extensions", ")", "disabled_patterns", "=", "tuple", "(", "\".\"", "+", "x", ".", "lstrip", "(", "\".\"", ")", ".", "lower", "(", ")", "for", "x", "in", "disabled_extensions", ")", "def", "autoescape", "(", "template_name", ")", ":", "if", "template_name", "is", "None", ":", "return", "default_for_string", "template_name", "=", "template_name", ".", "lower", "(", ")", "if", "template_name", ".", "endswith", "(", "enabled_patterns", ")", ":", "return", "True", "if", "template_name", ".", "endswith", "(", "disabled_patterns", ")", ":", "return", "False", "return", "default", "return", "autoescape" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/Jinja2/py2/jinja2/utils.py#L542-L595
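Beyond the Environment examples in the docstring, the returned callable can be exercised directly, which makes the extension matching easy to verify:

guess = select_autoescape(enabled_extensions=('html', 'xml'), default=False)
print(guess('index.html'))   # True  - matches an enabled extension
print(guess('notes.txt'))    # False - falls through to `default`
print(guess(None))           # True  - string templates use `default_for_string`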
smilehao/xlua-framework
a03801538be2b0e92d39332d445b22caca1ef61f
ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/mox.py
python
MockAnything._Replay
(self)
Start replaying expected method calls.
Start replaying expected method calls.
[ "Start", "replaying", "expected", "method", "calls", "." ]
def _Replay(self): """Start replaying expected method calls.""" self._replay_mode = True
[ "def", "_Replay", "(", "self", ")", ":", "self", ".", "_replay_mode", "=", "True" ]
https://github.com/smilehao/xlua-framework/blob/a03801538be2b0e92d39332d445b22caca1ef61f/ConfigData/trunk/tools/protobuf-2.5.0/protobuf-2.5.0/python/mox.py#L326-L329
neopenx/Dragon
0e639a7319035ddc81918bd3df059230436ee0a1
Dragon/python/dragon/core/tensor.py
python
Tensor.GlorotNormal
(self, scale=2.0)
return self.MSRA(scale)
Register as a variable with glorot normal initializer.
Register as a variable with glorot normal initializer.
[ "Register", "as", "a", "variable", "with", "glorot", "normal", "initializer", "." ]
def GlorotNormal(self, scale=2.0): """ Register as a variable with glorot normal initializer. """ return self.MSRA(scale)
[ "def", "GlorotNormal", "(", "self", ",", "scale", "=", "2.0", ")", ":", "return", "self", ".", "MSRA", "(", "scale", ")" ]
https://github.com/neopenx/Dragon/blob/0e639a7319035ddc81918bd3df059230436ee0a1/Dragon/python/dragon/core/tensor.py#L192-L196
aosp-mirror/platform_system_core
eb710bfa72ad6461ab147f77d8873c561efa1010
init/perfboot.py
python
mean
(data)
return float(sum(data)) / len(data)
Calculates the mean value from |data|.
Calculates the mean value from |data|.
[ "Calculates", "the", "mean", "value", "from", "|data|", "." ]
def mean(data): """Calculates the mean value from |data|.""" return float(sum(data)) / len(data)
[ "def", "mean", "(", "data", ")", ":", "return", "float", "(", "sum", "(", "data", ")", ")", "/", "len", "(", "data", ")" ]
https://github.com/aosp-mirror/platform_system_core/blob/eb710bfa72ad6461ab147f77d8873c561efa1010/init/perfboot.py#L336-L338
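A quick doctest-style check of the helper:

>>> mean([10, 20, 30, 41])
25.25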
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py2/setuptools/py27compat.py
python
get_all_headers
(message, key)
return message.get_all(key)
Given an HTTPMessage, return all headers matching a given key.
Given an HTTPMessage, return all headers matching a given key.
[ "Given", "an", "HTTPMessage", "return", "all", "headers", "matching", "a", "given", "key", "." ]
def get_all_headers(message, key): """ Given an HTTPMessage, return all headers matching a given key. """ return message.get_all(key)
[ "def", "get_all_headers", "(", "message", ",", "key", ")", ":", "return", "message", ".", "get_all", "(", "key", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py2/setuptools/py27compat.py#L11-L15
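A small sketch using the stdlib email parser; its Message objects expose the same get_all API that Python 3's HTTPMessage inherits, and the header block below is just an illustration:

import email

raw_headers = 'Set-Cookie: a=1\r\nSet-Cookie: b=2\r\nContent-Type: text/html\r\n\r\n'
message = email.message_from_string(raw_headers)
print(get_all_headers(message, 'Set-Cookie'))   # ['a=1', 'b=2']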
wxWidgets/wxWidgets
2356dda03444674c373cee43a49c5e47c7f172b2
build/tools/builder.py
python
Builder.isAvailable
(self)
return False
Run sanity checks before attempting to build with this format
Run sanity checks before attempting to build with this format
[ "Run", "sanity", "checks", "before", "attempting", "to", "build", "with", "this", "format" ]
def isAvailable(self): """ Run sanity checks before attempting to build with this format """ # Make sure the builder program exists programPath = self.getProgramPath() if os.path.exists(programPath): return True else: # check the PATH for the program # TODO: How do we check if we're in Cygwin? if sys.platform.startswith("win"): result = os.system(self.name) if result == 0: return True dirs = os.environ["PATH"].split(":") for dir in dirs: if os.path.isfile(os.path.join(dir, self.name)): return True else: result = os.system("which %s" % self.name) if result == 0: return True return False
[ "def", "isAvailable", "(", "self", ")", ":", "# Make sure the builder program exists", "programPath", "=", "self", ".", "getProgramPath", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "programPath", ")", ":", "return", "True", "else", ":", "# check the PATH for the program", "# TODO: How do we check if we're in Cygwin?", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "result", "=", "os", ".", "system", "(", "self", ".", "name", ")", "if", "result", "==", "0", ":", "return", "True", "dirs", "=", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "\":\"", ")", "for", "dir", "in", "dirs", ":", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "dir", ",", "self", ".", "name", ")", ")", ":", "return", "True", "else", ":", "result", "=", "os", ".", "system", "(", "\"which %s\"", "%", "self", ".", "name", ")", "if", "result", "==", "0", ":", "return", "True", "return", "False" ]
https://github.com/wxWidgets/wxWidgets/blob/2356dda03444674c373cee43a49c5e47c7f172b2/build/tools/builder.py#L53-L80
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/dateutil/tz/_common.py
python
_tzinfo.fromutc
(self, dt)
return enfold(dt_wall, fold=_fold)
Given a timezone-aware datetime in a given timezone, calculates a timezone-aware datetime in a new timezone. Since this is the one time that we *know* we have an unambiguous datetime object, we take this opportunity to determine whether the datetime is ambiguous and in a "fold" state (e.g. if it's the first occurance, chronologically, of the ambiguous datetime). :param dt: A timezone-aware :class:`datetime.datetime` object.
Given a timezone-aware datetime in a given timezone, calculates a timezone-aware datetime in a new timezone.
[ "Given", "a", "timezone", "-", "aware", "datetime", "in", "a", "given", "timezone", "calculates", "a", "timezone", "-", "aware", "datetime", "in", "a", "new", "timezone", "." ]
def fromutc(self, dt): """ Given a timezone-aware datetime in a given timezone, calculates a timezone-aware datetime in a new timezone. Since this is the one time that we *know* we have an unambiguous datetime object, we take this opportunity to determine whether the datetime is ambiguous and in a "fold" state (e.g. if it's the first occurance, chronologically, of the ambiguous datetime). :param dt: A timezone-aware :class:`datetime.datetime` object. """ dt_wall = self._fromutc(dt) # Calculate the fold status given the two datetimes. _fold = self._fold_status(dt, dt_wall) # Set the default fold value for ambiguous dates return enfold(dt_wall, fold=_fold)
[ "def", "fromutc", "(", "self", ",", "dt", ")", ":", "dt_wall", "=", "self", ".", "_fromutc", "(", "dt", ")", "# Calculate the fold status given the two datetimes.", "_fold", "=", "self", ".", "_fold_status", "(", "dt", ",", "dt_wall", ")", "# Set the default fold value for ambiguous dates", "return", "enfold", "(", "dt_wall", ",", "fold", "=", "_fold", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/dateutil/tz/_common.py#L211-L230
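An illustration of the fold handling with dateutil's tz module; the zone and instant are only an example, chosen to fall in the hour that repeats when US Eastern time leaves DST:

from datetime import datetime
from dateutil import tz

eastern = tz.gettz('America/New_York')
# fromutc expects UTC field values carried on a datetime whose tzinfo is `eastern`,
# which is exactly how datetime.astimezone invokes it.
second_0130 = eastern.fromutc(datetime(2017, 11, 5, 6, 30, tzinfo=eastern))
print(second_0130)                          # 2017-11-05 01:30:00-05:00
print(tz.datetime_ambiguous(second_0130))   # True - this wall time occurs twice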
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/models/image/alexnet/alexnet_benchmark.py
python
inference
(images)
return pool5, parameters
Build the AlexNet model. Args: images: Images Tensor Returns: pool5: the last Tensor in the convolutional component of AlexNet. parameters: a list of Tensors corresponding to the weights and biases of the AlexNet model.
Build the AlexNet model.
[ "Build", "the", "AlexNet", "model", "." ]
def inference(images): """Build the AlexNet model. Args: images: Images Tensor Returns: pool5: the last Tensor in the convolutional component of AlexNet. parameters: a list of Tensors corresponding to the weights and biases of the AlexNet model. """ parameters = [] # conv1 with tf.name_scope('conv1') as scope: kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='biases') bias = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(bias, name=scope) print_activations(conv1) parameters += [kernel, biases] # lrn1 # TODO(shlens, jiayq): Add a GPU version of local response normalization. # pool1 pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1') print_activations(pool1) # conv2 with tf.name_scope('conv2') as scope: kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name='biases') bias = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(bias, name=scope) parameters += [kernel, biases] print_activations(conv2) # pool2 pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2') print_activations(pool2) # conv3 with tf.name_scope('conv3') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name='biases') bias = tf.nn.bias_add(conv, biases) conv3 = tf.nn.relu(bias, name=scope) parameters += [kernel, biases] print_activations(conv3) # conv4 with tf.name_scope('conv4') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases') bias = tf.nn.bias_add(conv, biases) conv4 = tf.nn.relu(bias, name=scope) parameters += [kernel, biases] print_activations(conv4) # conv5 with tf.name_scope('conv5') as scope: kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32, stddev=1e-1), name='weights') conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME') biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32), trainable=True, name='biases') bias = tf.nn.bias_add(conv, biases) conv5 = tf.nn.relu(bias, name=scope) parameters += [kernel, biases] print_activations(conv5) # pool5 pool5 = tf.nn.max_pool(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5') print_activations(pool5) return pool5, parameters
[ "def", "inference", "(", "images", ")", ":", "parameters", "=", "[", "]", "# conv1", "with", "tf", ".", "name_scope", "(", "'conv1'", ")", "as", "scope", ":", "kernel", "=", "tf", ".", "Variable", "(", "tf", ".", "truncated_normal", "(", "[", "11", ",", "11", ",", "3", ",", "64", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "stddev", "=", "1e-1", ")", ",", "name", "=", "'weights'", ")", "conv", "=", "tf", ".", "nn", ".", "conv2d", "(", "images", ",", "kernel", ",", "[", "1", ",", "4", ",", "4", ",", "1", "]", ",", "padding", "=", "'SAME'", ")", "biases", "=", "tf", ".", "Variable", "(", "tf", ".", "constant", "(", "0.0", ",", "shape", "=", "[", "64", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "trainable", "=", "True", ",", "name", "=", "'biases'", ")", "bias", "=", "tf", ".", "nn", ".", "bias_add", "(", "conv", ",", "biases", ")", "conv1", "=", "tf", ".", "nn", ".", "relu", "(", "bias", ",", "name", "=", "scope", ")", "print_activations", "(", "conv1", ")", "parameters", "+=", "[", "kernel", ",", "biases", "]", "# lrn1", "# TODO(shlens, jiayq): Add a GPU version of local response normalization.", "# pool1", "pool1", "=", "tf", ".", "nn", ".", "max_pool", "(", "conv1", ",", "ksize", "=", "[", "1", ",", "3", ",", "3", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "2", ",", "2", ",", "1", "]", ",", "padding", "=", "'VALID'", ",", "name", "=", "'pool1'", ")", "print_activations", "(", "pool1", ")", "# conv2", "with", "tf", ".", "name_scope", "(", "'conv2'", ")", "as", "scope", ":", "kernel", "=", "tf", ".", "Variable", "(", "tf", ".", "truncated_normal", "(", "[", "5", ",", "5", ",", "64", ",", "192", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "stddev", "=", "1e-1", ")", ",", "name", "=", "'weights'", ")", "conv", "=", "tf", ".", "nn", ".", "conv2d", "(", "pool1", ",", "kernel", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "padding", "=", "'SAME'", ")", "biases", "=", "tf", ".", "Variable", "(", "tf", ".", "constant", "(", "0.0", ",", "shape", "=", "[", "192", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "trainable", "=", "True", ",", "name", "=", "'biases'", ")", "bias", "=", "tf", ".", "nn", ".", "bias_add", "(", "conv", ",", "biases", ")", "conv2", "=", "tf", ".", "nn", ".", "relu", "(", "bias", ",", "name", "=", "scope", ")", "parameters", "+=", "[", "kernel", ",", "biases", "]", "print_activations", "(", "conv2", ")", "# pool2", "pool2", "=", "tf", ".", "nn", ".", "max_pool", "(", "conv2", ",", "ksize", "=", "[", "1", ",", "3", ",", "3", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "2", ",", "2", ",", "1", "]", ",", "padding", "=", "'VALID'", ",", "name", "=", "'pool2'", ")", "print_activations", "(", "pool2", ")", "# conv3", "with", "tf", ".", "name_scope", "(", "'conv3'", ")", "as", "scope", ":", "kernel", "=", "tf", ".", "Variable", "(", "tf", ".", "truncated_normal", "(", "[", "3", ",", "3", ",", "192", ",", "384", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "stddev", "=", "1e-1", ")", ",", "name", "=", "'weights'", ")", "conv", "=", "tf", ".", "nn", ".", "conv2d", "(", "pool2", ",", "kernel", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "padding", "=", "'SAME'", ")", "biases", "=", "tf", ".", "Variable", "(", "tf", ".", "constant", "(", "0.0", ",", "shape", "=", "[", "384", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "trainable", "=", "True", ",", "name", "=", "'biases'", ")", "bias", "=", "tf", ".", "nn", ".", "bias_add", "(", "conv", ",", "biases", ")", "conv3", "=", "tf", ".", "nn", ".", 
"relu", "(", "bias", ",", "name", "=", "scope", ")", "parameters", "+=", "[", "kernel", ",", "biases", "]", "print_activations", "(", "conv3", ")", "# conv4", "with", "tf", ".", "name_scope", "(", "'conv4'", ")", "as", "scope", ":", "kernel", "=", "tf", ".", "Variable", "(", "tf", ".", "truncated_normal", "(", "[", "3", ",", "3", ",", "384", ",", "256", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "stddev", "=", "1e-1", ")", ",", "name", "=", "'weights'", ")", "conv", "=", "tf", ".", "nn", ".", "conv2d", "(", "conv3", ",", "kernel", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "padding", "=", "'SAME'", ")", "biases", "=", "tf", ".", "Variable", "(", "tf", ".", "constant", "(", "0.0", ",", "shape", "=", "[", "256", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "trainable", "=", "True", ",", "name", "=", "'biases'", ")", "bias", "=", "tf", ".", "nn", ".", "bias_add", "(", "conv", ",", "biases", ")", "conv4", "=", "tf", ".", "nn", ".", "relu", "(", "bias", ",", "name", "=", "scope", ")", "parameters", "+=", "[", "kernel", ",", "biases", "]", "print_activations", "(", "conv4", ")", "# conv5", "with", "tf", ".", "name_scope", "(", "'conv5'", ")", "as", "scope", ":", "kernel", "=", "tf", ".", "Variable", "(", "tf", ".", "truncated_normal", "(", "[", "3", ",", "3", ",", "256", ",", "256", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "stddev", "=", "1e-1", ")", ",", "name", "=", "'weights'", ")", "conv", "=", "tf", ".", "nn", ".", "conv2d", "(", "conv4", ",", "kernel", ",", "[", "1", ",", "1", ",", "1", ",", "1", "]", ",", "padding", "=", "'SAME'", ")", "biases", "=", "tf", ".", "Variable", "(", "tf", ".", "constant", "(", "0.0", ",", "shape", "=", "[", "256", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "trainable", "=", "True", ",", "name", "=", "'biases'", ")", "bias", "=", "tf", ".", "nn", ".", "bias_add", "(", "conv", ",", "biases", ")", "conv5", "=", "tf", ".", "nn", ".", "relu", "(", "bias", ",", "name", "=", "scope", ")", "parameters", "+=", "[", "kernel", ",", "biases", "]", "print_activations", "(", "conv5", ")", "# pool5", "pool5", "=", "tf", ".", "nn", ".", "max_pool", "(", "conv5", ",", "ksize", "=", "[", "1", ",", "3", ",", "3", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "2", ",", "2", ",", "1", "]", ",", "padding", "=", "'VALID'", ",", "name", "=", "'pool5'", ")", "print_activations", "(", "pool5", ")", "return", "pool5", ",", "parameters" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/models/image/alexnet/alexnet_benchmark.py#L56-L158
apache/arrow
af33dd1157eb8d7d9bfac25ebf61445b793b7943
cpp/build-support/cpplint.py
python
Error
(filename, linenum, category, confidence, message)
Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message.
Logs the fact we've found a lint error.
[ "Logs", "the", "fact", "we", "ve", "found", "a", "lint", "error", "." ]
def Error(filename, linenum, category, confidence, message): """Logs the fact we've found a lint error. We log where the error was found, and also our confidence in the error, that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. False positives can be suppressed by the use of "cpplint(category)" comments on the offending line. These are parsed into _error_suppressions. Args: filename: The name of the file containing the error. linenum: The number of the line containing the error. category: A string used to describe the "category" this bug falls under: "whitespace", say, or "runtime". Categories may have a hierarchy separated by slashes: "whitespace/indent". confidence: A number from 1-5 representing a confidence score for the error, with 5 meaning that we are certain of the problem, and 1 meaning that it could be a legitimate construct. message: The error message. """ if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': _cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'junit': _cpplint_state.AddJUnitFailure(filename, linenum, message, category, confidence) else: final_message = '%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence) sys.stderr.write(final_message)
[ "def", "Error", "(", "filename", ",", "linenum", ",", "category", ",", "confidence", ",", "message", ")", ":", "if", "_ShouldPrintError", "(", "category", ",", "confidence", ",", "linenum", ")", ":", "_cpplint_state", ".", "IncrementErrorCount", "(", "category", ")", "if", "_cpplint_state", ".", "output_format", "==", "'vs7'", ":", "_cpplint_state", ".", "PrintError", "(", "'%s(%s): warning: %s [%s] [%d]\\n'", "%", "(", "filename", ",", "linenum", ",", "message", ",", "category", ",", "confidence", ")", ")", "elif", "_cpplint_state", ".", "output_format", "==", "'eclipse'", ":", "sys", ".", "stderr", ".", "write", "(", "'%s:%s: warning: %s [%s] [%d]\\n'", "%", "(", "filename", ",", "linenum", ",", "message", ",", "category", ",", "confidence", ")", ")", "elif", "_cpplint_state", ".", "output_format", "==", "'junit'", ":", "_cpplint_state", ".", "AddJUnitFailure", "(", "filename", ",", "linenum", ",", "message", ",", "category", ",", "confidence", ")", "else", ":", "final_message", "=", "'%s:%s: %s [%s] [%d]\\n'", "%", "(", "filename", ",", "linenum", ",", "message", ",", "category", ",", "confidence", ")", "sys", ".", "stderr", ".", "write", "(", "final_message", ")" ]
https://github.com/apache/arrow/blob/af33dd1157eb8d7d9bfac25ebf61445b793b7943/cpp/build-support/cpplint.py#L1383-L1419
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/urllib3/packages/ordered_dict.py
python
OrderedDict.__delitem__
(self, key, dict_delitem=dict.__delitem__)
od.__delitem__(y) <==> del od[y]
od.__delitem__(y) <==> del od[y]
[ "od", ".", "__delitem__", "(", "y", ")", "<", "==", ">", "del", "od", "[", "y", "]" ]
def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev
[ "def", "__delitem__", "(", "self", ",", "key", ",", "dict_delitem", "=", "dict", ".", "__delitem__", ")", ":", "# Deleting an existing item uses self.__map to find the link which is", "# then removed by updating the links in the predecessor and successor nodes.", "dict_delitem", "(", "self", ",", "key", ")", "link_prev", ",", "link_next", ",", "key", "=", "self", ".", "__map", ".", "pop", "(", "key", ")", "link_prev", "[", "1", "]", "=", "link_next", "link_next", "[", "0", "]", "=", "link_prev" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/urllib3/packages/ordered_dict.py#L54-L61
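A quick check that the linked-list bookkeeping above keeps the remaining keys in insertion order after a delete:

d = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
del d['b']
print(list(d.items()))   # [('a', 1), ('c', 3)]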
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py
python
_make_dense_split
(quantile_accumulator_handle, stats_accumulator_handle, stamp_token, next_stamp_token, multiclass_strategy, class_id, feature_column_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, is_multi_dimentional, loss_uses_sum_reduction, weak_learner_type)
return are_splits_ready, partition_ids, gains, split_infos
Function that builds splits for a dense feature column.
Function that builds splits for a dense feature column.
[ "Function", "that", "builds", "splits", "for", "a", "dense", "feature", "column", "." ]
def _make_dense_split(quantile_accumulator_handle, stats_accumulator_handle, stamp_token, next_stamp_token, multiclass_strategy, class_id, feature_column_id, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, is_multi_dimentional, loss_uses_sum_reduction, weak_learner_type): """Function that builds splits for a dense feature column.""" # Get the bucket boundaries are_splits_ready, buckets = ( gen_quantile_ops.quantile_accumulator_get_buckets( quantile_accumulator_handles=[quantile_accumulator_handle], stamp_token=stamp_token)) # quantile_accumulator_get_buckets returns a list of results per handle that # we pass to it. In this case we're getting results just for one resource. are_splits_ready = are_splits_ready[0] buckets = buckets[0] # After we receive the boundaries from previous iteration we can flush # the quantile accumulator. with ops.control_dependencies([buckets]): flush_quantiles = gen_quantile_ops.quantile_accumulator_flush( quantile_accumulator_handle=quantile_accumulator_handle, stamp_token=stamp_token, next_stamp_token=next_stamp_token) if is_multi_dimentional: num_minibatches, partition_ids, bucket_ids, gradients, hessians = ( gen_stats_accumulator_ops.stats_accumulator_tensor_flush( stats_accumulator_handle, stamp_token, next_stamp_token)) else: num_minibatches, partition_ids, bucket_ids, gradients, hessians = ( gen_stats_accumulator_ops.stats_accumulator_scalar_flush( stats_accumulator_handle, stamp_token, next_stamp_token)) # For sum_reduction, we don't need to divide by number of minibatches. num_minibatches = control_flow_ops.cond( loss_uses_sum_reduction, lambda: math_ops.cast(1, dtypes.int64), lambda: num_minibatches) # Put quantile and stats accumulator flushing in the dependency path. with ops.control_dependencies([flush_quantiles, partition_ids]): are_splits_ready = array_ops.identity(are_splits_ready) partition_ids, gains, split_infos = ( split_handler_ops.build_dense_inequality_splits( num_minibatches=num_minibatches, bucket_boundaries=buckets, partition_ids=partition_ids, bucket_ids=bucket_ids, gradients=gradients, hessians=hessians, class_id=class_id, feature_column_group_id=feature_column_id, l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, multiclass_strategy=multiclass_strategy, weak_learner_type=weak_learner_type)) return are_splits_ready, partition_ids, gains, split_infos
[ "def", "_make_dense_split", "(", "quantile_accumulator_handle", ",", "stats_accumulator_handle", ",", "stamp_token", ",", "next_stamp_token", ",", "multiclass_strategy", ",", "class_id", ",", "feature_column_id", ",", "l1_regularization", ",", "l2_regularization", ",", "tree_complexity_regularization", ",", "min_node_weight", ",", "is_multi_dimentional", ",", "loss_uses_sum_reduction", ",", "weak_learner_type", ")", ":", "# Get the bucket boundaries", "are_splits_ready", ",", "buckets", "=", "(", "gen_quantile_ops", ".", "quantile_accumulator_get_buckets", "(", "quantile_accumulator_handles", "=", "[", "quantile_accumulator_handle", "]", ",", "stamp_token", "=", "stamp_token", ")", ")", "# quantile_accumulator_get_buckets returns a list of results per handle that", "# we pass to it. In this case we're getting results just for one resource.", "are_splits_ready", "=", "are_splits_ready", "[", "0", "]", "buckets", "=", "buckets", "[", "0", "]", "# After we receive the boundaries from previous iteration we can flush", "# the quantile accumulator.", "with", "ops", ".", "control_dependencies", "(", "[", "buckets", "]", ")", ":", "flush_quantiles", "=", "gen_quantile_ops", ".", "quantile_accumulator_flush", "(", "quantile_accumulator_handle", "=", "quantile_accumulator_handle", ",", "stamp_token", "=", "stamp_token", ",", "next_stamp_token", "=", "next_stamp_token", ")", "if", "is_multi_dimentional", ":", "num_minibatches", ",", "partition_ids", ",", "bucket_ids", ",", "gradients", ",", "hessians", "=", "(", "gen_stats_accumulator_ops", ".", "stats_accumulator_tensor_flush", "(", "stats_accumulator_handle", ",", "stamp_token", ",", "next_stamp_token", ")", ")", "else", ":", "num_minibatches", ",", "partition_ids", ",", "bucket_ids", ",", "gradients", ",", "hessians", "=", "(", "gen_stats_accumulator_ops", ".", "stats_accumulator_scalar_flush", "(", "stats_accumulator_handle", ",", "stamp_token", ",", "next_stamp_token", ")", ")", "# For sum_reduction, we don't need to divide by number of minibatches.", "num_minibatches", "=", "control_flow_ops", ".", "cond", "(", "loss_uses_sum_reduction", ",", "lambda", ":", "math_ops", ".", "cast", "(", "1", ",", "dtypes", ".", "int64", ")", ",", "lambda", ":", "num_minibatches", ")", "# Put quantile and stats accumulator flushing in the dependency path.", "with", "ops", ".", "control_dependencies", "(", "[", "flush_quantiles", ",", "partition_ids", "]", ")", ":", "are_splits_ready", "=", "array_ops", ".", "identity", "(", "are_splits_ready", ")", "partition_ids", ",", "gains", ",", "split_infos", "=", "(", "split_handler_ops", ".", "build_dense_inequality_splits", "(", "num_minibatches", "=", "num_minibatches", ",", "bucket_boundaries", "=", "buckets", ",", "partition_ids", "=", "partition_ids", ",", "bucket_ids", "=", "bucket_ids", ",", "gradients", "=", "gradients", ",", "hessians", "=", "hessians", ",", "class_id", "=", "class_id", ",", "feature_column_group_id", "=", "feature_column_id", ",", "l1_regularization", "=", "l1_regularization", ",", "l2_regularization", "=", "l2_regularization", ",", "tree_complexity_regularization", "=", "tree_complexity_regularization", ",", "min_node_weight", "=", "min_node_weight", ",", "multiclass_strategy", "=", "multiclass_strategy", ",", "weak_learner_type", "=", "weak_learner_type", ")", ")", "return", "are_splits_ready", ",", "partition_ids", ",", "gains", ",", "split_infos" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py#L279-L336
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/ops/control_flow_ops.py
python
GradLoopState.grad_sync
(self)
return self._grad_sync
A control trigger node for synchronization in the grad loop. One main use is to keep the pop ops of a stack executed in the iteration order.
A control trigger node for synchronization in the grad loop.
[ "A", "control", "trigger", "node", "for", "synchronization", "in", "the", "grad", "loop", "." ]
def grad_sync(self): """A control trigger node for synchronization in the grad loop. One main use is to keep the pop ops of a stack executed in the iteration order. """ if self._grad_sync is None: with ops.control_dependencies(None): self._grad_sync = control_trigger(name="b_sync") self._grad_sync._set_control_flow_context(self._grad_context) self._grad_index.op._add_control_input(self._grad_sync) return self._grad_sync
[ "def", "grad_sync", "(", "self", ")", ":", "if", "self", ".", "_grad_sync", "is", "None", ":", "with", "ops", ".", "control_dependencies", "(", "None", ")", ":", "self", ".", "_grad_sync", "=", "control_trigger", "(", "name", "=", "\"b_sync\"", ")", "self", ".", "_grad_sync", ".", "_set_control_flow_context", "(", "self", ".", "_grad_context", ")", "self", ".", "_grad_index", ".", "op", ".", "_add_control_input", "(", "self", ".", "_grad_sync", ")", "return", "self", ".", "_grad_sync" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/ops/control_flow_ops.py#L780-L791
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_gdi.py
python
GraphicsContext.PushState
(*args, **kwargs)
return _gdi_.GraphicsContext_PushState(*args, **kwargs)
PushState(self) Push the current state of the context, (ie the transformation matrix) on a stack
PushState(self)
[ "PushState", "(", "self", ")" ]
def PushState(*args, **kwargs): """ PushState(self) Push the current state of the context, (ie the transformation matrix) on a stack """ return _gdi_.GraphicsContext_PushState(*args, **kwargs)
[ "def", "PushState", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "GraphicsContext_PushState", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L6318-L6325
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/io/json/normalize.py
python
nested_to_record
(ds, prefix="", sep=".", level=0)
return new_ds
A simplified json_normalize. Converts a nested dict into a flat dict ("record"), unlike json_normalize, it does not attempt to extract a subset of the data. Parameters ---------- ds : dict or list of dicts prefix: the prefix, optional, default: "" sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 level: the number of levels in the jason string, optional, default: 0 Returns ------- d - dict or list of dicts, matching `ds` Examples -------- IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2), nested=dict(e=dict(c=1,d=2),d=2))) Out[52]: {'dict1.c': 1, 'dict1.d': 2, 'flat1': 1, 'nested.d': 2, 'nested.e.c': 1, 'nested.e.d': 2}
A simplified json_normalize.
[ "A", "simplified", "json_normalize", "." ]
def nested_to_record(ds, prefix="", sep=".", level=0): """ A simplified json_normalize. Converts a nested dict into a flat dict ("record"), unlike json_normalize, it does not attempt to extract a subset of the data. Parameters ---------- ds : dict or list of dicts prefix: the prefix, optional, default: "" sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 level: the number of levels in the jason string, optional, default: 0 Returns ------- d - dict or list of dicts, matching `ds` Examples -------- IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2), nested=dict(e=dict(c=1,d=2),d=2))) Out[52]: {'dict1.c': 1, 'dict1.d': 2, 'flat1': 1, 'nested.d': 2, 'nested.e.c': 1, 'nested.e.d': 2} """ singleton = False if isinstance(ds, dict): ds = [ds] singleton = True new_ds = [] for d in ds: new_d = copy.deepcopy(d) for k, v in d.items(): # each key gets renamed with prefix if not isinstance(k, compat.string_types): k = str(k) if level == 0: newkey = k else: newkey = prefix + sep + k # only dicts gets recurse-flattend # only at level>1 do we rename the rest of the keys if not isinstance(v, dict): if level != 0: # so we skip copying for top level, common case v = new_d.pop(k) new_d[newkey] = v continue else: v = new_d.pop(k) new_d.update(nested_to_record(v, newkey, sep, level + 1)) new_ds.append(new_d) if singleton: return new_ds[0] return new_ds
[ "def", "nested_to_record", "(", "ds", ",", "prefix", "=", "\"\"", ",", "sep", "=", "\".\"", ",", "level", "=", "0", ")", ":", "singleton", "=", "False", "if", "isinstance", "(", "ds", ",", "dict", ")", ":", "ds", "=", "[", "ds", "]", "singleton", "=", "True", "new_ds", "=", "[", "]", "for", "d", "in", "ds", ":", "new_d", "=", "copy", ".", "deepcopy", "(", "d", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "# each key gets renamed with prefix", "if", "not", "isinstance", "(", "k", ",", "compat", ".", "string_types", ")", ":", "k", "=", "str", "(", "k", ")", "if", "level", "==", "0", ":", "newkey", "=", "k", "else", ":", "newkey", "=", "prefix", "+", "sep", "+", "k", "# only dicts gets recurse-flattend", "# only at level>1 do we rename the rest of the keys", "if", "not", "isinstance", "(", "v", ",", "dict", ")", ":", "if", "level", "!=", "0", ":", "# so we skip copying for top level, common case", "v", "=", "new_d", ".", "pop", "(", "k", ")", "new_d", "[", "newkey", "]", "=", "v", "continue", "else", ":", "v", "=", "new_d", ".", "pop", "(", "k", ")", "new_d", ".", "update", "(", "nested_to_record", "(", "v", ",", "newkey", ",", "sep", ",", "level", "+", "1", ")", ")", "new_ds", ".", "append", "(", "new_d", ")", "if", "singleton", ":", "return", "new_ds", "[", "0", "]", "return", "new_ds" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/io/json/normalize.py#L28-L96
cvmfs/cvmfs
4637bdb5153178eadf885c1acf37bdc5c685bf8a
cpplint.py
python
CheckTrailingSemicolon
(filename, clean_lines, linenum, error)
Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Looks for redundant trailing semicolon.
[ "Looks", "for", "redundant", "trailing", "semicolon", "." ]
def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. # # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas # - alignas specifier with anonymous structs: closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. 
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }")
[ "def", "CheckTrailingSemicolon", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Block bodies should not be followed by a semicolon. Due to C++11", "# brace initialization, there are more places where semicolons are", "# required than not, so we use a whitelist approach to check these", "# rather than a blacklist. These are the places where \"};\" should", "# be replaced by just \"}\":", "# 1. Some flavor of block following closing parenthesis:", "# for (;;) {};", "# while (...) {};", "# switch (...) {};", "# Function(...) {};", "# if (...) {};", "# if (...) else if (...) {};", "#", "# 2. else block:", "# if (...) else {};", "#", "# 3. const member function:", "# Function(...) const {};", "#", "# 4. Block following some statement:", "# x = 42;", "# {};", "#", "# 5. Block at the beginning of a function:", "# Function(...) {", "# {};", "# }", "#", "# Note that naively checking for the preceding \"{\" will also match", "# braces inside multi-dimensional arrays, but this is fine since", "# that expression will not contain semicolons.", "#", "# 6. Block following another block:", "# while (true) {}", "# {};", "#", "# 7. End of namespaces:", "# namespace {};", "#", "# These semicolons seems far more common than other kinds of", "# redundant semicolons, possibly due to people converting classes", "# to namespaces. For now we do not warn for this case.", "#", "# Try matching case 1 first.", "match", "=", "Match", "(", "r'^(.*\\)\\s*)\\{'", ",", "line", ")", "if", "match", ":", "# Matched closing parenthesis (case 1). Check the token before the", "# matching opening parenthesis, and don't warn if it looks like a", "# macro. This avoids these false positives:", "# - macro that defines a base class", "# - multi-line macro that defines a base class", "# - macro that defines the whole class-head", "#", "# But we still issue warnings for macros that we know are safe to", "# warn, specifically:", "# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P", "# - TYPED_TEST", "# - INTERFACE_DEF", "# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:", "#", "# We implement a whitelist of safe macros instead of a blacklist of", "# unsafe macros, even though the latter appears less frequently in", "# google code and would have been easier to implement. 
This is because", "# the downside for getting the whitelist wrong means some extra", "# semicolons, while the downside for getting the blacklist wrong", "# would result in compile errors.", "#", "# In addition to macros, we also don't want to warn on", "# - Compound literals", "# - Lambdas", "# - alignas specifier with anonymous structs:", "closing_brace_pos", "=", "match", ".", "group", "(", "1", ")", ".", "rfind", "(", "')'", ")", "opening_parenthesis", "=", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "closing_brace_pos", ")", "if", "opening_parenthesis", "[", "2", "]", ">", "-", "1", ":", "line_prefix", "=", "opening_parenthesis", "[", "0", "]", "[", "0", ":", "opening_parenthesis", "[", "2", "]", "]", "macro", "=", "Search", "(", "r'\\b([A-Z_]+)\\s*$'", ",", "line_prefix", ")", "func", "=", "Match", "(", "r'^(.*\\])\\s*$'", ",", "line_prefix", ")", "if", "(", "(", "macro", "and", "macro", ".", "group", "(", "1", ")", "not", "in", "(", "'TEST'", ",", "'TEST_F'", ",", "'MATCHER'", ",", "'MATCHER_P'", ",", "'TYPED_TEST'", ",", "'EXCLUSIVE_LOCKS_REQUIRED'", ",", "'SHARED_LOCKS_REQUIRED'", ",", "'LOCKS_EXCLUDED'", ",", "'INTERFACE_DEF'", ")", ")", "or", "(", "func", "and", "not", "Search", "(", "r'\\boperator\\s*\\[\\s*\\]'", ",", "func", ".", "group", "(", "1", ")", ")", ")", "or", "Search", "(", "r'\\b(?:struct|union)\\s+alignas\\s*$'", ",", "line_prefix", ")", "or", "Search", "(", "r'\\s+=\\s*$'", ",", "line_prefix", ")", ")", ":", "match", "=", "None", "if", "(", "match", "and", "opening_parenthesis", "[", "1", "]", ">", "1", "and", "Search", "(", "r'\\]\\s*$'", ",", "clean_lines", ".", "elided", "[", "opening_parenthesis", "[", "1", "]", "-", "1", "]", ")", ")", ":", "# Multi-line lambda-expression", "match", "=", "None", "else", ":", "# Try matching cases 2-3.", "match", "=", "Match", "(", "r'^(.*(?:else|\\)\\s*const)\\s*)\\{'", ",", "line", ")", "if", "not", "match", ":", "# Try matching cases 4-6. These are always matched on separate lines.", "#", "# Note that we can't simply concatenate the previous line to the", "# current line and do a single match, otherwise we may output", "# duplicate warnings for the blank line case:", "# if (cond) {", "# // blank line", "# }", "prevline", "=", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", "if", "prevline", "and", "Search", "(", "r'[;{}]\\s*$'", ",", "prevline", ")", ":", "match", "=", "Match", "(", "r'^(\\s*)\\{'", ",", "line", ")", "# Check matching closing brace", "if", "match", ":", "(", "endline", ",", "endlinenum", ",", "endpos", ")", "=", "CloseExpression", "(", "clean_lines", ",", "linenum", ",", "len", "(", "match", ".", "group", "(", "1", ")", ")", ")", "if", "endpos", ">", "-", "1", "and", "Match", "(", "r'^\\s*;'", ",", "endline", "[", "endpos", ":", "]", ")", ":", "# Current {} pair is eligible for semicolon check, and we have found", "# the redundant semicolon, output warning here.", "#", "# Note: because we are scanning forward for opening braces, and", "# outputting warnings for the matching closing brace, if there are", "# nested blocks with trailing semicolons, we will get the error", "# messages in reversed order.", "error", "(", "filename", ",", "endlinenum", ",", "'readability/braces'", ",", "4", ",", "\"You don't need a ; after a }\"", ")" ]
https://github.com/cvmfs/cvmfs/blob/4637bdb5153178eadf885c1acf37bdc5c685bf8a/cpplint.py#L4009-L4143
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/html/parser.py
python
HTMLParser.reset
(self)
Reset this instance. Loses all unprocessed data.
Reset this instance. Loses all unprocessed data.
[ "Reset", "this", "instance", ".", "Loses", "all", "unprocessed", "data", "." ]
def reset(self): """Reset this instance. Loses all unprocessed data.""" self.rawdata = '' self.lasttag = '???' self.interesting = interesting_normal self.cdata_elem = None _markupbase.ParserBase.reset(self)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "rawdata", "=", "''", "self", ".", "lasttag", "=", "'???'", "self", ".", "interesting", "=", "interesting_normal", "self", ".", "cdata_elem", "=", "None", "_markupbase", ".", "ParserBase", ".", "reset", "(", "self", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/html/parser.py#L95-L101
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/ConfigSet.py
python
ConfigSet.__contains__
(self, key)
Enable the *in* syntax:: if 'foo' in env: print(env['foo'])
Enable the *in* syntax::
[ "Enable", "the", "*", "in", "*", "syntax", "::" ]
def __contains__(self, key): """ Enable the *in* syntax:: if 'foo' in env: print(env['foo']) """ if key in self.table: return True try: return self.parent.__contains__(key) except AttributeError: return False
[ "def", "__contains__", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "table", ":", "return", "True", "try", ":", "return", "self", ".", "parent", ".", "__contains__", "(", "key", ")", "except", "AttributeError", ":", "return", "False" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/ConfigSet.py#L40-L49
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/distutils/msvc9compiler.py
python
Reg.read_values
(cls, base, key)
return d
Return dict of registry keys and values. All names are converted to lowercase.
Return dict of registry keys and values.
[ "Return", "dict", "of", "registry", "keys", "and", "values", "." ]
def read_values(cls, base, key): """Return dict of registry keys and values. All names are converted to lowercase. """ try: handle = RegOpenKeyEx(base, key) except RegError: return None d = {} i = 0 while True: try: name, value, type = RegEnumValue(handle, i) except RegError: break name = name.lower() d[cls.convert_mbcs(name)] = cls.convert_mbcs(value) i += 1 return d
[ "def", "read_values", "(", "cls", ",", "base", ",", "key", ")", ":", "try", ":", "handle", "=", "RegOpenKeyEx", "(", "base", ",", "key", ")", "except", "RegError", ":", "return", "None", "d", "=", "{", "}", "i", "=", "0", "while", "True", ":", "try", ":", "name", ",", "value", ",", "type", "=", "RegEnumValue", "(", "handle", ",", "i", ")", "except", "RegError", ":", "break", "name", "=", "name", ".", "lower", "(", ")", "d", "[", "cls", ".", "convert_mbcs", "(", "name", ")", "]", "=", "cls", ".", "convert_mbcs", "(", "value", ")", "i", "+=", "1", "return", "d" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/distutils/msvc9compiler.py#L84-L103
cvxpy/cvxpy
5165b4fb750dfd237de8659383ef24b4b2e33aaf
cvxpy/reductions/solvers/conic_solvers/scipy_conif.py
python
SCIPY.import_solver
(self)
Imports the solver.
Imports the solver.
[ "Imports", "the", "solver", "." ]
def import_solver(self) -> None: """Imports the solver. """ from scipy import optimize as opt opt
[ "def", "import_solver", "(", "self", ")", "->", "None", ":", "from", "scipy", "import", "optimize", "as", "opt", "opt" ]
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/reductions/solvers/conic_solvers/scipy_conif.py#L47-L51
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
python/configobj/configobj.py
python
Section.as_int
(self, key)
return int(self[key])
A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2'
A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2'
[ "A", "convenience", "method", "which", "coerces", "the", "specified", "value", "to", "an", "integer", ".", "If", "the", "value", "is", "an", "invalid", "literal", "for", "int", "a", "ValueError", "will", "be", "raised", ".", ">>>", "a", "=", "ConfigObj", "()", ">>>", "a", "[", "a", "]", "=", "fish", ">>>", "a", ".", "as_int", "(", "a", ")", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "ValueError", ":", "invalid", "literal", "for", "int", "()", "with", "base", "10", ":", "fish", ">>>", "a", "[", "b", "]", "=", "1", ">>>", "a", ".", "as_int", "(", "b", ")", "1", ">>>", "a", "[", "b", "]", "=", "3", ".", "2", ">>>", "a", ".", "as_int", "(", "b", ")", "Traceback", "(", "most", "recent", "call", "last", ")", ":", "ValueError", ":", "invalid", "literal", "for", "int", "()", "with", "base", "10", ":", "3", ".", "2" ]
def as_int(self, key): """ A convenience method which coerces the specified value to an integer. If the value is an invalid literal for ``int``, a ``ValueError`` will be raised. >>> a = ConfigObj() >>> a['a'] = 'fish' >>> a.as_int('a') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: 'fish' >>> a['b'] = '1' >>> a.as_int('b') 1 >>> a['b'] = '3.2' >>> a.as_int('b') Traceback (most recent call last): ValueError: invalid literal for int() with base 10: '3.2' """ return int(self[key])
[ "def", "as_int", "(", "self", ",", "key", ")", ":", "return", "int", "(", "self", "[", "key", "]", ")" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/python/configobj/configobj.py#L984-L1004
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/stc.py
python
StyledTextCtrl.IndicatorGetOutlineAlpha
(*args, **kwargs)
return _stc.StyledTextCtrl_IndicatorGetOutlineAlpha(*args, **kwargs)
IndicatorGetOutlineAlpha(self, int indicator) -> int
IndicatorGetOutlineAlpha(self, int indicator) -> int
[ "IndicatorGetOutlineAlpha", "(", "self", "int", "indicator", ")", "-", ">", "int" ]
def IndicatorGetOutlineAlpha(*args, **kwargs): """IndicatorGetOutlineAlpha(self, int indicator) -> int""" return _stc.StyledTextCtrl_IndicatorGetOutlineAlpha(*args, **kwargs)
[ "def", "IndicatorGetOutlineAlpha", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_stc", ".", "StyledTextCtrl_IndicatorGetOutlineAlpha", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/stc.py#L5795-L5797
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_controls.py
python
Slider.GetPageSize
(*args, **kwargs)
return _controls_.Slider_GetPageSize(*args, **kwargs)
GetPageSize(self) -> int
GetPageSize(self) -> int
[ "GetPageSize", "(", "self", ")", "-", ">", "int" ]
def GetPageSize(*args, **kwargs): """GetPageSize(self) -> int""" return _controls_.Slider_GetPageSize(*args, **kwargs)
[ "def", "GetPageSize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_controls_", ".", "Slider_GetPageSize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L2883-L2885
apache/parquet-cpp
642da055adf009652689b20e68a198cffb857651
build-support/cpplint.py
python
GetLineWidth
(line)
Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters.
Determines the width of the line in column positions.
[ "Determines", "the", "width", "of", "the", "line", "in", "column", "positions", "." ]
def GetLineWidth(line): """Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line)
[ "def", "GetLineWidth", "(", "line", ")", ":", "if", "isinstance", "(", "line", ",", "unicode", ")", ":", "width", "=", "0", "for", "uc", "in", "unicodedata", ".", "normalize", "(", "'NFC'", ",", "line", ")", ":", "if", "unicodedata", ".", "east_asian_width", "(", "uc", ")", "in", "(", "'W'", ",", "'F'", ")", ":", "width", "+=", "2", "elif", "not", "unicodedata", ".", "combining", "(", "uc", ")", ":", "width", "+=", "1", "return", "width", "else", ":", "return", "len", "(", "line", ")" ]
https://github.com/apache/parquet-cpp/blob/642da055adf009652689b20e68a198cffb857651/build-support/cpplint.py#L4351-L4370
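A minimal Python 3 sketch of the same column-width rule as GetLineWidth above (the original is Python 2 and branches on the `unicode` type); wide and fullwidth East Asian characters count as two columns, combining marks as zero. Illustrative only, not part of the cpplint source.

import unicodedata

def line_width(line):
    # Width in terminal columns: 'W'/'F' code points take two columns,
    # combining marks take none, everything else takes one.
    width = 0
    for ch in unicodedata.normalize('NFC', line):
        if unicodedata.east_asian_width(ch) in ('W', 'F'):
            width += 2
        elif not unicodedata.combining(ch):
            width += 1
    return width

print(line_width('abc'))    # 3
print(line_width('日本語'))  # 6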
apache/singa
93fd9da72694e68bfe3fb29d0183a65263d238a1
python/singa/layer.py
python
Layer.device_check
(self, *inputs)
Check if the devices of the input tensor are the same. Keep the device where each tensors is located the same as the first tensor. Copy data to the device of the first tensor if the device does not match. Args: *inputs: input args consisting of only PyTensors
Check if the devices of the input tensor are the same.
[ "Check", "if", "the", "devices", "of", "the", "input", "tensor", "are", "the", "same", "." ]
def device_check(self, *inputs): """ Check if the devices of the input tensor are the same. Keep the device where each tensors is located the same as the first tensor. Copy data to the device of the first tensor if the device does not match. Args: *inputs: input args consisting of only PyTensors """ # disabled the graph to prevent buffering data transfer operator x_device = inputs[0].device prev_state = x_device.graph_enabled() x_device.EnableGraph(False) x_dev_id = x_device.id() for var in inputs: if var.device.id() != x_dev_id: var.to_device(x_device) x_device.EnableGraph(prev_state)
[ "def", "device_check", "(", "self", ",", "*", "inputs", ")", ":", "# disabled the graph to prevent buffering data transfer operator", "x_device", "=", "inputs", "[", "0", "]", ".", "device", "prev_state", "=", "x_device", ".", "graph_enabled", "(", ")", "x_device", ".", "EnableGraph", "(", "False", ")", "x_dev_id", "=", "x_device", ".", "id", "(", ")", "for", "var", "in", "inputs", ":", "if", "var", ".", "device", ".", "id", "(", ")", "!=", "x_dev_id", ":", "var", ".", "to_device", "(", "x_device", ")", "x_device", ".", "EnableGraph", "(", "prev_state", ")" ]
https://github.com/apache/singa/blob/93fd9da72694e68bfe3fb29d0183a65263d238a1/python/singa/layer.py#L187-L205
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/telemetry/third_party/pyserial/serial/rfc2217.py
python
RFC2217Serial.sendBreak
(self, duration=0.25)
Send break condition. Timed, returns to idle state after given duration.
Send break condition. Timed, returns to idle state after given duration.
[ "Send", "break", "condition", ".", "Timed", "returns", "to", "idle", "state", "after", "given", "duration", "." ]
def sendBreak(self, duration=0.25): """Send break condition. Timed, returns to idle state after given duration.""" if not self._isOpen: raise portNotOpenError self.setBreak(True) time.sleep(duration) self.setBreak(False)
[ "def", "sendBreak", "(", "self", ",", "duration", "=", "0.25", ")", ":", "if", "not", "self", ".", "_isOpen", ":", "raise", "portNotOpenError", "self", ".", "setBreak", "(", "True", ")", "time", ".", "sleep", "(", "duration", ")", "self", ".", "setBreak", "(", "False", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/telemetry/third_party/pyserial/serial/rfc2217.py#L618-L624
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/requests/requests/models.py
python
PreparedRequest.prepare_headers
(self, headers)
Prepares the given HTTP headers.
Prepares the given HTTP headers.
[ "Prepares", "the", "given", "HTTP", "headers", "." ]
def prepare_headers(self, headers): """Prepares the given HTTP headers.""" if headers: self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items()) else: self.headers = CaseInsensitiveDict()
[ "def", "prepare_headers", "(", "self", ",", "headers", ")", ":", "if", "headers", ":", "self", ".", "headers", "=", "CaseInsensitiveDict", "(", "(", "to_native_string", "(", "name", ")", ",", "value", ")", "for", "name", ",", "value", "in", "headers", ".", "items", "(", ")", ")", "else", ":", "self", ".", "headers", "=", "CaseInsensitiveDict", "(", ")" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/requests/requests/models.py#L405-L411
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/richtext.py
python
RichTextObjectList.index
(*args, **kwargs)
return _richtext.RichTextObjectList_index(*args, **kwargs)
index(self, RichTextObject obj) -> int
index(self, RichTextObject obj) -> int
[ "index", "(", "self", "RichTextObject", "obj", ")", "-", ">", "int" ]
def index(*args, **kwargs): """index(self, RichTextObject obj) -> int""" return _richtext.RichTextObjectList_index(*args, **kwargs)
[ "def", "index", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextObjectList_index", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/richtext.py#L1542-L1544
facebookincubator/BOLT
88c70afe9d388ad430cc150cc158641701397f70
lldb/third_party/Python/module/pexpect-4.6/pexpect/FSM.py
python
FSM.process
(self, input_symbol)
This is the main method that you call to process input. This may cause the FSM to change state and call an action. This method calls get_transition() to find the action and next_state associated with the input_symbol and current_state. If the action is None then the action is not called and only the current state is changed. This method processes one complete input symbol. You can process a list of symbols (or a string) by calling process_list().
This is the main method that you call to process input. This may cause the FSM to change state and call an action. This method calls get_transition() to find the action and next_state associated with the input_symbol and current_state. If the action is None then the action is not called and only the current state is changed. This method processes one complete input symbol. You can process a list of symbols (or a string) by calling process_list().
[ "This", "is", "the", "main", "method", "that", "you", "call", "to", "process", "input", ".", "This", "may", "cause", "the", "FSM", "to", "change", "state", "and", "call", "an", "action", ".", "This", "method", "calls", "get_transition", "()", "to", "find", "the", "action", "and", "next_state", "associated", "with", "the", "input_symbol", "and", "current_state", ".", "If", "the", "action", "is", "None", "then", "the", "action", "is", "not", "called", "and", "only", "the", "current", "state", "is", "changed", ".", "This", "method", "processes", "one", "complete", "input", "symbol", ".", "You", "can", "process", "a", "list", "of", "symbols", "(", "or", "a", "string", ")", "by", "calling", "process_list", "()", "." ]
def process (self, input_symbol): '''This is the main method that you call to process input. This may cause the FSM to change state and call an action. This method calls get_transition() to find the action and next_state associated with the input_symbol and current_state. If the action is None then the action is not called and only the current state is changed. This method processes one complete input symbol. You can process a list of symbols (or a string) by calling process_list(). ''' self.input_symbol = input_symbol (self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state) if self.action is not None: self.action (self) self.current_state = self.next_state self.next_state = None
[ "def", "process", "(", "self", ",", "input_symbol", ")", ":", "self", ".", "input_symbol", "=", "input_symbol", "(", "self", ".", "action", ",", "self", ".", "next_state", ")", "=", "self", ".", "get_transition", "(", "self", ".", "input_symbol", ",", "self", ".", "current_state", ")", "if", "self", ".", "action", "is", "not", "None", ":", "self", ".", "action", "(", "self", ")", "self", ".", "current_state", "=", "self", ".", "next_state", "self", ".", "next_state", "=", "None" ]
https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/lldb/third_party/Python/module/pexpect-4.6/pexpect/FSM.py#L228-L243
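A hedged usage sketch of FSM.process: assuming the classic pexpect FSM module above is importable as `pexpect.FSM` (the exact import path can vary by packaging), transitions are registered with add_transition and then driven one input symbol at a time.

from pexpect.FSM import FSM

def turn_on(fsm):
    # Action callbacks receive the FSM instance (see self.action(self) above).
    print('light on')

def turn_off(fsm):
    print('light off')

toggle = FSM('off')                                  # initial state
toggle.add_transition('push', 'off', turn_on, 'on')  # input, state, action, next state
toggle.add_transition('push', 'on', turn_off, 'off')
for symbol in ['push', 'push', 'push']:
    toggle.process(symbol)                           # on, off, on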
baidu/bigflow
449245016c0df7d1252e85581e588bfc60cefad3
bigflow_python/python/bigflow/transforms.py
python
left_join
(*pcollections, **options)
return bigflow.transform_impls.join.left_join(*pcollections, **options)
Performs a left join of multiple input PCollections by key; the join result is (key, (value 1, value 2, ..., value n)), and if the m-th PCollection has no element, value m is None Args: *pcollections: input PCollections **options: configurable options Returns: PCollection: the join result >>> x = _pipeline.parallelize([("a", 1), ("b", 4)]) >>> y = _pipeline.parallelize([("a", 2)]) >>> transforms.left_join(x, y).get() [("a", (1, 2)), ("b", (4, None))]
Performs a left join of multiple input PCollections by key; the join result is (key, (value 1, value 2, ..., value n)), and if the m-th PCollection has no element, value m is None
[ "对于多个输入PCollection,根据key对PCollection做左连接操作", ",连接结果为", "(", "key", "(", "value", "1", "value", "2", "...", "value", "n", "))", ",若第m个PCollection没有元素,", "则value", "m为None" ]
def left_join(*pcollections, **options): """ Performs a left join of multiple input PCollections by key; the join result is (key, (value 1, value 2, ..., value n)), and if the m-th PCollection has no element, value m is None Args: *pcollections: input PCollections **options: configurable options Returns: PCollection: the join result >>> x = _pipeline.parallelize([("a", 1), ("b", 4)]) >>> y = _pipeline.parallelize([("a", 2)]) >>> transforms.left_join(x, y).get() [("a", (1, 2)), ("b", (4, None))] """ import bigflow.transform_impls.join return bigflow.transform_impls.join.left_join(*pcollections, **options)
[ "def", "left_join", "(", "*", "pcollections", ",", "*", "*", "options", ")", ":", "import", "bigflow", ".", "transform_impls", ".", "join", "return", "bigflow", ".", "transform_impls", ".", "join", ".", "left_join", "(", "*", "pcollections", ",", "*", "*", "options", ")" ]
https://github.com/baidu/bigflow/blob/449245016c0df7d1252e85581e588bfc60cefad3/bigflow_python/python/bigflow/transforms.py#L555-L574
bryanyzhu/Hidden-Two-Stream
f7f684adbdacb6df6b1cf196c3a476cd23484a0f
scripts/cpp_lint.py
python
FileInfo.NoExtension
(self)
return '/'.join(self.Split()[0:2])
File has no source file extension.
File has no source file extension.
[ "File", "has", "no", "source", "file", "extension", "." ]
def NoExtension(self): """File has no source file extension.""" return '/'.join(self.Split()[0:2])
[ "def", "NoExtension", "(", "self", ")", ":", "return", "'/'", ".", "join", "(", "self", ".", "Split", "(", ")", "[", "0", ":", "2", "]", ")" ]
https://github.com/bryanyzhu/Hidden-Two-Stream/blob/f7f684adbdacb6df6b1cf196c3a476cd23484a0f/scripts/cpp_lint.py#L952-L954
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py2/scipy/linalg/_interpolative_backend.py
python
iddr_svd
(A, k)
return U, V, S
Compute SVD of a real matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray`
Compute SVD of a real matrix to a specified rank.
[ "Compute", "SVD", "of", "a", "real", "matrix", "to", "a", "specified", "rank", "." ]
def iddr_svd(A, k): """ Compute SVD of a real matrix to a specified rank. :param A: Matrix. :type A: :class:`numpy.ndarray` :param k: Rank of SVD. :type k: int :return: Left singular vectors. :rtype: :class:`numpy.ndarray` :return: Right singular vectors. :rtype: :class:`numpy.ndarray` :return: Singular values. :rtype: :class:`numpy.ndarray` """ A = np.asfortranarray(A) U, V, S, ier = _id.iddr_svd(A, k) if ier: raise _RETCODE_ERROR return U, V, S
[ "def", "iddr_svd", "(", "A", ",", "k", ")", ":", "A", "=", "np", ".", "asfortranarray", "(", "A", ")", "U", ",", "V", ",", "S", ",", "ier", "=", "_id", ".", "iddr_svd", "(", "A", ",", "k", ")", "if", "ier", ":", "raise", "_RETCODE_ERROR", "return", "U", ",", "V", ",", "S" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/linalg/_interpolative_backend.py#L420-L445
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/richtext.py
python
RichTextRange.__sub__
(*args, **kwargs)
return _richtext.RichTextRange___sub__(*args, **kwargs)
__sub__(self, RichTextRange range) -> RichTextRange
__sub__(self, RichTextRange range) -> RichTextRange
[ "__sub__", "(", "self", "RichTextRange", "range", ")", "-", ">", "RichTextRange" ]
def __sub__(*args, **kwargs): """__sub__(self, RichTextRange range) -> RichTextRange""" return _richtext.RichTextRange___sub__(*args, **kwargs)
[ "def", "__sub__", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextRange___sub__", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/richtext.py#L957-L959
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/instrument.py
python
Instrument.prev_close_price
(self, prev_close_price)
Sets the prev_close_price of this Instrument. :param prev_close_price: The prev_close_price of this Instrument. # noqa: E501 :type: float
Sets the prev_close_price of this Instrument.
[ "Sets", "the", "prev_close_price", "of", "this", "Instrument", "." ]
def prev_close_price(self, prev_close_price): """Sets the prev_close_price of this Instrument. :param prev_close_price: The prev_close_price of this Instrument. # noqa: E501 :type: float """ self._prev_close_price = prev_close_price
[ "def", "prev_close_price", "(", "self", ",", "prev_close_price", ")", ":", "self", ".", "_prev_close_price", "=", "prev_close_price" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/instrument.py#L1832-L1840
makefile/frcnn
8d9b9ebf8be8315ba2f374d460121b0adf1df29c
scripts/cpp_lint.py
python
CleansedLines._CollapseStrings
(elided)
return elided
Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings.
Collapses strings and chars on a line to simple "" or '' blocks.
[ "Collapses", "strings", "and", "chars", "on", "a", "line", "to", "simple", "or", "blocks", "." ]
def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. """ if not _RE_PATTERN_INCLUDE.match(elided): # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) return elided
[ "def", "_CollapseStrings", "(", "elided", ")", ":", "if", "not", "_RE_PATTERN_INCLUDE", ".", "match", "(", "elided", ")", ":", "# Remove escaped characters first to make quote/single quote collapsing", "# basic. Things that look like escaped characters shouldn't occur", "# outside of strings and chars.", "elided", "=", "_RE_PATTERN_CLEANSE_LINE_ESCAPES", ".", "sub", "(", "''", ",", "elided", ")", "elided", "=", "_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES", ".", "sub", "(", "\"''\"", ",", "elided", ")", "elided", "=", "_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES", ".", "sub", "(", "'\"\"'", ",", "elided", ")", "return", "elided" ]
https://github.com/makefile/frcnn/blob/8d9b9ebf8be8315ba2f374d460121b0adf1df29c/scripts/cpp_lint.py#L1209-L1227
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/retries/throttling.py
python
CubicCalculator.get_params_snapshot
(self)
return CubicParams( w_max=self._w_max, k=self._k, last_fail=self._last_fail )
Return a read-only object of the current cubic parameters. These parameters are intended to be used for debug/troubleshooting purposes. These object is a read-only snapshot and cannot be used to modify the behavior of the CUBIC calculations. New parameters may be added to this object in the future.
Return a read-only object of the current cubic parameters.
[ "Return", "a", "read", "-", "only", "object", "of", "the", "current", "cubic", "parameters", "." ]
def get_params_snapshot(self): """Return a read-only object of the current cubic parameters. These parameters are intended to be used for debug/troubleshooting purposes. These object is a read-only snapshot and cannot be used to modify the behavior of the CUBIC calculations. New parameters may be added to this object in the future. """ return CubicParams( w_max=self._w_max, k=self._k, last_fail=self._last_fail )
[ "def", "get_params_snapshot", "(", "self", ")", ":", "return", "CubicParams", "(", "w_max", "=", "self", ".", "_w_max", ",", "k", "=", "self", ".", "_k", ",", "last_fail", "=", "self", ".", "_last_fail", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/botocore/retries/throttling.py#L40-L54
ablab/spades
3a754192b88540524ce6fb69eef5ea9273a38465
webvis/pydot.py
python
Graph.get_subgraph
(self, name)
return match
Retrieved a subgraph from the graph. Given a subgraph's name the corresponding Subgraph instance will be returned. If one or more subgraphs exist with the same name, a list of Subgraph instances is returned. An empty list is returned otherwise.
Retrieved a subgraph from the graph. Given a subgraph's name the corresponding Subgraph instance will be returned. If one or more subgraphs exist with the same name, a list of Subgraph instances is returned. An empty list is returned otherwise.
[ "Retrieved", "a", "subgraph", "from", "the", "graph", ".", "Given", "a", "subgraph", "s", "name", "the", "corresponding", "Subgraph", "instance", "will", "be", "returned", ".", "If", "one", "or", "more", "subgraphs", "exist", "with", "the", "same", "name", "a", "list", "of", "Subgraph", "instances", "is", "returned", ".", "An", "empty", "list", "is", "returned", "otherwise", "." ]
def get_subgraph(self, name): """Retrieved a subgraph from the graph. Given a subgraph's name the corresponding Subgraph instance will be returned. If one or more subgraphs exist with the same name, a list of Subgraph instances is returned. An empty list is returned otherwise. """ match = list() if self.obj_dict['subgraphs'].has_key( name ): sgraphs_obj_dict = self.obj_dict['subgraphs'].get( name ) for obj_dict_list in sgraphs_obj_dict: #match.extend( Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list ) match.append( Subgraph( obj_dict = obj_dict_list ) ) return match
[ "def", "get_subgraph", "(", "self", ",", "name", ")", ":", "match", "=", "list", "(", ")", "if", "self", ".", "obj_dict", "[", "'subgraphs'", "]", ".", "has_key", "(", "name", ")", ":", "sgraphs_obj_dict", "=", "self", ".", "obj_dict", "[", "'subgraphs'", "]", ".", "get", "(", "name", ")", "for", "obj_dict_list", "in", "sgraphs_obj_dict", ":", "#match.extend( Subgraph( obj_dict = obj_d ) for obj_d in obj_dict_list )", "match", ".", "append", "(", "Subgraph", "(", "obj_dict", "=", "obj_dict_list", ")", ")", "return", "match" ]
https://github.com/ablab/spades/blob/3a754192b88540524ce6fb69eef5ea9273a38465/webvis/pydot.py#L1516-L1537
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/maximum-equal-frequency.py
python
Solution.maxEqualFreq
(self, nums)
return result
:type nums: List[int] :rtype: int
:type nums: List[int] :rtype: int
[ ":", "type", "nums", ":", "List", "[", "int", "]", ":", "rtype", ":", "int" ]
def maxEqualFreq(self, nums): """ :type nums: List[int] :rtype: int """ result = 0 count = collections.Counter() freq = [0 for _ in xrange(len(nums)+1)] for i, n in enumerate(nums, 1): freq[count[n]] -= 1 freq[count[n]+1] += 1 count[n] += 1 c = count[n] if freq[c]*c == i and i < len(nums): result = i+1 remain = i-freq[c]*c if freq[remain] == 1 and remain in [1, c+1]: result = i return result
[ "def", "maxEqualFreq", "(", "self", ",", "nums", ")", ":", "result", "=", "0", "count", "=", "collections", ".", "Counter", "(", ")", "freq", "=", "[", "0", "for", "_", "in", "xrange", "(", "len", "(", "nums", ")", "+", "1", ")", "]", "for", "i", ",", "n", "in", "enumerate", "(", "nums", ",", "1", ")", ":", "freq", "[", "count", "[", "n", "]", "]", "-=", "1", "freq", "[", "count", "[", "n", "]", "+", "1", "]", "+=", "1", "count", "[", "n", "]", "+=", "1", "c", "=", "count", "[", "n", "]", "if", "freq", "[", "c", "]", "*", "c", "==", "i", "and", "i", "<", "len", "(", "nums", ")", ":", "result", "=", "i", "+", "1", "remain", "=", "i", "-", "freq", "[", "c", "]", "*", "c", "if", "freq", "[", "remain", "]", "==", "1", "and", "remain", "in", "[", "1", ",", "c", "+", "1", "]", ":", "result", "=", "i", "return", "result" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/maximum-equal-frequency.py#L8-L26
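A hedged illustration of the property maxEqualFreq above searches for: the longest prefix from which deleting exactly one element leaves every remaining value with the same occurrence count (an empty remainder counts as valid). The brute-force check below is for clarity only and is quadratic, unlike the counting solution above.

from collections import Counter

def prefix_ok(prefix):
    # True if removing exactly one element equalizes all occurrence counts.
    for i in range(len(prefix)):
        rest = prefix[:i] + prefix[i + 1:]
        if len(set(Counter(rest).values())) <= 1:
            return True
    return False

nums = [2, 2, 1, 1, 5, 3, 3, 5]
print(max(i for i in range(1, len(nums) + 1) if prefix_ok(nums[:i])))  # 7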
macchina-io/macchina.io
ef24ba0e18379c3dd48fb84e6dbf991101cb8db0
platform/JS/V8/v8/third_party/jinja2/utils.py
python
open_if_exists
(filename, mode='rb')
Returns a file descriptor for the filename if that file exists, otherwise `None`.
Returns a file descriptor for the filename if that file exists, otherwise `None`.
[ "Returns", "a", "file", "descriptor", "for", "the", "filename", "if", "that", "file", "exists", "otherwise", "None", "." ]
def open_if_exists(filename, mode='rb'): """Returns a file descriptor for the filename if that file exists, otherwise `None`. """ try: return open(filename, mode) except IOError as e: if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL): raise
[ "def", "open_if_exists", "(", "filename", ",", "mode", "=", "'rb'", ")", ":", "try", ":", "return", "open", "(", "filename", ",", "mode", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "not", "in", "(", "errno", ".", "ENOENT", ",", "errno", ".", "EISDIR", ",", "errno", ".", "EINVAL", ")", ":", "raise" ]
https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/third_party/jinja2/utils.py#L146-L154
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/tools/cpplint.py
python
CheckSpacingForFunctionCall
(filename, clean_lines, linenum, error)
Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Checks for the correctness of various spacing around function calls.
[ "Checks", "for", "the", "correctness", "of", "various", "spacing", "around", "function", "calls", "." ]
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): """Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Since function calls often occur inside if/for/while/switch # expressions - which have their own, more liberal conventions - we # first see if we should be looking inside such an expression for a # function call, to which we can apply more strict standards. fncall = line # if there's no control flow construct, look at whole line for pattern in (r'\bif\s*\((.*)\)\s*{', r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break # Except in if/for/while/switch, there should never be space # immediately inside parens (eg "f( 3, 4 )"). We make an exception # for nested parens ( (a+b) + c ). Likewise, there should never be # a space before a ( when it's a function argument. I assume it's a # function argument when the char before the whitespace is legal in # a function name (alnum + _) and we're not starting a macro. Also ignore # pointers and references to arrays and functions coz they're too tricky: # we use a very simple way to recognize these: # " (something)(maybe-something)" or # " (something)(maybe-something," or # " (something)[something]" # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') if (Search(r'\w\s+\(', fncall) and not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and not Search(r'\bcase\s+\(', fncall)): # TODO(unknown): Space after an operator function seem to be a common # error, silence those for now by restricting them to highest verbosity. if Search(r'\boperator_*\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: error(filename, linenum, 'whitespace/parens', 4, 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain if Search(r'[^)]\s+\)\s*[^{\s]', fncall): # If the closing parenthesis is preceded by only whitespaces, # try to give a more descriptive error message. if Search(r'^\s+\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: error(filename, linenum, 'whitespace/parens', 2, 'Extra space before )')
[ "def", "CheckSpacingForFunctionCall", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Since function calls often occur inside if/for/while/switch", "# expressions - which have their own, more liberal conventions - we", "# first see if we should be looking inside such an expression for a", "# function call, to which we can apply more strict standards.", "fncall", "=", "line", "# if there's no control flow construct, look at whole line", "for", "pattern", "in", "(", "r'\\bif\\s*\\((.*)\\)\\s*{'", ",", "r'\\bfor\\s*\\((.*)\\)\\s*{'", ",", "r'\\bwhile\\s*\\((.*)\\)\\s*[{;]'", ",", "r'\\bswitch\\s*\\((.*)\\)\\s*{'", ")", ":", "match", "=", "Search", "(", "pattern", ",", "line", ")", "if", "match", ":", "fncall", "=", "match", ".", "group", "(", "1", ")", "# look inside the parens for function calls", "break", "# Except in if/for/while/switch, there should never be space", "# immediately inside parens (eg \"f( 3, 4 )\"). We make an exception", "# for nested parens ( (a+b) + c ). Likewise, there should never be", "# a space before a ( when it's a function argument. I assume it's a", "# function argument when the char before the whitespace is legal in", "# a function name (alnum + _) and we're not starting a macro. Also ignore", "# pointers and references to arrays and functions coz they're too tricky:", "# we use a very simple way to recognize these:", "# \" (something)(maybe-something)\" or", "# \" (something)(maybe-something,\" or", "# \" (something)[something]\"", "# Note that we assume the contents of [] to be short enough that", "# they'll never need to wrap.", "if", "(", "# Ignore control structures.", "not", "Search", "(", "r'\\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\\b'", ",", "fncall", ")", "and", "# Ignore pointers/references to functions.", "not", "Search", "(", "r' \\([^)]+\\)\\([^)]*(\\)|,$)'", ",", "fncall", ")", "and", "# Ignore pointers/references to arrays.", "not", "Search", "(", "r' \\([^)]+\\)\\[[^\\]]+\\]'", ",", "fncall", ")", ")", ":", "if", "Search", "(", "r'\\w\\s*\\(\\s(?!\\s*\\\\$)'", ",", "fncall", ")", ":", "# a ( used for a fn call", "error", "(", "filename", ",", "linenum", ",", "'whitespace/parens'", ",", "4", ",", "'Extra space after ( in function call'", ")", "elif", "Search", "(", "r'\\(\\s+(?!(\\s*\\\\)|\\()'", ",", "fncall", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/parens'", ",", "2", ",", "'Extra space after ('", ")", "if", "(", "Search", "(", "r'\\w\\s+\\('", ",", "fncall", ")", "and", "not", "Search", "(", "r'_{0,2}asm_{0,2}\\s+_{0,2}volatile_{0,2}\\s+\\('", ",", "fncall", ")", "and", "not", "Search", "(", "r'#\\s*define|typedef|using\\s+\\w+\\s*='", ",", "fncall", ")", "and", "not", "Search", "(", "r'\\w\\s+\\((\\w+::)*\\*\\w+\\)\\('", ",", "fncall", ")", "and", "not", "Search", "(", "r'\\bcase\\s+\\('", ",", "fncall", ")", ")", ":", "# TODO(unknown): Space after an operator function seem to be a common", "# error, silence those for now by restricting them to highest verbosity.", "if", "Search", "(", "r'\\boperator_*\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/parens'", ",", "0", ",", "'Extra space before ( in function call'", ")", "else", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/parens'", ",", "4", ",", "'Extra space before ( in function call'", ")", "# If the ) is followed only by a newline or a { + newline, assume it's", "# part of a control 
statement (if/while/etc), and don't complain", "if", "Search", "(", "r'[^)]\\s+\\)\\s*[^{\\s]'", ",", "fncall", ")", ":", "# If the closing parenthesis is preceded by only whitespaces,", "# try to give a more descriptive error message.", "if", "Search", "(", "r'^\\s+\\)'", ",", "fncall", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/parens'", ",", "2", ",", "'Closing ) should be moved to the previous line'", ")", "else", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/parens'", ",", "2", ",", "'Extra space before )'", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/cpplint.py#L3464-L3538
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/tensor/math.py
python
isnan
(x, name=None)
return out
Return whether every element of input tensor is `NaN` or not. Args: x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: `Tensor`, the bool result which shows every element of `x` whether it is `NaN` or not. Examples: .. code-block:: python import paddle x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) out = paddle.tensor.isnan(x) print(out) # [False False False False False True True]
[]
def isnan(x, name=None): """ Return whether every element of input tensor is `NaN` or not. Args: x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: `Tensor`, the bool result which shows every element of `x` whether it is `NaN` or not. Examples: .. code-block:: python import paddle x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')]) out = paddle.tensor.isnan(x) print(out) # [False False False False False True True] """ if in_dygraph_mode(): return _C_ops.isnan_v2(x) helper = LayerHelper("isnan_v2", **locals()) check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan') out = helper.create_variable_for_type_inference(dtype='bool') helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out}) return out
[ "def", "isnan", "(", "x", ",", "name", "=", "None", ")", ":", "if", "in_dygraph_mode", "(", ")", ":", "return", "_C_ops", ".", "isnan_v2", "(", "x", ")", "helper", "=", "LayerHelper", "(", "\"isnan_v2\"", ",", "*", "*", "locals", "(", ")", ")", "check_variable_and_dtype", "(", "x", ",", "'x'", ",", "[", "'float16'", ",", "'float32'", ",", "'float64'", ",", "'int32'", ",", "'int64'", "]", ",", "'isnan'", ")", "out", "=", "helper", ".", "create_variable_for_type_inference", "(", "dtype", "=", "'bool'", ")", "helper", ".", "append_op", "(", "type", "=", "\"isnan_v2\"", ",", "inputs", "=", "{", "\"X\"", ":", "x", "}", ",", "outputs", "=", "{", "\"Out\"", ":", "out", "}", ")", "return", "out" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/tensor/math.py#L2695-L2721
Slicer/Slicer
ba9fadf332cb0303515b68d8d06a344c82e3e3e5
Base/Python/slicer/util.py
python
confirmRetryCloseDisplay
(text, windowTitle=None, parent=None, **kwargs)
return result == qt.QMessageBox.Retry
Display an error popup asking whether to retry, logging the text at error level. Return if confirmed with Retry. When the application is running in testing mode (``slicer.app.testingEnabled() == True``), the popup is skipped and False ("Close") is returned, with a message being logged to indicate this.
Display an error popup asking whether to retry, logging the text at error level. Return if confirmed with Retry.
[ "Display", "an", "error", "popup", "asking", "whether", "to", "retry", "logging", "the", "text", "at", "error", "level", ".", "Return", "if", "confirmed", "with", "Retry", "." ]
def confirmRetryCloseDisplay(text, windowTitle=None, parent=None, **kwargs): """Display an error popup asking whether to retry, logging the text at error level. Return if confirmed with Retry. When the application is running in testing mode (``slicer.app.testingEnabled() == True``), the popup is skipped and False ("Close") is returned, with a message being logged to indicate this. """ import qt, logging result = _messageDisplay(logging.ERROR, text, False, parent=parent, windowTitle=windowTitle, icon=qt.QMessageBox.Critical, standardButtons=qt.QMessageBox.Retry | qt.QMessageBox.Close, **kwargs) return result == qt.QMessageBox.Retry
[ "def", "confirmRetryCloseDisplay", "(", "text", ",", "windowTitle", "=", "None", ",", "parent", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "qt", ",", "logging", "result", "=", "_messageDisplay", "(", "logging", ".", "ERROR", ",", "text", ",", "False", ",", "parent", "=", "parent", ",", "windowTitle", "=", "windowTitle", ",", "icon", "=", "qt", ".", "QMessageBox", ".", "Critical", ",", "standardButtons", "=", "qt", ".", "QMessageBox", ".", "Retry", "|", "qt", ".", "QMessageBox", ".", "Close", ",", "*", "*", "kwargs", ")", "return", "result", "==", "qt", ".", "QMessageBox", ".", "Retry" ]
https://github.com/Slicer/Slicer/blob/ba9fadf332cb0303515b68d8d06a344c82e3e3e5/Base/Python/slicer/util.py#L2365-L2375
cvxpy/cvxpy
5165b4fb750dfd237de8659383ef24b4b2e33aaf
cvxpy/transforms/indicator.py
python
indicator.is_nonneg
(self)
return True
Is the expression positive?
Is the expression positive?
[ "Is", "the", "expression", "positive?" ]
def is_nonneg(self) -> bool: """Is the expression positive? """ return True
[ "def", "is_nonneg", "(", "self", ")", "->", "bool", ":", "return", "True" ]
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/transforms/indicator.py#L66-L69
swift/swift
12d031cf8177fdec0137f9aa7e2912fa23c4416b
3rdParty/SCons/scons-3.0.1/engine/SCons/Tool/msgmerge.py
python
generate
(env,**kw)
Generate the `xgettext` tool
Generate the `xgettext` tool
[ "Generate", "the", "xgettext", "tool" ]
def generate(env,**kw): """ Generate the `xgettext` tool """ from SCons.Tool.GettextCommon import _detect_msgmerge try: env['MSGMERGE'] = _detect_msgmerge(env) except: env['MSGMERGE'] = 'msgmerge' env.SetDefault( POTSUFFIX = ['.pot'], POSUFFIX = ['.po'], MSGMERGECOM = '$MSGMERGE $MSGMERGEFLAGS --update $TARGET $SOURCE', MSGMERGECOMSTR = '', MSGMERGEFLAGS = [ ], POUPDATE_ALIAS = 'po-update' ) env.Append(BUILDERS = { '_POUpdateBuilder':_POUpdateBuilder(env) }) env.AddMethod(_POUpdateBuilderWrapper, 'POUpdate') env.AlwaysBuild(env.Alias('$POUPDATE_ALIAS'))
[ "def", "generate", "(", "env", ",", "*", "*", "kw", ")", ":", "from", "SCons", ".", "Tool", ".", "GettextCommon", "import", "_detect_msgmerge", "try", ":", "env", "[", "'MSGMERGE'", "]", "=", "_detect_msgmerge", "(", "env", ")", "except", ":", "env", "[", "'MSGMERGE'", "]", "=", "'msgmerge'", "env", ".", "SetDefault", "(", "POTSUFFIX", "=", "[", "'.pot'", "]", ",", "POSUFFIX", "=", "[", "'.po'", "]", ",", "MSGMERGECOM", "=", "'$MSGMERGE $MSGMERGEFLAGS --update $TARGET $SOURCE'", ",", "MSGMERGECOMSTR", "=", "''", ",", "MSGMERGEFLAGS", "=", "[", "]", ",", "POUPDATE_ALIAS", "=", "'po-update'", ")", "env", ".", "Append", "(", "BUILDERS", "=", "{", "'_POUpdateBuilder'", ":", "_POUpdateBuilder", "(", "env", ")", "}", ")", "env", ".", "AddMethod", "(", "_POUpdateBuilderWrapper", ",", "'POUpdate'", ")", "env", ".", "AlwaysBuild", "(", "env", ".", "Alias", "(", "'$POUPDATE_ALIAS'", ")", ")" ]
https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Tool/msgmerge.py#L70-L87
avast/retdec
b9879088a5f0278508185ec645494e6c5c57a455
scripts/type_extractor/type_extractor/header_text_filters.py
python
filter_cplusplus_ifdefs
(text)
return text
Removes code for C++.
Removes code for C++.
[ "Removes", "code", "for", "C", "++", "." ]
def filter_cplusplus_ifdefs(text): """Removes code for C++.""" text = re.sub( r'#\s*if(def)?\s*\(?__cplusplus\)?.*?#\s*(else|elif|endif)', ';', text, flags=re.S) return text
[ "def", "filter_cplusplus_ifdefs", "(", "text", ")", ":", "text", "=", "re", ".", "sub", "(", "r'#\\s*if(def)?\\s*\\(?__cplusplus\\)?.*?#\\s*(else|elif|endif)'", ",", "';'", ",", "text", ",", "flags", "=", "re", ".", "S", ")", "return", "text" ]
https://github.com/avast/retdec/blob/b9879088a5f0278508185ec645494e6c5c57a455/scripts/type_extractor/type_extractor/header_text_filters.py#L49-L53
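A small self-contained demo of the C++-guard filter above; the function is re-declared locally so the snippet runs on its own (the original lives in retdec's type_extractor package), and the sample header is hypothetical.

import re

def filter_cplusplus_ifdefs(text):
    """Removes code for C++ (same regex as the retdec helper above)."""
    return re.sub(
        r'#\s*if(def)?\s*\(?__cplusplus\)?.*?#\s*(else|elif|endif)', ';',
        text, flags=re.S)

header = '#ifdef __cplusplus\nextern "C" {\n#endif\nint f(int);\n'
print(filter_cplusplus_ifdefs(header))
# The extern "C" guard collapses to ';', leaving only the C declaration.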
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/retdec-3.2/scripts/type_extractor/type_extractor/parse_includes.py
python
get_typedefs
(text)
return re.findall('typedef\s*([\w\s\*\[\]\(\),.+-/]+?)\s*;', text)
Gets typedefs from text except struct, union and enum typedefs.
Gets typedefs from text except struct, union and enum typedefs.
[ "Gets", "typedefs", "from", "text", "except", "struct", "union", "and", "enum", "typedefs", "." ]
def get_typedefs(text): """Gets typedefs from text except struct, union and enum typedefs.""" return re.findall('typedef\s*([\w\s\*\[\]\(\),.+-/]+?)\s*;', text)
[ "def", "get_typedefs", "(", "text", ")", ":", "return", "re", ".", "findall", "(", "'typedef\\s*([\\w\\s\\*\\[\\]\\(\\),.+-/]+?)\\s*;'", ",", "text", ")" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/retdec-3.2/scripts/type_extractor/type_extractor/parse_includes.py#L215-L217
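A small self-contained demo of get_typedefs above; the function is re-declared locally so the snippet runs on its own, and the sample source line is hypothetical. Struct, union and enum typedefs are intentionally not matched by this pattern.

import re

def get_typedefs(text):
    """Gets typedefs from text except struct, union and enum typedefs."""
    return re.findall(r'typedef\s*([\w\s\*\[\]\(\),.+-/]+?)\s*;', text)

source = 'typedef unsigned long size_type; typedef int (*cmp_fn)(int, int);'
print(get_typedefs(source))
# ['unsigned long size_type', 'int (*cmp_fn)(int, int)']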