nwo
stringlengths
5
86
sha
stringlengths
40
40
path
stringlengths
4
189
language
stringclasses
1 value
identifier
stringlengths
1
94
parameters
stringlengths
2
4.03k
argument_list
stringclasses
1 value
return_statement
stringlengths
0
11.5k
docstring
stringlengths
1
33.2k
docstring_summary
stringlengths
0
5.15k
docstring_tokens
sequence
function
stringlengths
34
151k
function_tokens
sequence
url
stringlengths
90
278
google/mysql-protobuf
467cda676afaa49e762c5c9164a43f6ad31a1fbf
protobuf/python/google/protobuf/text_format.py
python
_Tokenizer.ConsumeInt32
(self)
return result
Consumes a signed 32bit integer number. Returns: The integer parsed. Raises: ParseError: If a signed 32bit integer couldn't be consumed.
Consumes a signed 32bit integer number.
[ "Consumes", "a", "signed", "32bit", "integer", "number", "." ]
def ConsumeInt32(self): """Consumes a signed 32bit integer number. Returns: The integer parsed. Raises: ParseError: If a signed 32bit integer couldn't be consumed. """ try: result = ParseInteger(self.token, is_signed=True, is_long=False) except ValueError, e: raise self._ParseError(str(e)) self.NextToken() return result
[ "def", "ConsumeInt32", "(", "self", ")", ":", "try", ":", "result", "=", "ParseInteger", "(", "self", ".", "token", ",", "is_signed", "=", "True", ",", "is_long", "=", "False", ")", "except", "ValueError", ",", "e", ":", "raise", "self", ".", "_ParseError", "(", "str", "(", "e", ")", ")", "self", ".", "NextToken", "(", ")", "return", "result" ]
https://github.com/google/mysql-protobuf/blob/467cda676afaa49e762c5c9164a43f6ad31a1fbf/protobuf/python/google/protobuf/text_format.py#L599-L613
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_core.py
python
PyApp.WakeUpIdle
(*args, **kwargs)
return _core_.PyApp_WakeUpIdle(*args, **kwargs)
WakeUpIdle(self) Make sure that idle events are sent again. :see: `wx.WakeUpIdle`
WakeUpIdle(self)
[ "WakeUpIdle", "(", "self", ")" ]
def WakeUpIdle(*args, **kwargs): """ WakeUpIdle(self) Make sure that idle events are sent again. :see: `wx.WakeUpIdle` """ return _core_.PyApp_WakeUpIdle(*args, **kwargs)
[ "def", "WakeUpIdle", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "PyApp_WakeUpIdle", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L7926-L7933
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py
python
BucketAccessControlsUpdate.RunWithArgs
(self, bucket, entity)
Updates an ACL entry on the specified bucket. Args: bucket: The name of the bucket. entity: The entity holding the permission, in one of the following forms: - user-userId - user-email - group-groupId - group-email - domain-domain - project-team-projectId - allUsers - allAuthenticatedUsers Examples: - The user [email protected] would be [email protected]. - The group [email protected] would be [email protected]. - To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com. Flags: domain: The domain associated with the entity, if any. email: The email address associated with the entity, if any. entityId: The ID for the entity, if any. etag: HTTP 1.1 Entity tag for the access-control entry. id: The ID of the access-control entry. kind: The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl. projectTeam: The project team associated with the entity, if any. role: The access permission for the entity. Can be READER, WRITER, or OWNER. selfLink: The link to this access-control entry.
Updates an ACL entry on the specified bucket.
[ "Updates", "an", "ACL", "entry", "on", "the", "specified", "bucket", "." ]
def RunWithArgs(self, bucket, entity): """Updates an ACL entry on the specified bucket. Args: bucket: The name of the bucket. entity: The entity holding the permission, in one of the following forms: - user-userId - user-email - group-groupId - group-email - domain-domain - project-team-projectId - allUsers - allAuthenticatedUsers Examples: - The user [email protected] would be [email protected]. - The group [email protected] would be [email protected]. - To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com. Flags: domain: The domain associated with the entity, if any. email: The email address associated with the entity, if any. entityId: The ID for the entity, if any. etag: HTTP 1.1 Entity tag for the access-control entry. id: The ID of the access-control entry. kind: The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl. projectTeam: The project team associated with the entity, if any. role: The access permission for the entity. Can be READER, WRITER, or OWNER. selfLink: The link to this access-control entry. """ client = GetClientFromFlags() global_params = GetGlobalParamsFromFlags() request = messages.BucketAccessControl( bucket=bucket.decode('utf8'), entity=entity.decode('utf8'), ) if FLAGS['domain'].present: request.domain = FLAGS.domain.decode('utf8') if FLAGS['email'].present: request.email = FLAGS.email.decode('utf8') if FLAGS['entityId'].present: request.entityId = FLAGS.entityId.decode('utf8') if FLAGS['etag'].present: request.etag = FLAGS.etag.decode('utf8') if FLAGS['id'].present: request.id = FLAGS.id.decode('utf8') if FLAGS['kind'].present: request.kind = FLAGS.kind.decode('utf8') if FLAGS['projectTeam'].present: request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam) if FLAGS['role'].present: request.role = FLAGS.role.decode('utf8') if FLAGS['selfLink'].present: request.selfLink = FLAGS.selfLink.decode('utf8') result = client.bucketAccessControls.Update( request, global_params=global_params) print apitools_base_cli.FormatOutput(result)
[ "def", "RunWithArgs", "(", "self", ",", "bucket", ",", "entity", ")", ":", "client", "=", "GetClientFromFlags", "(", ")", "global_params", "=", "GetGlobalParamsFromFlags", "(", ")", "request", "=", "messages", ".", "BucketAccessControl", "(", "bucket", "=", "bucket", ".", "decode", "(", "'utf8'", ")", ",", "entity", "=", "entity", ".", "decode", "(", "'utf8'", ")", ",", ")", "if", "FLAGS", "[", "'domain'", "]", ".", "present", ":", "request", ".", "domain", "=", "FLAGS", ".", "domain", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'email'", "]", ".", "present", ":", "request", ".", "email", "=", "FLAGS", ".", "email", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'entityId'", "]", ".", "present", ":", "request", ".", "entityId", "=", "FLAGS", ".", "entityId", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'etag'", "]", ".", "present", ":", "request", ".", "etag", "=", "FLAGS", ".", "etag", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'id'", "]", ".", "present", ":", "request", ".", "id", "=", "FLAGS", ".", "id", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'kind'", "]", ".", "present", ":", "request", ".", "kind", "=", "FLAGS", ".", "kind", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'projectTeam'", "]", ".", "present", ":", "request", ".", "projectTeam", "=", "apitools_base", ".", "JsonToMessage", "(", "messages", ".", "BucketAccessControl", ".", "ProjectTeamValue", ",", "FLAGS", ".", "projectTeam", ")", "if", "FLAGS", "[", "'role'", "]", ".", "present", ":", "request", ".", "role", "=", "FLAGS", ".", "role", ".", "decode", "(", "'utf8'", ")", "if", "FLAGS", "[", "'selfLink'", "]", ".", "present", ":", "request", ".", "selfLink", "=", "FLAGS", ".", "selfLink", ".", "decode", "(", "'utf8'", ")", "result", "=", "client", ".", "bucketAccessControls", ".", "Update", "(", "request", ",", "global_params", "=", "global_params", ")", "print", "apitools_base_cli", ".", "FormatOutput", "(", "result", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/apitools/samples/storage_sample/storage/storage_v1.py#L538-L591
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/resmokelib/logging/handlers.py
python
BufferedHandler.__init__
(self, capacity, interval_secs)
Initialize the handler with the buffer size and timeout. These values determine when the buffer is flushed regardless.
Initialize the handler with the buffer size and timeout.
[ "Initialize", "the", "handler", "with", "the", "buffer", "size", "and", "timeout", "." ]
def __init__(self, capacity, interval_secs): """Initialize the handler with the buffer size and timeout. These values determine when the buffer is flushed regardless. """ logging.Handler.__init__(self) if not isinstance(capacity, int): raise TypeError("capacity must be an integer") elif capacity <= 0: raise ValueError("capacity must be a positive integer") if not isinstance(interval_secs, (int, float)): raise TypeError("interval_secs must be a number") elif interval_secs <= 0.0: raise ValueError("interval_secs must be a positive number") self.capacity = capacity self.interval_secs = interval_secs # self.__emit_lock prohibits concurrent access to 'self.__emit_buffer', # 'self.__flush_event', and self.__flush_scheduled_by_emit. self.__emit_lock = threading.Lock() self.__emit_buffer = [] self.__flush_event = None # A handle to the event that calls self.flush(). self.__flush_scheduled_by_emit = False self.__close_called = False self.__flush_lock = threading.Lock()
[ "def", "__init__", "(", "self", ",", "capacity", ",", "interval_secs", ")", ":", "logging", ".", "Handler", ".", "__init__", "(", "self", ")", "if", "not", "isinstance", "(", "capacity", ",", "int", ")", ":", "raise", "TypeError", "(", "\"capacity must be an integer\"", ")", "elif", "capacity", "<=", "0", ":", "raise", "ValueError", "(", "\"capacity must be a positive integer\"", ")", "if", "not", "isinstance", "(", "interval_secs", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "TypeError", "(", "\"interval_secs must be a number\"", ")", "elif", "interval_secs", "<=", "0.0", ":", "raise", "ValueError", "(", "\"interval_secs must be a positive number\"", ")", "self", ".", "capacity", "=", "capacity", "self", ".", "interval_secs", "=", "interval_secs", "# self.__emit_lock prohibits concurrent access to 'self.__emit_buffer',", "# 'self.__flush_event', and self.__flush_scheduled_by_emit.", "self", ".", "__emit_lock", "=", "threading", ".", "Lock", "(", ")", "self", ".", "__emit_buffer", "=", "[", "]", "self", ".", "__flush_event", "=", "None", "# A handle to the event that calls self.flush().", "self", ".", "__flush_scheduled_by_emit", "=", "False", "self", ".", "__close_called", "=", "False", "self", ".", "__flush_lock", "=", "threading", ".", "Lock", "(", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/logging/handlers.py#L35-L64
netket/netket
0d534e54ecbf25b677ea72af6b85947979420652
netket/graph/lattice.py
python
Lattice.basis_vectors
(self)
return self._basis_vectors
Basis vectors of the lattice
Basis vectors of the lattice
[ "Basis", "vectors", "of", "the", "lattice" ]
def basis_vectors(self): """Basis vectors of the lattice""" return self._basis_vectors
[ "def", "basis_vectors", "(", "self", ")", ":", "return", "self", ".", "_basis_vectors" ]
https://github.com/netket/netket/blob/0d534e54ecbf25b677ea72af6b85947979420652/netket/graph/lattice.py#L389-L391
numworks/epsilon
8952d2f8b1de1c3f064eec8ffcea804c5594ba4c
build/device/usb/control.py
python
get_descriptor
(dev, desc_size, desc_type, desc_index, wIndex = 0)
return desc
r"""Return the specified descriptor. dev is the Device object to which the request will be sent to. desc_size is the descriptor size. desc_type and desc_index are the descriptor type and index, respectively. wIndex index is used for string descriptors and represents the Language ID. For other types of descriptors, it is zero.
r"""Return the specified descriptor.
[ "r", "Return", "the", "specified", "descriptor", "." ]
def get_descriptor(dev, desc_size, desc_type, desc_index, wIndex = 0): r"""Return the specified descriptor. dev is the Device object to which the request will be sent to. desc_size is the descriptor size. desc_type and desc_index are the descriptor type and index, respectively. wIndex index is used for string descriptors and represents the Language ID. For other types of descriptors, it is zero. """ wValue = desc_index | (desc_type << 8) bmRequestType = util.build_request_type( util.CTRL_IN, util.CTRL_TYPE_STANDARD, util.CTRL_RECIPIENT_DEVICE) desc = dev.ctrl_transfer( bmRequestType = bmRequestType, bRequest = 0x06, wValue = wValue, wIndex = wIndex, data_or_wLength = desc_size) if len(desc) < 2: raise USBError('Invalid descriptor') return desc
[ "def", "get_descriptor", "(", "dev", ",", "desc_size", ",", "desc_type", ",", "desc_index", ",", "wIndex", "=", "0", ")", ":", "wValue", "=", "desc_index", "|", "(", "desc_type", "<<", "8", ")", "bmRequestType", "=", "util", ".", "build_request_type", "(", "util", ".", "CTRL_IN", ",", "util", ".", "CTRL_TYPE_STANDARD", ",", "util", ".", "CTRL_RECIPIENT_DEVICE", ")", "desc", "=", "dev", ".", "ctrl_transfer", "(", "bmRequestType", "=", "bmRequestType", ",", "bRequest", "=", "0x06", ",", "wValue", "=", "wValue", ",", "wIndex", "=", "wIndex", ",", "data_or_wLength", "=", "desc_size", ")", "if", "len", "(", "desc", ")", "<", "2", ":", "raise", "USBError", "(", "'Invalid descriptor'", ")", "return", "desc" ]
https://github.com/numworks/epsilon/blob/8952d2f8b1de1c3f064eec8ffcea804c5594ba4c/build/device/usb/control.py#L150-L180
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/animate.py
python
AnimationBase.GetDelay
(*args, **kwargs)
return _animate.AnimationBase_GetDelay(*args, **kwargs)
GetDelay(self, int i) -> int
GetDelay(self, int i) -> int
[ "GetDelay", "(", "self", "int", "i", ")", "-", ">", "int" ]
def GetDelay(*args, **kwargs): """GetDelay(self, int i) -> int""" return _animate.AnimationBase_GetDelay(*args, **kwargs)
[ "def", "GetDelay", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_animate", ".", "AnimationBase_GetDelay", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/animate.py#L82-L84
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/AddonManager/AddonManager.py
python
CommandAddonManager.startup
(self)
Downloads the available packages listings and populates the table This proceeds in four stages: first, the main GitHub repository is queried for a list of possible addons. Each addon is specified as a git submodule with name and branch information. The actual specific commit ID of the submodule (as listed on Github) is ignored. Any extra repositories specified by the user are appended to this list. Second, the list of macros is downloaded from the FreeCAD/FreeCAD-macros repository and the wiki Third, each of these items is queried for a package.xml metadata file. If that file exists it is downloaded, cached, and any icons that it references are also downloaded and cached. Finally, for workbenches that are not contained within a package (e.g. they provide no metadata), an additional git query is made to see if an update is available. Macros are checked for file changes. Each of these stages is launched in a separate thread to ensure that the UI remains responsive, and the operation can be cancelled. Each stage is also subject to caching, so may return immediately, if no cache update has been requested.
Downloads the available packages listings and populates the table
[ "Downloads", "the", "available", "packages", "listings", "and", "populates", "the", "table" ]
def startup(self) -> None: """Downloads the available packages listings and populates the table This proceeds in four stages: first, the main GitHub repository is queried for a list of possible addons. Each addon is specified as a git submodule with name and branch information. The actual specific commit ID of the submodule (as listed on Github) is ignored. Any extra repositories specified by the user are appended to this list. Second, the list of macros is downloaded from the FreeCAD/FreeCAD-macros repository and the wiki Third, each of these items is queried for a package.xml metadata file. If that file exists it is downloaded, cached, and any icons that it references are also downloaded and cached. Finally, for workbenches that are not contained within a package (e.g. they provide no metadata), an additional git query is made to see if an update is available. Macros are checked for file changes. Each of these stages is launched in a separate thread to ensure that the UI remains responsive, and the operation can be cancelled. Each stage is also subject to caching, so may return immediately, if no cache update has been requested. """ # Each function in this list is expected to launch a thread and connect its completion signal # to self.do_next_startup_phase, or to shortcut to calling self.do_next_startup_phase if it # is not launching a worker self.startup_sequence = [ self.populate_packages_table, self.activate_table_widgets, self.populate_macros, self.update_metadata_cache, self.check_updates, ] pref = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Addons") if pref.GetBool("DownloadMacros", False): self.startup_sequence.append(self.load_macro_metadata) self.current_progress_region = 0 self.number_of_progress_regions = len(self.startup_sequence) self.do_next_startup_phase()
[ "def", "startup", "(", "self", ")", "->", "None", ":", "# Each function in this list is expected to launch a thread and connect its completion signal", "# to self.do_next_startup_phase, or to shortcut to calling self.do_next_startup_phase if it", "# is not launching a worker", "self", ".", "startup_sequence", "=", "[", "self", ".", "populate_packages_table", ",", "self", ".", "activate_table_widgets", ",", "self", ".", "populate_macros", ",", "self", ".", "update_metadata_cache", ",", "self", ".", "check_updates", ",", "]", "pref", "=", "FreeCAD", ".", "ParamGet", "(", "\"User parameter:BaseApp/Preferences/Addons\"", ")", "if", "pref", ".", "GetBool", "(", "\"DownloadMacros\"", ",", "False", ")", ":", "self", ".", "startup_sequence", ".", "append", "(", "self", ".", "load_macro_metadata", ")", "self", ".", "current_progress_region", "=", "0", "self", ".", "number_of_progress_regions", "=", "len", "(", "self", ".", "startup_sequence", ")", "self", ".", "do_next_startup_phase", "(", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/AddonManager/AddonManager.py#L508-L546
junhyukoh/caffe-lstm
598d45456fa2a1b127a644f4aa38daa8fb9fc722
tools/extra/resize_and_crop_images.py
python
OpenCVResizeCrop.resize_and_crop_image
(self, input_file, output_file, output_side_length = 256)
Takes an image name, resize it and crop the center square
Takes an image name, resize it and crop the center square
[ "Takes", "an", "image", "name", "resize", "it", "and", "crop", "the", "center", "square" ]
def resize_and_crop_image(self, input_file, output_file, output_side_length = 256): '''Takes an image name, resize it and crop the center square ''' img = cv2.imread(input_file) height, width, depth = img.shape new_height = output_side_length new_width = output_side_length if height > width: new_height = output_side_length * height / width else: new_width = output_side_length * width / height resized_img = cv2.resize(img, (new_width, new_height)) height_offset = (new_height - output_side_length) / 2 width_offset = (new_width - output_side_length) / 2 cropped_img = resized_img[height_offset:height_offset + output_side_length, width_offset:width_offset + output_side_length] cv2.imwrite(output_file, cropped_img)
[ "def", "resize_and_crop_image", "(", "self", ",", "input_file", ",", "output_file", ",", "output_side_length", "=", "256", ")", ":", "img", "=", "cv2", ".", "imread", "(", "input_file", ")", "height", ",", "width", ",", "depth", "=", "img", ".", "shape", "new_height", "=", "output_side_length", "new_width", "=", "output_side_length", "if", "height", ">", "width", ":", "new_height", "=", "output_side_length", "*", "height", "/", "width", "else", ":", "new_width", "=", "output_side_length", "*", "width", "/", "height", "resized_img", "=", "cv2", ".", "resize", "(", "img", ",", "(", "new_width", ",", "new_height", ")", ")", "height_offset", "=", "(", "new_height", "-", "output_side_length", ")", "/", "2", "width_offset", "=", "(", "new_width", "-", "output_side_length", ")", "/", "2", "cropped_img", "=", "resized_img", "[", "height_offset", ":", "height_offset", "+", "output_side_length", ",", "width_offset", ":", "width_offset", "+", "output_side_length", "]", "cv2", ".", "imwrite", "(", "output_file", ",", "cropped_img", ")" ]
https://github.com/junhyukoh/caffe-lstm/blob/598d45456fa2a1b127a644f4aa38daa8fb9fc722/tools/extra/resize_and_crop_images.py#L20-L36
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_core.py
python
Sizer.ShowItems
(*args, **kwargs)
return _core_.Sizer_ShowItems(*args, **kwargs)
ShowItems(self, bool show) Recursively call `wx.SizerItem.Show` on all sizer items.
ShowItems(self, bool show)
[ "ShowItems", "(", "self", "bool", "show", ")" ]
def ShowItems(*args, **kwargs): """ ShowItems(self, bool show) Recursively call `wx.SizerItem.Show` on all sizer items. """ return _core_.Sizer_ShowItems(*args, **kwargs)
[ "def", "ShowItems", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Sizer_ShowItems", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_core.py#L14991-L14997
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/dir_util.py
python
remove_tree
(directory, verbose=1, dry_run=0)
Recursively remove an entire directory tree. Any errors are ignored (apart from being reported to stdout if 'verbose' is true).
Recursively remove an entire directory tree.
[ "Recursively", "remove", "an", "entire", "directory", "tree", "." ]
def remove_tree(directory, verbose=1, dry_run=0): """Recursively remove an entire directory tree. Any errors are ignored (apart from being reported to stdout if 'verbose' is true). """ global _path_created if verbose >= 1: log.info("removing '%s' (and everything under it)", directory) if dry_run: return cmdtuples = [] _build_cmdtuple(directory, cmdtuples) for cmd in cmdtuples: try: cmd[0](cmd[1]) # remove dir from cache if it's already there abspath = os.path.abspath(cmd[1]) if abspath in _path_created: del _path_created[abspath] except OSError as exc: log.warn("error removing %s: %s", directory, exc)
[ "def", "remove_tree", "(", "directory", ",", "verbose", "=", "1", ",", "dry_run", "=", "0", ")", ":", "global", "_path_created", "if", "verbose", ">=", "1", ":", "log", ".", "info", "(", "\"removing '%s' (and everything under it)\"", ",", "directory", ")", "if", "dry_run", ":", "return", "cmdtuples", "=", "[", "]", "_build_cmdtuple", "(", "directory", ",", "cmdtuples", ")", "for", "cmd", "in", "cmdtuples", ":", "try", ":", "cmd", "[", "0", "]", "(", "cmd", "[", "1", "]", ")", "# remove dir from cache if it's already there", "abspath", "=", "os", ".", "path", ".", "abspath", "(", "cmd", "[", "1", "]", ")", "if", "abspath", "in", "_path_created", ":", "del", "_path_created", "[", "abspath", "]", "except", "OSError", "as", "exc", ":", "log", ".", "warn", "(", "\"error removing %s: %s\"", ",", "directory", ",", "exc", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/distutils/dir_util.py#L178-L200
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/Inelastic/Direct/RunDescriptor.py
python
RunDescriptor.get_workspace
(self)
Method returns workspace correspondent to current run number(s) and loads this workspace if it has not been loaded Returns Mantid pointer to the workspace, corresponding to this run number
Method returns workspace correspondent to current run number(s) and loads this workspace if it has not been loaded
[ "Method", "returns", "workspace", "correspondent", "to", "current", "run", "number", "(", "s", ")", "and", "loads", "this", "workspace", "if", "it", "has", "not", "been", "loaded" ]
def get_workspace(self): """Method returns workspace correspondent to current run number(s) and loads this workspace if it has not been loaded Returns Mantid pointer to the workspace, corresponding to this run number """ if not self._ws_name: self._ws_name = self._build_ws_name() if self._ws_name in mtd: ws = mtd[self._ws_name] if not ws.run().hasProperty("calibrated"): prefer_ws_calibration = self._check_calibration_source() self.apply_calibration(ws,RunDescriptor._holder.det_cal_file,prefer_ws_calibration) return ws else: if self._run_number is not None: prefer_ws_calibration = self._check_calibration_source() inst_name = RunDescriptor._holder.short_inst_name calibration = RunDescriptor._holder.det_cal_file if self._run_list and RunDescriptor._holder.sum_runs : # Sum runs ws = self._load_and_sum_runs(inst_name,RunDescriptor._holder.load_monitors_with_workspace) else: # load current workspace ws = self.load_run(inst_name, calibration,False, RunDescriptor._holder.load_monitors_with_workspace,prefer_ws_calibration) self.synchronize_ws(ws) self.apply_calibration(ws,calibration,prefer_ws_calibration) return ws else: return None
[ "def", "get_workspace", "(", "self", ")", ":", "if", "not", "self", ".", "_ws_name", ":", "self", ".", "_ws_name", "=", "self", ".", "_build_ws_name", "(", ")", "if", "self", ".", "_ws_name", "in", "mtd", ":", "ws", "=", "mtd", "[", "self", ".", "_ws_name", "]", "if", "not", "ws", ".", "run", "(", ")", ".", "hasProperty", "(", "\"calibrated\"", ")", ":", "prefer_ws_calibration", "=", "self", ".", "_check_calibration_source", "(", ")", "self", ".", "apply_calibration", "(", "ws", ",", "RunDescriptor", ".", "_holder", ".", "det_cal_file", ",", "prefer_ws_calibration", ")", "return", "ws", "else", ":", "if", "self", ".", "_run_number", "is", "not", "None", ":", "prefer_ws_calibration", "=", "self", ".", "_check_calibration_source", "(", ")", "inst_name", "=", "RunDescriptor", ".", "_holder", ".", "short_inst_name", "calibration", "=", "RunDescriptor", ".", "_holder", ".", "det_cal_file", "if", "self", ".", "_run_list", "and", "RunDescriptor", ".", "_holder", ".", "sum_runs", ":", "# Sum runs", "ws", "=", "self", ".", "_load_and_sum_runs", "(", "inst_name", ",", "RunDescriptor", ".", "_holder", ".", "load_monitors_with_workspace", ")", "else", ":", "# load current workspace", "ws", "=", "self", ".", "load_run", "(", "inst_name", ",", "calibration", ",", "False", ",", "RunDescriptor", ".", "_holder", ".", "load_monitors_with_workspace", ",", "prefer_ws_calibration", ")", "self", ".", "synchronize_ws", "(", "ws", ")", "self", ".", "apply_calibration", "(", "ws", ",", "calibration", ",", "prefer_ws_calibration", ")", "return", "ws", "else", ":", "return", "None" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/Inelastic/Direct/RunDescriptor.py#L788-L819
alexgkendall/caffe-segnet
344c113bf1832886f1cbe9f33ffe28a3beeaf412
scripts/cpp_lint.py
python
_FunctionState.Check
(self, error, filename, linenum)
Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check.
Report if too many lines in function body.
[ "Report", "if", "too", "many", "lines", "in", "function", "body", "." ]
def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger))
[ "def", "Check", "(", "self", ",", "error", ",", "filename", ",", "linenum", ")", ":", "if", "Match", "(", "r'T(EST|est)'", ",", "self", ".", "current_function", ")", ":", "base_trigger", "=", "self", ".", "_TEST_TRIGGER", "else", ":", "base_trigger", "=", "self", ".", "_NORMAL_TRIGGER", "trigger", "=", "base_trigger", "*", "2", "**", "_VerboseLevel", "(", ")", "if", "self", ".", "lines_in_function", ">", "trigger", ":", "error_level", "=", "int", "(", "math", ".", "log", "(", "self", ".", "lines_in_function", "/", "base_trigger", ",", "2", ")", ")", "# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...", "if", "error_level", ">", "5", ":", "error_level", "=", "5", "error", "(", "filename", ",", "linenum", ",", "'readability/fn_size'", ",", "error_level", ",", "'Small and focused functions are preferred:'", "' %s has %d non-comment lines'", "' (error triggered by exceeding %d lines).'", "%", "(", "self", ".", "current_function", ",", "self", ".", "lines_in_function", ",", "trigger", ")", ")" ]
https://github.com/alexgkendall/caffe-segnet/blob/344c113bf1832886f1cbe9f33ffe28a3beeaf412/scripts/cpp_lint.py#L836-L859
cvxpy/cvxpy
5165b4fb750dfd237de8659383ef24b4b2e33aaf
cvxpy/atoms/affine/conv.py
python
conv.validate_arguments
(self)
Checks that both arguments are vectors, and the first is constant.
Checks that both arguments are vectors, and the first is constant.
[ "Checks", "that", "both", "arguments", "are", "vectors", "and", "the", "first", "is", "constant", "." ]
def validate_arguments(self) -> None: """Checks that both arguments are vectors, and the first is constant. """ if not self.args[0].is_vector() or not self.args[1].is_vector(): raise ValueError("The arguments to conv must resolve to vectors.") if not self.args[0].is_constant(): raise ValueError("The first argument to conv must be constant.")
[ "def", "validate_arguments", "(", "self", ")", "->", "None", ":", "if", "not", "self", ".", "args", "[", "0", "]", ".", "is_vector", "(", ")", "or", "not", "self", ".", "args", "[", "1", "]", ".", "is_vector", "(", ")", ":", "raise", "ValueError", "(", "\"The arguments to conv must resolve to vectors.\"", ")", "if", "not", "self", ".", "args", "[", "0", "]", ".", "is_constant", "(", ")", ":", "raise", "ValueError", "(", "\"The first argument to conv must be constant.\"", ")" ]
https://github.com/cvxpy/cvxpy/blob/5165b4fb750dfd237de8659383ef24b4b2e33aaf/cvxpy/atoms/affine/conv.py#L61-L67
metashell/metashell
f4177e4854ea00c8dbc722cadab26ef413d798ea
3rd/templight/clang/tools/scan-build-py/libscanbuild/clang.py
python
is_ctu_capable
(extdef_map_cmd)
return True
Detects if the current (or given) clang and external definition mapping executables are CTU compatible.
Detects if the current (or given) clang and external definition mapping executables are CTU compatible.
[ "Detects", "if", "the", "current", "(", "or", "given", ")", "clang", "and", "external", "definition", "mapping", "executables", "are", "CTU", "compatible", "." ]
def is_ctu_capable(extdef_map_cmd): """ Detects if the current (or given) clang and external definition mapping executables are CTU compatible. """ try: run_command([extdef_map_cmd, '-version']) except (OSError, subprocess.CalledProcessError): return False return True
[ "def", "is_ctu_capable", "(", "extdef_map_cmd", ")", ":", "try", ":", "run_command", "(", "[", "extdef_map_cmd", ",", "'-version'", "]", ")", "except", "(", "OSError", ",", "subprocess", ".", "CalledProcessError", ")", ":", "return", "False", "return", "True" ]
https://github.com/metashell/metashell/blob/f4177e4854ea00c8dbc722cadab26ef413d798ea/3rd/templight/clang/tools/scan-build-py/libscanbuild/clang.py#L164-L172
MegEngine/MegEngine
ce9ad07a27ec909fb8db4dd67943d24ba98fb93a
imperative/python/megengine/module/init.py
python
calculate_fan_in_and_fan_out
(tensor: Tensor)
return fan_in, fan_out
r"""Calculates fan_in / fan_out value for given weight tensor. This function assumes input tensor is stored in ``NCHW`` format. Note: The group conv2d kernel shape in MegEngine is ``(G, O/G, I/G, K, K)``. This function calculates ``fan_out = O/G * K * K`` as default, but PyTorch uses ``fan_out = O * K * K``. Args: tensor: weight tensor in ``NCHW`` format.
r"""Calculates fan_in / fan_out value for given weight tensor. This function assumes input tensor is stored in ``NCHW`` format.
[ "r", "Calculates", "fan_in", "/", "fan_out", "value", "for", "given", "weight", "tensor", ".", "This", "function", "assumes", "input", "tensor", "is", "stored", "in", "NCHW", "format", "." ]
def calculate_fan_in_and_fan_out(tensor: Tensor) -> Tuple[float, float]: r"""Calculates fan_in / fan_out value for given weight tensor. This function assumes input tensor is stored in ``NCHW`` format. Note: The group conv2d kernel shape in MegEngine is ``(G, O/G, I/G, K, K)``. This function calculates ``fan_out = O/G * K * K`` as default, but PyTorch uses ``fan_out = O * K * K``. Args: tensor: weight tensor in ``NCHW`` format. """ shape = tensor.shape ndim = len(shape) if ndim < 2: raise ValueError( "fan_in and fan_out can not be computed for tensor with fewer than 2 " "dimensions" ) if ndim == 2: # Linear fan_in = shape[1] fan_out = shape[0] else: if ndim >= 5: # ignore the groups dimension of group conv2d and group conv3d # FIXME: will be wrong for conv3d shape = shape[1:] num_input_fmaps = shape[1] num_output_fmaps = shape[0] receptive_field_size = 1 if ndim > 2: receptive_field_size = reduce(lambda x, y: x * y, shape[2:], 1) fan_in = num_input_fmaps * receptive_field_size fan_out = num_output_fmaps * receptive_field_size return fan_in, fan_out
[ "def", "calculate_fan_in_and_fan_out", "(", "tensor", ":", "Tensor", ")", "->", "Tuple", "[", "float", ",", "float", "]", ":", "shape", "=", "tensor", ".", "shape", "ndim", "=", "len", "(", "shape", ")", "if", "ndim", "<", "2", ":", "raise", "ValueError", "(", "\"fan_in and fan_out can not be computed for tensor with fewer than 2 \"", "\"dimensions\"", ")", "if", "ndim", "==", "2", ":", "# Linear", "fan_in", "=", "shape", "[", "1", "]", "fan_out", "=", "shape", "[", "0", "]", "else", ":", "if", "ndim", ">=", "5", ":", "# ignore the groups dimension of group conv2d and group conv3d", "# FIXME: will be wrong for conv3d", "shape", "=", "shape", "[", "1", ":", "]", "num_input_fmaps", "=", "shape", "[", "1", "]", "num_output_fmaps", "=", "shape", "[", "0", "]", "receptive_field_size", "=", "1", "if", "ndim", ">", "2", ":", "receptive_field_size", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "shape", "[", "2", ":", "]", ",", "1", ")", "fan_in", "=", "num_input_fmaps", "*", "receptive_field_size", "fan_out", "=", "num_output_fmaps", "*", "receptive_field_size", "return", "fan_in", ",", "fan_out" ]
https://github.com/MegEngine/MegEngine/blob/ce9ad07a27ec909fb8db4dd67943d24ba98fb93a/imperative/python/megengine/module/init.py#L125-L160
hyperledger-archives/iroha
ed579f85126d0e86532a1f4f1f6ce5681bbcd3a9
example/python/tx-example.py
python
create_domain_and_asset
()
Creates domain 'domain' and asset 'coin#domain' with precision 2
Creates domain 'domain' and asset 'coin#domain' with precision 2
[ "Creates", "domain", "domain", "and", "asset", "coin#domain", "with", "precision", "2" ]
def create_domain_and_asset(): """ Creates domain 'domain' and asset 'coin#domain' with precision 2 """ commands = [ iroha.command('CreateDomain', domain_id='domain', default_role='user'), iroha.command('CreateAsset', asset_name='coin', domain_id='domain', precision=2) ] tx = IrohaCrypto.sign_transaction( iroha.transaction(commands), admin_private_key) send_transaction_and_print_status(tx)
[ "def", "create_domain_and_asset", "(", ")", ":", "commands", "=", "[", "iroha", ".", "command", "(", "'CreateDomain'", ",", "domain_id", "=", "'domain'", ",", "default_role", "=", "'user'", ")", ",", "iroha", ".", "command", "(", "'CreateAsset'", ",", "asset_name", "=", "'coin'", ",", "domain_id", "=", "'domain'", ",", "precision", "=", "2", ")", "]", "tx", "=", "IrohaCrypto", ".", "sign_transaction", "(", "iroha", ".", "transaction", "(", "commands", ")", ",", "admin_private_key", ")", "send_transaction_and_print_status", "(", "tx", ")" ]
https://github.com/hyperledger-archives/iroha/blob/ed579f85126d0e86532a1f4f1f6ce5681bbcd3a9/example/python/tx-example.py#L49-L60
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/util/observer.py
python
Observable.has_changed
(self)
return self.changed
Return whether the object has changed.
Return whether the object has changed.
[ "Return", "whether", "the", "object", "has", "changed", "." ]
def has_changed(self): """ Return whether the object has changed. """ return self.changed
[ "def", "has_changed", "(", "self", ")", ":", "return", "self", ".", "changed" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/util/observer.py#L88-L92
numenta/nupic.core
949950cf2c6d8d894c7eabfa2860aae679bf91f7
bindings/py/src/nupic/bindings/regions/PyRegion.py
python
PyRegion.serializeExtraData
(self, filePath)
This method is called during network serialization with an external filename that can be used to bypass pickle for saving large binary states. :param filePath: (string) full filepath and name
This method is called during network serialization with an external filename that can be used to bypass pickle for saving large binary states.
[ "This", "method", "is", "called", "during", "network", "serialization", "with", "an", "external", "filename", "that", "can", "be", "used", "to", "bypass", "pickle", "for", "saving", "large", "binary", "states", "." ]
def serializeExtraData(self, filePath): """This method is called during network serialization with an external filename that can be used to bypass pickle for saving large binary states. :param filePath: (string) full filepath and name """ pass
[ "def", "serializeExtraData", "(", "self", ",", "filePath", ")", ":", "pass" ]
https://github.com/numenta/nupic.core/blob/949950cf2c6d8d894c7eabfa2860aae679bf91f7/bindings/py/src/nupic/bindings/regions/PyRegion.py#L309-L315
raymondlu/super-animation-samples
04234269112ff0dc32447f27a761dbbb00b8ba17
samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py
python
Diagnostic.category_name
(self)
return conf.lib.clang_getDiagnosticCategoryName(self.category_number)
The string name of the category for this diagnostic.
The string name of the category for this diagnostic.
[ "The", "string", "name", "of", "the", "category", "for", "this", "diagnostic", "." ]
def category_name(self): """The string name of the category for this diagnostic.""" return conf.lib.clang_getDiagnosticCategoryName(self.category_number)
[ "def", "category_name", "(", "self", ")", ":", "return", "conf", ".", "lib", ".", "clang_getDiagnosticCategoryName", "(", "self", ".", "category_number", ")" ]
https://github.com/raymondlu/super-animation-samples/blob/04234269112ff0dc32447f27a761dbbb00b8ba17/samples/cocos2d-x-3.1/CocosLuaGame2/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py#L345-L347
microsoft/TSS.MSR
0f2516fca2cd9929c31d5450e39301c9bde43688
TSS.Py/src/TpmTypes.py
python
TPM2_PolicyDuplicationSelect_REQUEST.initFromTpm
(self, buf)
TpmMarshaller method
TpmMarshaller method
[ "TpmMarshaller", "method" ]
def initFromTpm(self, buf): """ TpmMarshaller method """ self.objectName = buf.readSizedByteBuf() self.newParentName = buf.readSizedByteBuf() self.includeObject = buf.readByte()
[ "def", "initFromTpm", "(", "self", ",", "buf", ")", ":", "self", ".", "objectName", "=", "buf", ".", "readSizedByteBuf", "(", ")", "self", ".", "newParentName", "=", "buf", ".", "readSizedByteBuf", "(", ")", "self", ".", "includeObject", "=", "buf", ".", "readByte", "(", ")" ]
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L14876-L14880
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Rect2D.ContainsRect
(*args, **kwargs)
return _core_.Rect2D_ContainsRect(*args, **kwargs)
ContainsRect(self, Rect2D rect) -> bool
ContainsRect(self, Rect2D rect) -> bool
[ "ContainsRect", "(", "self", "Rect2D", "rect", ")", "-", ">", "bool" ]
def ContainsRect(*args, **kwargs): """ContainsRect(self, Rect2D rect) -> bool""" return _core_.Rect2D_ContainsRect(*args, **kwargs)
[ "def", "ContainsRect", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Rect2D_ContainsRect", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L1971-L1973
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/artmanager.py
python
ArtManager.GetMenuTheme
(self)
return self._menuTheme
Returns the currently used menu theme. :return: A string containining the currently used theme for the menu.
Returns the currently used menu theme.
[ "Returns", "the", "currently", "used", "menu", "theme", "." ]
def GetMenuTheme(self): """ Returns the currently used menu theme. :return: A string containining the currently used theme for the menu. """ return self._menuTheme
[ "def", "GetMenuTheme", "(", "self", ")", ":", "return", "self", ".", "_menuTheme" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/artmanager.py#L1843-L1850
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/linalg/linear_operator.py
python
LinearOperator.__init__
(self, dtype, graph_parents=None, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None)
r"""Initialize the `LinearOperator`. **This is a private method for subclass use.** **Subclasses should copy-paste this `__init__` documentation.** Args: dtype: The type of the this `LinearOperator`. Arguments to `matmul` and `solve` will have to be this type. graph_parents: Python list of graph prerequisites of this `LinearOperator` Typically tensors that are passed during initialization. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `dtype` is real, this is equivalent to being symmetric. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: ValueError: If any member of graph_parents is `None` or not a `Tensor`. ValueError: If hints are set incorrectly.
r"""Initialize the `LinearOperator`.
[ "r", "Initialize", "the", "LinearOperator", "." ]
def __init__(self, dtype, graph_parents=None, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name=None): r"""Initialize the `LinearOperator`. **This is a private method for subclass use.** **Subclasses should copy-paste this `__init__` documentation.** Args: dtype: The type of the this `LinearOperator`. Arguments to `matmul` and `solve` will have to be this type. graph_parents: Python list of graph prerequisites of this `LinearOperator` Typically tensors that are passed during initialization. is_non_singular: Expect that this operator is non-singular. is_self_adjoint: Expect that this operator is equal to its hermitian transpose. If `dtype` is real, this is equivalent to being symmetric. is_positive_definite: Expect that this operator is positive definite, meaning the quadratic form `x^H A x` has positive real part for all nonzero `x`. Note that we do not require the operator to be self-adjoint to be positive-definite. See: https://en.wikipedia.org/wiki/Positive-definite_matrix\ #Extension_for_non_symmetric_matrices is_square: Expect that this operator acts like square [batch] matrices. name: A name for this `LinearOperator`. Raises: ValueError: If any member of graph_parents is `None` or not a `Tensor`. ValueError: If hints are set incorrectly. """ # Check and auto-set flags. if is_positive_definite: if is_non_singular is False: raise ValueError("A positive definite matrix is always non-singular.") is_non_singular = True if is_non_singular: if is_square is False: raise ValueError("A non-singular matrix is always square.") is_square = True if is_self_adjoint: if is_square is False: raise ValueError("A self-adjoint matrix is always square.") is_square = True self._is_square_set_or_implied_by_hints = is_square graph_parents = [] if graph_parents is None else graph_parents for i, t in enumerate(graph_parents): if t is None or not tensor_util.is_tensor(t): raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t)) self._dtype = dtype self._graph_parents = graph_parents self._is_non_singular = is_non_singular self._is_self_adjoint = is_self_adjoint self._is_positive_definite = is_positive_definite self._name = name or type(self).__name__ # We will cache some tensors to avoid repeatedly adding shape # manipulation ops to the graph. # Naming convention: # self._cached_X_tensor is the cached version of self._X_tensor. self._cached_shape_tensor = None self._cached_batch_shape_tensor = None self._cached_domain_dimension_tensor = None self._cached_range_dimension_tensor = None self._cached_tensor_rank_tensor = None
[ "def", "__init__", "(", "self", ",", "dtype", ",", "graph_parents", "=", "None", ",", "is_non_singular", "=", "None", ",", "is_self_adjoint", "=", "None", ",", "is_positive_definite", "=", "None", ",", "is_square", "=", "None", ",", "name", "=", "None", ")", ":", "# Check and auto-set flags.", "if", "is_positive_definite", ":", "if", "is_non_singular", "is", "False", ":", "raise", "ValueError", "(", "\"A positive definite matrix is always non-singular.\"", ")", "is_non_singular", "=", "True", "if", "is_non_singular", ":", "if", "is_square", "is", "False", ":", "raise", "ValueError", "(", "\"A non-singular matrix is always square.\"", ")", "is_square", "=", "True", "if", "is_self_adjoint", ":", "if", "is_square", "is", "False", ":", "raise", "ValueError", "(", "\"A self-adjoint matrix is always square.\"", ")", "is_square", "=", "True", "self", ".", "_is_square_set_or_implied_by_hints", "=", "is_square", "graph_parents", "=", "[", "]", "if", "graph_parents", "is", "None", "else", "graph_parents", "for", "i", ",", "t", "in", "enumerate", "(", "graph_parents", ")", ":", "if", "t", "is", "None", "or", "not", "tensor_util", ".", "is_tensor", "(", "t", ")", ":", "raise", "ValueError", "(", "\"Graph parent item %d is not a Tensor; %s.\"", "%", "(", "i", ",", "t", ")", ")", "self", ".", "_dtype", "=", "dtype", "self", ".", "_graph_parents", "=", "graph_parents", "self", ".", "_is_non_singular", "=", "is_non_singular", "self", ".", "_is_self_adjoint", "=", "is_self_adjoint", "self", ".", "_is_positive_definite", "=", "is_positive_definite", "self", ".", "_name", "=", "name", "or", "type", "(", "self", ")", ".", "__name__", "# We will cache some tensors to avoid repeatedly adding shape", "# manipulation ops to the graph.", "# Naming convention:", "# self._cached_X_tensor is the cached version of self._X_tensor.", "self", ".", "_cached_shape_tensor", "=", "None", "self", ".", "_cached_batch_shape_tensor", "=", "None", "self", ".", "_cached_domain_dimension_tensor", "=", "None", "self", ".", "_cached_range_dimension_tensor", "=", "None", "self", ".", "_cached_tensor_rank_tensor", "=", "None" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/linalg/linear_operator.py#L142-L213
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/util/fslike/directory.py
python
Directory.resolve
(self, parts)
return os.path.join(self.path, *parts)
resolves parts to an actual path name.
resolves parts to an actual path name.
[ "resolves", "parts", "to", "an", "actual", "path", "name", "." ]
def resolve(self, parts): """ resolves parts to an actual path name. """ return os.path.join(self.path, *parts)
[ "def", "resolve", "(", "self", ",", "parts", ")", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "*", "parts", ")" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/util/fslike/directory.py#L44-L46
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
ImmediateFunction.WriteCmdSetHeader
(self, file)
Overridden from Function
Overridden from Function
[ "Overridden", "from", "Function" ]
def WriteCmdSetHeader(self, file): """Overridden from Function""" self.type_handler.WriteImmediateCmdSetHeader(self, file)
[ "def", "WriteCmdSetHeader", "(", "self", ",", "file", ")", ":", "self", ".", "type_handler", ".", "WriteImmediateCmdSetHeader", "(", "self", ",", "file", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/gpu/command_buffer/build_gles2_cmd_buffer.py#L5450-L5452
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/package/package_exporter.py
python
PackageExporter.save_pickle
( self, package: str, resource: str, obj: Any, dependencies: bool = True, pickle_protocol: int = 3, )
Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into the archive rather than a stand-alone file. Stanard pickle does not save the code, only the objects. If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required to reconstruct them and save the relevant code. To be able to save an object where ``type(obj).__name__`` is ``my_module.MyObject``, ``my_module.MyObject`` must resolve to the class of the object according to the ``importer`` order. When saving objects that have previously been packaged, the importer's ``import_module`` method will need to be present in the ``importer`` list for this to work. Args: package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``). resource (str): A unique name for the resource, used to identify it to load. obj (Any): The object to save, must be picklable. dependencies (bool, optional): If ``True``, we scan the source for dependencies.
Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into the archive rather than a stand-alone file. Stanard pickle does not save the code, only the objects. If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required to reconstruct them and save the relevant code.
[ "Save", "a", "python", "object", "to", "the", "archive", "using", "pickle", ".", "Equivalent", "to", ":", "func", ":", "torch", ".", "save", "but", "saving", "into", "the", "archive", "rather", "than", "a", "stand", "-", "alone", "file", ".", "Stanard", "pickle", "does", "not", "save", "the", "code", "only", "the", "objects", ".", "If", "dependencies", "is", "true", "this", "method", "will", "also", "scan", "the", "pickled", "objects", "for", "which", "modules", "are", "required", "to", "reconstruct", "them", "and", "save", "the", "relevant", "code", "." ]
def save_pickle( self, package: str, resource: str, obj: Any, dependencies: bool = True, pickle_protocol: int = 3, ): """Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into the archive rather than a stand-alone file. Stanard pickle does not save the code, only the objects. If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required to reconstruct them and save the relevant code. To be able to save an object where ``type(obj).__name__`` is ``my_module.MyObject``, ``my_module.MyObject`` must resolve to the class of the object according to the ``importer`` order. When saving objects that have previously been packaged, the importer's ``import_module`` method will need to be present in the ``importer`` list for this to work. Args: package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``). resource (str): A unique name for the resource, used to identify it to load. obj (Any): The object to save, must be picklable. dependencies (bool, optional): If ``True``, we scan the source for dependencies. """ assert (pickle_protocol == 4) or ( pickle_protocol == 3 ), "torch.package only supports pickle protocols 3 and 4" filename = self._filename(package, resource) # Write the pickle data for `obj` data_buf = io.BytesIO() pickler = create_pickler(data_buf, self.importer, protocol=pickle_protocol) pickler.persistent_id = self._persistent_id pickler.dump(obj) data_value = data_buf.getvalue() name_in_dependency_graph = f"<{package}.{resource}>" self.dependency_graph.add_node( name_in_dependency_graph, action=_ModuleProviderAction.INTERN, provided=True, is_pickle=True, ) def _check_mocked_error(module: Optional[str], field: Optional[str]): assert isinstance(module, str) assert isinstance(field, str) if self._can_implicitly_extern(module): return for pattern, pattern_info in self.patterns.items(): if pattern.matches(module): if pattern_info.action == _ModuleProviderAction.MOCK: raise NotImplementedError( f"Object '{field}' from module {module} was mocked out during packaging " f"but is being used in resource - {resource} in package {package}. " "If this error is happening during 'save_pickle', please ensure that your " "pickled object doesn't contain any mocked objects." ) else: return if dependencies: all_dependencies = [] module = None field = None memo: DefaultDict[int, str] = defaultdict(None) memo_count = 0 # pickletools.dis(data_value) for opcode, arg, pos in pickletools.genops(data_value): if pickle_protocol == 4: if ( opcode.name == "SHORT_BINUNICODE" or opcode.name == "BINUNICODE8" ): assert isinstance(arg, str) module = field field = arg memo[memo_count] = arg elif ( opcode.name == "BINGET_LONG" or opcode.name == "BINGET" or opcode.name == "GET" ): assert isinstance(arg, int) module = field field = memo.get(arg, None) elif opcode.name == "MEMOIZE": memo_count += 1 elif opcode.name == "STACK_GLOBAL": assert isinstance(module, str) if module not in all_dependencies: all_dependencies.append(module) _check_mocked_error(module, field) elif ( pickle_protocol == 3 and opcode.name == "GLOBAL" ): # a global reference assert isinstance(arg, str) module, field = arg.split(" ") if module not in all_dependencies: all_dependencies.append(module) _check_mocked_error(module, field) for module_name in all_dependencies: self.dependency_graph.add_edge(name_in_dependency_graph, module_name) self.add_dependency(module_name) self._write(filename, data_value)
[ "def", "save_pickle", "(", "self", ",", "package", ":", "str", ",", "resource", ":", "str", ",", "obj", ":", "Any", ",", "dependencies", ":", "bool", "=", "True", ",", "pickle_protocol", ":", "int", "=", "3", ",", ")", ":", "assert", "(", "pickle_protocol", "==", "4", ")", "or", "(", "pickle_protocol", "==", "3", ")", ",", "\"torch.package only supports pickle protocols 3 and 4\"", "filename", "=", "self", ".", "_filename", "(", "package", ",", "resource", ")", "# Write the pickle data for `obj`", "data_buf", "=", "io", ".", "BytesIO", "(", ")", "pickler", "=", "create_pickler", "(", "data_buf", ",", "self", ".", "importer", ",", "protocol", "=", "pickle_protocol", ")", "pickler", ".", "persistent_id", "=", "self", ".", "_persistent_id", "pickler", ".", "dump", "(", "obj", ")", "data_value", "=", "data_buf", ".", "getvalue", "(", ")", "name_in_dependency_graph", "=", "f\"<{package}.{resource}>\"", "self", ".", "dependency_graph", ".", "add_node", "(", "name_in_dependency_graph", ",", "action", "=", "_ModuleProviderAction", ".", "INTERN", ",", "provided", "=", "True", ",", "is_pickle", "=", "True", ",", ")", "def", "_check_mocked_error", "(", "module", ":", "Optional", "[", "str", "]", ",", "field", ":", "Optional", "[", "str", "]", ")", ":", "assert", "isinstance", "(", "module", ",", "str", ")", "assert", "isinstance", "(", "field", ",", "str", ")", "if", "self", ".", "_can_implicitly_extern", "(", "module", ")", ":", "return", "for", "pattern", ",", "pattern_info", "in", "self", ".", "patterns", ".", "items", "(", ")", ":", "if", "pattern", ".", "matches", "(", "module", ")", ":", "if", "pattern_info", ".", "action", "==", "_ModuleProviderAction", ".", "MOCK", ":", "raise", "NotImplementedError", "(", "f\"Object '{field}' from module {module} was mocked out during packaging \"", "f\"but is being used in resource - {resource} in package {package}. \"", "\"If this error is happening during 'save_pickle', please ensure that your \"", "\"pickled object doesn't contain any mocked objects.\"", ")", "else", ":", "return", "if", "dependencies", ":", "all_dependencies", "=", "[", "]", "module", "=", "None", "field", "=", "None", "memo", ":", "DefaultDict", "[", "int", ",", "str", "]", "=", "defaultdict", "(", "None", ")", "memo_count", "=", "0", "# pickletools.dis(data_value)", "for", "opcode", ",", "arg", ",", "pos", "in", "pickletools", ".", "genops", "(", "data_value", ")", ":", "if", "pickle_protocol", "==", "4", ":", "if", "(", "opcode", ".", "name", "==", "\"SHORT_BINUNICODE\"", "or", "opcode", ".", "name", "==", "\"BINUNICODE8\"", ")", ":", "assert", "isinstance", "(", "arg", ",", "str", ")", "module", "=", "field", "field", "=", "arg", "memo", "[", "memo_count", "]", "=", "arg", "elif", "(", "opcode", ".", "name", "==", "\"BINGET_LONG\"", "or", "opcode", ".", "name", "==", "\"BINGET\"", "or", "opcode", ".", "name", "==", "\"GET\"", ")", ":", "assert", "isinstance", "(", "arg", ",", "int", ")", "module", "=", "field", "field", "=", "memo", ".", "get", "(", "arg", ",", "None", ")", "elif", "opcode", ".", "name", "==", "\"MEMOIZE\"", ":", "memo_count", "+=", "1", "elif", "opcode", ".", "name", "==", "\"STACK_GLOBAL\"", ":", "assert", "isinstance", "(", "module", ",", "str", ")", "if", "module", "not", "in", "all_dependencies", ":", "all_dependencies", ".", "append", "(", "module", ")", "_check_mocked_error", "(", "module", ",", "field", ")", "elif", "(", "pickle_protocol", "==", "3", "and", "opcode", ".", "name", "==", "\"GLOBAL\"", ")", ":", "# a global reference", "assert", "isinstance", "(", "arg", ",", "str", ")", "module", ",", "field", "=", "arg", ".", "split", "(", "\" \"", ")", "if", "module", "not", "in", "all_dependencies", ":", "all_dependencies", ".", "append", "(", "module", ")", "_check_mocked_error", "(", "module", ",", "field", ")", "for", "module_name", "in", "all_dependencies", ":", "self", ".", "dependency_graph", ".", "add_edge", "(", "name_in_dependency_graph", ",", "module_name", ")", "self", ".", "add_dependency", "(", "module_name", ")", "self", ".", "_write", "(", "filename", ",", "data_value", ")" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/package/package_exporter.py#L553-L659
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rfc822.py
python
Message.__getitem__
(self, name)
return self.dict[name.lower()]
Get a specific header, as from a dictionary.
Get a specific header, as from a dictionary.
[ "Get", "a", "specific", "header", "as", "from", "a", "dictionary", "." ]
def __getitem__(self, name): """Get a specific header, as from a dictionary.""" return self.dict[name.lower()]
[ "def", "__getitem__", "(", "self", ",", "name", ")", ":", "return", "self", ".", "dict", "[", "name", ".", "lower", "(", ")", "]" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rfc822.py#L386-L388
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBListener.AddEvent
(self, *args)
return _lldb.SBListener_AddEvent(self, *args)
AddEvent(self, SBEvent event)
AddEvent(self, SBEvent event)
[ "AddEvent", "(", "self", "SBEvent", "event", ")" ]
def AddEvent(self, *args): """AddEvent(self, SBEvent event)""" return _lldb.SBListener_AddEvent(self, *args)
[ "def", "AddEvent", "(", "self", ",", "*", "args", ")", ":", "return", "_lldb", ".", "SBListener_AddEvent", "(", "self", ",", "*", "args", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L5728-L5730
google/mozc
7329757e1ad30e327c1ae823a8302c79482d6b9c
src/prediction/gen_zero_query_number_data.py
python
ParseOption
()
return parser.parse_args()[0]
Parses command line options.
Parses command line options.
[ "Parses", "command", "line", "options", "." ]
def ParseOption(): """Parses command line options.""" parser = optparse.OptionParser() parser.add_option('--input', dest='input', help='Input file path') parser.add_option('--output_token_array', dest='output_token_array', help='Output token array file path') parser.add_option('--output_string_array', dest='output_string_array', help='Output string array file path') return parser.parse_args()[0]
[ "def", "ParseOption", "(", ")", ":", "parser", "=", "optparse", ".", "OptionParser", "(", ")", "parser", ".", "add_option", "(", "'--input'", ",", "dest", "=", "'input'", ",", "help", "=", "'Input file path'", ")", "parser", ".", "add_option", "(", "'--output_token_array'", ",", "dest", "=", "'output_token_array'", ",", "help", "=", "'Output token array file path'", ")", "parser", ".", "add_option", "(", "'--output_string_array'", ",", "dest", "=", "'output_string_array'", ",", "help", "=", "'Output string array file path'", ")", "return", "parser", ".", "parse_args", "(", ")", "[", "0", "]" ]
https://github.com/google/mozc/blob/7329757e1ad30e327c1ae823a8302c79482d6b9c/src/prediction/gen_zero_query_number_data.py#L62-L70
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/urllib.py
python
splitport
(host)
return host, None
splitport('host:port') --> 'host', 'port'.
splitport('host:port') --> 'host', 'port'.
[ "splitport", "(", "host", ":", "port", ")", "--", ">", "host", "port", "." ]
def splitport(host): """splitport('host:port') --> 'host', 'port'.""" global _portprog if _portprog is None: import re _portprog = re.compile('^(.*):([0-9]+)$') match = _portprog.match(host) if match: return match.group(1, 2) return host, None
[ "def", "splitport", "(", "host", ")", ":", "global", "_portprog", "if", "_portprog", "is", "None", ":", "import", "re", "_portprog", "=", "re", ".", "compile", "(", "'^(.*):([0-9]+)$'", ")", "match", "=", "_portprog", ".", "match", "(", "host", ")", "if", "match", ":", "return", "match", ".", "group", "(", "1", ",", "2", ")", "return", "host", ",", "None" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/urllib.py#L1090-L1099
Tencent/mars
54969ba56b402a622db123e780a4f760b38c5c36
mars/lint/cpplint.py
python
CheckLanguage
(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error)
Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
Checks rules from the 'C++ language rules' section of cppguide.html.
[ "Checks", "rules", "from", "the", "C", "++", "language", "rules", "section", "of", "cppguide", ".", "html", "." ]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) if match: include_state.ResetSection(match.group(1)) # Make Windows paths like Unix. fullname = os.path.abspath(filename).replace('\\', '/') # Perform other checks now that we are sure that this is not an include line CheckCasts(filename, clean_lines, linenum, error) CheckGlobalStatic(filename, clean_lines, linenum, error) CheckPrintf(filename, clean_lines, linenum, error) if file_extension == 'h': # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes declare or disable copy/assign # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) # Check if some verboten operator overloading is going on # TODO(unknown): catch out-of-line unary operator&: # class X {}; # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& if Search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { if Search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') # Check for potential format string bugs like printf(foo). # We constrain the pattern not to pick things like DocidForPrintf(foo). # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) # TODO(unknown): Catch the following case. Need to change the calling # convention of the whole function to process multiple line to handle it. # printf( # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') if printf_args: match = Match(r'([\w.\->()]+)$', printf_args) if match and match.group(1) != '__VA_ARGS__': function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1) error(filename, linenum, 'runtime/printf', 4, 'Potential format string bug. Do %s("%%s", %s) instead.' % (function_name, match.group(1))) # Check for potential memset bugs like memset(buf, sizeof(buf), 0). match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): error(filename, linenum, 'runtime/memset', 4, 'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2))) if Search(r'\busing namespace\b', line): error(filename, linenum, 'build/namespaces', 5, 'Do not use namespace using-directives. ' 'Use using-declarations instead.') # Detect variable-length arrays. match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) if (match and match.group(2) != 'return' and match.group(2) != 'delete' and match.group(3).find(']') == -1): # Split the size using space and arithmetic operators as delimiters. # If any of the resulting tokens are not compile time constants then # report the error. tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) is_const = True skip_next = False for tok in tokens: if skip_next: skip_next = False continue if Search(r'sizeof\(.+\)', tok): continue if Search(r'arraysize\(\w+\)', tok): continue tok = tok.lstrip('(') tok = tok.rstrip(')') if not tok: continue if Match(r'\d+', tok): continue if Match(r'0[xX][0-9a-fA-F]+', tok): continue if Match(r'k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue # A catch all for tricky sizeof cases, including 'sizeof expression', # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' # requires skipping the next token because we split on ' ' and '*'. if tok.startswith('sizeof'): skip_next = True continue is_const = False break if not is_const: error(filename, linenum, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. if (file_extension == 'h' and Search(r'\bnamespace\s*{', line) and line[-1] != '\\'): error(filename, linenum, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.')
[ "def", "CheckLanguage", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "file_extension", ",", "include_state", ",", "nesting_state", ",", "error", ")", ":", "# If the line is empty or consists of entirely a comment, no need to", "# check it.", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "not", "line", ":", "return", "match", "=", "_RE_PATTERN_INCLUDE", ".", "search", "(", "line", ")", "if", "match", ":", "CheckIncludeLine", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "include_state", ",", "error", ")", "return", "# Reset include state across preprocessor directives. This is meant", "# to silence warnings for conditional includes.", "match", "=", "Match", "(", "r'^\\s*#\\s*(if|ifdef|ifndef|elif|else|endif)\\b'", ",", "line", ")", "if", "match", ":", "include_state", ".", "ResetSection", "(", "match", ".", "group", "(", "1", ")", ")", "# Make Windows paths like Unix.", "fullname", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# Perform other checks now that we are sure that this is not an include line", "CheckCasts", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckGlobalStatic", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "CheckPrintf", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "if", "file_extension", "==", "'h'", ":", "# TODO(unknown): check that 1-arg constructors are explicit.", "# How to tell it's a constructor?", "# (handled in CheckForNonStandardConstructs for now)", "# TODO(unknown): check that classes declare or disable copy/assign", "# (level 1 error)", "pass", "# Check if people are using the verboten C basic types. The only exception", "# we regularly allow is \"unsigned short port\" for port.", "if", "Search", "(", "r'\\bshort port\\b'", ",", "line", ")", ":", "if", "not", "Search", "(", "r'\\bunsigned short port\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/int'", ",", "4", ",", "'Use \"unsigned short\" for ports, not \"short\"'", ")", "else", ":", "match", "=", "Search", "(", "r'\\b(short|long(?! +double)|long long)\\b'", ",", "line", ")", "if", "match", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/int'", ",", "4", ",", "'Use int16/int64/etc, rather than the C type %s'", "%", "match", ".", "group", "(", "1", ")", ")", "# Check if some verboten operator overloading is going on", "# TODO(unknown): catch out-of-line unary operator&:", "# class X {};", "# int operator&(const X& x) { return 42; } // unary operator&", "# The trick is it's hard to tell apart from binary operator&:", "# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&", "if", "Search", "(", "r'\\boperator\\s*&\\s*\\(\\s*\\)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/operator'", ",", "4", ",", "'Unary operator& is dangerous. Do not use it.'", ")", "# Check for suspicious usage of \"if\" like", "# } if (a == b) {", "if", "Search", "(", "r'\\}\\s*if\\s*\\('", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/braces'", ",", "4", ",", "'Did you mean \"else if\"? If not, start a new line for \"if\".'", ")", "# Check for potential format string bugs like printf(foo).", "# We constrain the pattern not to pick things like DocidForPrintf(foo).", "# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())", "# TODO(unknown): Catch the following case. Need to change the calling", "# convention of the whole function to process multiple line to handle it.", "# printf(", "# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);", "printf_args", "=", "_GetTextInside", "(", "line", ",", "r'(?i)\\b(string)?printf\\s*\\('", ")", "if", "printf_args", ":", "match", "=", "Match", "(", "r'([\\w.\\->()]+)$'", ",", "printf_args", ")", "if", "match", "and", "match", ".", "group", "(", "1", ")", "!=", "'__VA_ARGS__'", ":", "function_name", "=", "re", ".", "search", "(", "r'\\b((?:string)?printf)\\s*\\('", ",", "line", ",", "re", ".", "I", ")", ".", "group", "(", "1", ")", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "4", ",", "'Potential format string bug. Do %s(\"%%s\", %s) instead.'", "%", "(", "function_name", ",", "match", ".", "group", "(", "1", ")", ")", ")", "# Check for potential memset bugs like memset(buf, sizeof(buf), 0).", "match", "=", "Search", "(", "r'memset\\s*\\(([^,]*),\\s*([^,]*),\\s*0\\s*\\)'", ",", "line", ")", "if", "match", "and", "not", "Match", "(", "r\"^''|-?[0-9]+|0x[0-9A-Fa-f]$\"", ",", "match", ".", "group", "(", "2", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/memset'", ",", "4", ",", "'Did you mean \"memset(%s, 0, %s)\"?'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "if", "Search", "(", "r'\\busing namespace\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/namespaces'", ",", "5", ",", "'Do not use namespace using-directives. '", "'Use using-declarations instead.'", ")", "# Detect variable-length arrays.", "match", "=", "Match", "(", "r'\\s*(.+::)?(\\w+) [a-z]\\w*\\[(.+)];'", ",", "line", ")", "if", "(", "match", "and", "match", ".", "group", "(", "2", ")", "!=", "'return'", "and", "match", ".", "group", "(", "2", ")", "!=", "'delete'", "and", "match", ".", "group", "(", "3", ")", ".", "find", "(", "']'", ")", "==", "-", "1", ")", ":", "# Split the size using space and arithmetic operators as delimiters.", "# If any of the resulting tokens are not compile time constants then", "# report the error.", "tokens", "=", "re", ".", "split", "(", "r'\\s|\\+|\\-|\\*|\\/|<<|>>]'", ",", "match", ".", "group", "(", "3", ")", ")", "is_const", "=", "True", "skip_next", "=", "False", "for", "tok", "in", "tokens", ":", "if", "skip_next", ":", "skip_next", "=", "False", "continue", "if", "Search", "(", "r'sizeof\\(.+\\)'", ",", "tok", ")", ":", "continue", "if", "Search", "(", "r'arraysize\\(\\w+\\)'", ",", "tok", ")", ":", "continue", "tok", "=", "tok", ".", "lstrip", "(", "'('", ")", "tok", "=", "tok", ".", "rstrip", "(", "')'", ")", "if", "not", "tok", ":", "continue", "if", "Match", "(", "r'\\d+'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'0[xX][0-9a-fA-F]+'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'k[A-Z0-9]\\w*'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'(.+::)?k[A-Z0-9]\\w*'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'(.+::)?[A-Z][A-Z0-9_]*'", ",", "tok", ")", ":", "continue", "# A catch all for tricky sizeof cases, including 'sizeof expression',", "# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'", "# requires skipping the next token because we split on ' ' and '*'.", "if", "tok", ".", "startswith", "(", "'sizeof'", ")", ":", "skip_next", "=", "True", "continue", "is_const", "=", "False", "break", "if", "not", "is_const", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/arrays'", ",", "1", ",", "'Do not use variable-length arrays. Use an appropriately named '", "\"('k' followed by CamelCase) compile-time constant for the size.\"", ")", "# Check for use of unnamed namespaces in header files. Registration", "# macros are typically OK, so we allow use of \"namespace {\" on lines", "# that end with backslashes.", "if", "(", "file_extension", "==", "'h'", "and", "Search", "(", "r'\\bnamespace\\s*{'", ",", "line", ")", "and", "line", "[", "-", "1", "]", "!=", "'\\\\'", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/namespaces'", ",", "4", ",", "'Do not use unnamed namespaces in header files. See '", "'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'", "' for more information.'", ")" ]
https://github.com/Tencent/mars/blob/54969ba56b402a622db123e780a4f760b38c5c36/mars/lint/cpplint.py#L4766-L4921
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py
python
_FetchRemapping
(type_name, mapping_type, python_name=None, json_name=None, mappings=None)
return None
Common code for fetching a key or value from a remapping dict.
Common code for fetching a key or value from a remapping dict.
[ "Common", "code", "for", "fetching", "a", "key", "or", "value", "from", "a", "remapping", "dict", "." ]
def _FetchRemapping(type_name, mapping_type, python_name=None, json_name=None, mappings=None): """Common code for fetching a key or value from a remapping dict.""" if python_name and json_name: raise exceptions.InvalidDataError( 'Cannot specify both python_name and json_name ' 'for %s remapping' % mapping_type) if not (python_name or json_name): raise exceptions.InvalidDataError( 'Must specify either python_name or json_name for %s remapping' % ( mapping_type,)) field_remappings = mappings.get(type_name, {}) if field_remappings: if python_name: return field_remappings.get(python_name) elif json_name: if json_name in list(field_remappings.values()): return [k for k in field_remappings if field_remappings[k] == json_name][0] return None
[ "def", "_FetchRemapping", "(", "type_name", ",", "mapping_type", ",", "python_name", "=", "None", ",", "json_name", "=", "None", ",", "mappings", "=", "None", ")", ":", "if", "python_name", "and", "json_name", ":", "raise", "exceptions", ".", "InvalidDataError", "(", "'Cannot specify both python_name and json_name '", "'for %s remapping'", "%", "mapping_type", ")", "if", "not", "(", "python_name", "or", "json_name", ")", ":", "raise", "exceptions", ".", "InvalidDataError", "(", "'Must specify either python_name or json_name for %s remapping'", "%", "(", "mapping_type", ",", ")", ")", "field_remappings", "=", "mappings", ".", "get", "(", "type_name", ",", "{", "}", ")", "if", "field_remappings", ":", "if", "python_name", ":", "return", "field_remappings", ".", "get", "(", "python_name", ")", "elif", "json_name", ":", "if", "json_name", "in", "list", "(", "field_remappings", ".", "values", "(", ")", ")", ":", "return", "[", "k", "for", "k", "in", "field_remappings", "if", "field_remappings", "[", "k", "]", "==", "json_name", "]", "[", "0", "]", "return", "None" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py#L624-L643
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/misc_util.py
python
mingw32
()
return False
Return true when using mingw32 environment.
Return true when using mingw32 environment.
[ "Return", "true", "when", "using", "mingw32", "environment", "." ]
def mingw32(): """Return true when using mingw32 environment. """ if sys.platform=='win32': if os.environ.get('OSTYPE', '')=='msys': return True if os.environ.get('MSYSTEM', '')=='MINGW32': return True return False
[ "def", "mingw32", "(", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "if", "os", ".", "environ", ".", "get", "(", "'OSTYPE'", ",", "''", ")", "==", "'msys'", ":", "return", "True", "if", "os", ".", "environ", ".", "get", "(", "'MSYSTEM'", ",", "''", ")", "==", "'MINGW32'", ":", "return", "True", "return", "False" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/misc_util.py#L391-L399
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/fluid/layers/nn.py
python
deformable_conv
(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None)
return output
r""" :api_attr: Static Graph **Deformable Convolution op** Compute 2-D deformable convolution on 4-D input. Given input image x, output feature map y, the deformable convolution operation can be expressed as follow: Deformable Convolution v2: .. math:: y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k} Deformable Convolution v1: .. math:: y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)} Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location, Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_. Example: - Input: Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)` Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})` Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})` - Output: Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where .. math:: H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\ W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 Args: input (Variable): The input image with [N, C, H, W] format. A Tensor with type float32, float64. offset (Variable): The input coordinate offset of deformable convolution layer. A Tensor with type float32, float64. Mask (Variable, Optional): The input mask of deformable convolution layer. A Tensor with type float32, float64. It should be None when you use deformable convolution v1. num_filters(int): The number of filter. It is as same as the output image channel. filter_size (int|tuple): The filter size. If filter_size is a tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise, the filter will be a square. stride (int|tuple): The stride size. If stride is a tuple, it must contain two integers, (stride_H, stride_W). Otherwise, the stride_H = stride_W = stride. Default: stride = 1. padding (int|tuple): The padding size. If padding is a tuple, it must contain two integers, (padding_H, padding_W). Otherwise, the padding_H = padding_W = padding. Default: padding = 0. dilation (int|tuple): The dilation size. If dilation is a tuple, it must contain two integers, (dilation_H, dilation_W). Otherwise, the dilation_H = dilation_W = dilation. Default: dilation = 1. groups (int): The groups number of the deformable conv layer. According to grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Default: groups=1. deformable_groups (int): The number of deformable group partitions. Default: deformable_groups = 1. im2col_step (int): Maximum number of images per im2col computation; The total batch size should be devisable by this value or smaller than this value; if you face out of memory problem, you can try to use a smaller value here. Default: im2col_step = 64. param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights of deformable conv. If it is set to None or one attribute of ParamAttr, deformable conv will create ParamAttr as param_attr. If the Initializer of the param_attr is not set, the parameter is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None. bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of deformable conv layer. If it is set to False, no bias will be added to the output units. If it is set to None or one attribute of ParamAttr, conv2d will create ParamAttr as bias_attr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \ used while True. Default: True. name(str, Optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None. Returns: Variable: The tensor variable storing the deformable convolution \ result. A Tensor with type float32, float64. Raises: ValueError: If the shapes of input, filter_size, stride, padding and groups mismatch. Examples: .. code-block:: python #deformable conv v2: import paddle.fluid as fluid import paddle paddle.enable_static() C_in, H_in, W_in = 3, 32, 32 filter_size, deformable_groups = 3, 1 data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32') offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32') out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask, num_filters=2, filter_size=filter_size, padding=1, modulated=True) #deformable conv v1: import paddle.fluid as fluid C_in, H_in, W_in = 3, 32, 32 filter_size, deformable_groups = 3, 1 data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32') offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None, num_filters=2, filter_size=filter_size, padding=1, modulated=False)
r""" :api_attr: Static Graph
[ "r", ":", "api_attr", ":", "Static", "Graph" ]
def deformable_conv(input, offset, mask, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=None, deformable_groups=None, im2col_step=None, param_attr=None, bias_attr=None, modulated=True, name=None): r""" :api_attr: Static Graph **Deformable Convolution op** Compute 2-D deformable convolution on 4-D input. Given input image x, output feature map y, the deformable convolution operation can be expressed as follow: Deformable Convolution v2: .. math:: y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k} Deformable Convolution v1: .. math:: y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)} Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location, Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_. Example: - Input: Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)` Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})` Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})` - Output: Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where .. math:: H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\ W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 Args: input (Variable): The input image with [N, C, H, W] format. A Tensor with type float32, float64. offset (Variable): The input coordinate offset of deformable convolution layer. A Tensor with type float32, float64. Mask (Variable, Optional): The input mask of deformable convolution layer. A Tensor with type float32, float64. It should be None when you use deformable convolution v1. num_filters(int): The number of filter. It is as same as the output image channel. filter_size (int|tuple): The filter size. If filter_size is a tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise, the filter will be a square. stride (int|tuple): The stride size. If stride is a tuple, it must contain two integers, (stride_H, stride_W). Otherwise, the stride_H = stride_W = stride. Default: stride = 1. padding (int|tuple): The padding size. If padding is a tuple, it must contain two integers, (padding_H, padding_W). Otherwise, the padding_H = padding_W = padding. Default: padding = 0. dilation (int|tuple): The dilation size. If dilation is a tuple, it must contain two integers, (dilation_H, dilation_W). Otherwise, the dilation_H = dilation_W = dilation. Default: dilation = 1. groups (int): The groups number of the deformable conv layer. According to grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2, the first half of the filters is only connected to the first half of the input channels, while the second half of the filters is only connected to the second half of the input channels. Default: groups=1. deformable_groups (int): The number of deformable group partitions. Default: deformable_groups = 1. im2col_step (int): Maximum number of images per im2col computation; The total batch size should be devisable by this value or smaller than this value; if you face out of memory problem, you can try to use a smaller value here. Default: im2col_step = 64. param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights of deformable conv. If it is set to None or one attribute of ParamAttr, deformable conv will create ParamAttr as param_attr. If the Initializer of the param_attr is not set, the parameter is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None. bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of deformable conv layer. If it is set to False, no bias will be added to the output units. If it is set to None or one attribute of ParamAttr, conv2d will create ParamAttr as bias_attr. If the Initializer of the bias_attr is not set, the bias is initialized zero. Default: None. modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \ used while True. Default: True. name(str, Optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None. Returns: Variable: The tensor variable storing the deformable convolution \ result. A Tensor with type float32, float64. Raises: ValueError: If the shapes of input, filter_size, stride, padding and groups mismatch. Examples: .. code-block:: python #deformable conv v2: import paddle.fluid as fluid import paddle paddle.enable_static() C_in, H_in, W_in = 3, 32, 32 filter_size, deformable_groups = 3, 1 data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32') offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32') out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask, num_filters=2, filter_size=filter_size, padding=1, modulated=True) #deformable conv v1: import paddle.fluid as fluid C_in, H_in, W_in = 3, 32, 32 filter_size, deformable_groups = 3, 1 data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32') offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32') out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None, num_filters=2, filter_size=filter_size, padding=1, modulated=False) """ check_variable_and_dtype(input, "input", ['float32', 'float64'], 'deformable_conv') check_variable_and_dtype(offset, "offset", ['float32', 'float64'], 'deformable_conv') check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv') num_channels = input.shape[1] assert param_attr is not False, "param_attr should not be False here." helper = LayerHelper('deformable_conv', **locals()) dtype = helper.input_dtype() if not isinstance(input, Variable): raise TypeError("Input of deformable_conv must be Variable") if not isinstance(offset, Variable): raise TypeError("Input Offset of deformable_conv must be Variable") if groups is None: num_filter_channels = num_channels else: if num_channels % groups != 0: raise ValueError("num_channels must be divisible by groups.") num_filter_channels = num_channels // groups filter_size = utils.convert_to_list(filter_size, 2, 'filter_size') stride = utils.convert_to_list(stride, 2, 'stride') padding = utils.convert_to_list(padding, 2, 'padding') dilation = utils.convert_to_list(dilation, 2, 'dilation') input_shape = input.shape filter_shape = [num_filters, int(num_filter_channels)] + filter_size def _get_default_param_initializer(): filter_elem_num = filter_size[0] * filter_size[1] * num_channels if filter_elem_num <= 0: raise ValueError( "Invalid filter number, excepted number is larger than 0, but" " received {}, please check the input shape and " "filter size.".format(filter_elem_num)) std = (2.0 / filter_elem_num)**0.5 return Normal(0.0, std, 0) filter_param = helper.create_parameter( attr=helper.param_attr, shape=filter_shape, dtype=dtype, default_initializer=_get_default_param_initializer()) pre_bias = helper.create_variable_for_type_inference(dtype) if modulated: helper.append_op( type='deformable_conv', inputs={ 'Input': input, 'Filter': filter_param, 'Offset': offset, 'Mask': mask, }, outputs={"Output": pre_bias}, attrs={ 'strides': stride, 'paddings': padding, 'dilations': dilation, 'groups': groups, 'deformable_groups': deformable_groups, 'im2col_step': im2col_step, }) else: helper.append_op( type='deformable_conv_v1', inputs={ 'Input': input, 'Filter': filter_param, 'Offset': offset, }, outputs={"Output": pre_bias}, attrs={ 'strides': stride, 'paddings': padding, 'dilations': dilation, 'groups': groups, 'deformable_groups': deformable_groups, 'im2col_step': im2col_step, }) output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) return output
[ "def", "deformable_conv", "(", "input", ",", "offset", ",", "mask", ",", "num_filters", ",", "filter_size", ",", "stride", "=", "1", ",", "padding", "=", "0", ",", "dilation", "=", "1", ",", "groups", "=", "None", ",", "deformable_groups", "=", "None", ",", "im2col_step", "=", "None", ",", "param_attr", "=", "None", ",", "bias_attr", "=", "None", ",", "modulated", "=", "True", ",", "name", "=", "None", ")", ":", "check_variable_and_dtype", "(", "input", ",", "\"input\"", ",", "[", "'float32'", ",", "'float64'", "]", ",", "'deformable_conv'", ")", "check_variable_and_dtype", "(", "offset", ",", "\"offset\"", ",", "[", "'float32'", ",", "'float64'", "]", ",", "'deformable_conv'", ")", "check_type", "(", "mask", ",", "'mask'", ",", "(", "Variable", ",", "type", "(", "None", ")", ")", ",", "'deformable_conv'", ")", "num_channels", "=", "input", ".", "shape", "[", "1", "]", "assert", "param_attr", "is", "not", "False", ",", "\"param_attr should not be False here.\"", "helper", "=", "LayerHelper", "(", "'deformable_conv'", ",", "*", "*", "locals", "(", ")", ")", "dtype", "=", "helper", ".", "input_dtype", "(", ")", "if", "not", "isinstance", "(", "input", ",", "Variable", ")", ":", "raise", "TypeError", "(", "\"Input of deformable_conv must be Variable\"", ")", "if", "not", "isinstance", "(", "offset", ",", "Variable", ")", ":", "raise", "TypeError", "(", "\"Input Offset of deformable_conv must be Variable\"", ")", "if", "groups", "is", "None", ":", "num_filter_channels", "=", "num_channels", "else", ":", "if", "num_channels", "%", "groups", "!=", "0", ":", "raise", "ValueError", "(", "\"num_channels must be divisible by groups.\"", ")", "num_filter_channels", "=", "num_channels", "//", "groups", "filter_size", "=", "utils", ".", "convert_to_list", "(", "filter_size", ",", "2", ",", "'filter_size'", ")", "stride", "=", "utils", ".", "convert_to_list", "(", "stride", ",", "2", ",", "'stride'", ")", "padding", "=", "utils", ".", "convert_to_list", "(", "padding", ",", "2", ",", "'padding'", ")", "dilation", "=", "utils", ".", "convert_to_list", "(", "dilation", ",", "2", ",", "'dilation'", ")", "input_shape", "=", "input", ".", "shape", "filter_shape", "=", "[", "num_filters", ",", "int", "(", "num_filter_channels", ")", "]", "+", "filter_size", "def", "_get_default_param_initializer", "(", ")", ":", "filter_elem_num", "=", "filter_size", "[", "0", "]", "*", "filter_size", "[", "1", "]", "*", "num_channels", "if", "filter_elem_num", "<=", "0", ":", "raise", "ValueError", "(", "\"Invalid filter number, excepted number is larger than 0, but\"", "\" received {}, please check the input shape and \"", "\"filter size.\"", ".", "format", "(", "filter_elem_num", ")", ")", "std", "=", "(", "2.0", "/", "filter_elem_num", ")", "**", "0.5", "return", "Normal", "(", "0.0", ",", "std", ",", "0", ")", "filter_param", "=", "helper", ".", "create_parameter", "(", "attr", "=", "helper", ".", "param_attr", ",", "shape", "=", "filter_shape", ",", "dtype", "=", "dtype", ",", "default_initializer", "=", "_get_default_param_initializer", "(", ")", ")", "pre_bias", "=", "helper", ".", "create_variable_for_type_inference", "(", "dtype", ")", "if", "modulated", ":", "helper", ".", "append_op", "(", "type", "=", "'deformable_conv'", ",", "inputs", "=", "{", "'Input'", ":", "input", ",", "'Filter'", ":", "filter_param", ",", "'Offset'", ":", "offset", ",", "'Mask'", ":", "mask", ",", "}", ",", "outputs", "=", "{", "\"Output\"", ":", "pre_bias", "}", ",", "attrs", "=", "{", "'strides'", ":", "stride", ",", "'paddings'", ":", "padding", ",", "'dilations'", ":", "dilation", ",", "'groups'", ":", "groups", ",", "'deformable_groups'", ":", "deformable_groups", ",", "'im2col_step'", ":", "im2col_step", ",", "}", ")", "else", ":", "helper", ".", "append_op", "(", "type", "=", "'deformable_conv_v1'", ",", "inputs", "=", "{", "'Input'", ":", "input", ",", "'Filter'", ":", "filter_param", ",", "'Offset'", ":", "offset", ",", "}", ",", "outputs", "=", "{", "\"Output\"", ":", "pre_bias", "}", ",", "attrs", "=", "{", "'strides'", ":", "stride", ",", "'paddings'", ":", "padding", ",", "'dilations'", ":", "dilation", ",", "'groups'", ":", "groups", ",", "'deformable_groups'", ":", "deformable_groups", ",", "'im2col_step'", ":", "im2col_step", ",", "}", ")", "output", "=", "helper", ".", "append_bias_op", "(", "pre_bias", ",", "dim_start", "=", "1", ",", "dim_end", "=", "2", ")", "return", "output" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/layers/nn.py#L14469-L14702
sailing-pmls/bosen
06cb58902d011fbea5f9428f10ce30e621492204
style_script/cpplint.py
python
_ClassifyInclude
(fileinfo, include, is_system)
return _OTHER_HEADER
Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER
Figures out what kind of header 'include' is.
[ "Figures", "out", "what", "kind", "of", "header", "include", "is", "." ]
def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. is_cpp_h = include in _CPP_HEADERS if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) if target_base == include_base and ( include_dir == target_dir or include_dir == os.path.normpath(target_dir + '/../public')): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER
[ "def", "_ClassifyInclude", "(", "fileinfo", ",", "include", ",", "is_system", ")", ":", "# This is a list of all standard c++ header files, except", "# those already checked for above.", "is_cpp_h", "=", "include", "in", "_CPP_HEADERS", "if", "is_system", ":", "if", "is_cpp_h", ":", "return", "_CPP_SYS_HEADER", "else", ":", "return", "_C_SYS_HEADER", "# If the target file and the include we're checking share a", "# basename when we drop common extensions, and the include", "# lives in . , then it's likely to be owned by the target file.", "target_dir", ",", "target_base", "=", "(", "os", ".", "path", ".", "split", "(", "_DropCommonSuffixes", "(", "fileinfo", ".", "RepositoryName", "(", ")", ")", ")", ")", "include_dir", ",", "include_base", "=", "os", ".", "path", ".", "split", "(", "_DropCommonSuffixes", "(", "include", ")", ")", "if", "target_base", "==", "include_base", "and", "(", "include_dir", "==", "target_dir", "or", "include_dir", "==", "os", ".", "path", ".", "normpath", "(", "target_dir", "+", "'/../public'", ")", ")", ":", "return", "_LIKELY_MY_HEADER", "# If the target and include share some initial basename", "# component, it's possible the target is implementing the", "# include, so it's allowed to be first, but we'll never", "# complain if it's not there.", "target_first_component", "=", "_RE_FIRST_COMPONENT", ".", "match", "(", "target_base", ")", "include_first_component", "=", "_RE_FIRST_COMPONENT", ".", "match", "(", "include_base", ")", "if", "(", "target_first_component", "and", "include_first_component", "and", "target_first_component", ".", "group", "(", "0", ")", "==", "include_first_component", ".", "group", "(", "0", ")", ")", ":", "return", "_POSSIBLE_MY_HEADER", "return", "_OTHER_HEADER" ]
https://github.com/sailing-pmls/bosen/blob/06cb58902d011fbea5f9428f10ce30e621492204/style_script/cpplint.py#L4545-L4601
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/richtext.py
python
RichTextCtrl.EndFontSize
(*args, **kwargs)
return _richtext.RichTextCtrl_EndFontSize(*args, **kwargs)
EndFontSize(self) -> bool End using point size
EndFontSize(self) -> bool
[ "EndFontSize", "(", "self", ")", "-", ">", "bool" ]
def EndFontSize(*args, **kwargs): """ EndFontSize(self) -> bool End using point size """ return _richtext.RichTextCtrl_EndFontSize(*args, **kwargs)
[ "def", "EndFontSize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextCtrl_EndFontSize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L3391-L3397
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/keras/utils/tf_inspect.py
python
getmro
(cls)
return _inspect.getmro(cls)
TFDecorator-aware replacement for inspect.getmro.
TFDecorator-aware replacement for inspect.getmro.
[ "TFDecorator", "-", "aware", "replacement", "for", "inspect", ".", "getmro", "." ]
def getmro(cls): """TFDecorator-aware replacement for inspect.getmro.""" return _inspect.getmro(cls)
[ "def", "getmro", "(", "cls", ")", ":", "return", "_inspect", ".", "getmro", "(", "cls", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/keras/utils/tf_inspect.py#L335-L337
forkineye/ESPixelStick
22926f1c0d1131f1369fc7cad405689a095ae3cb
dist/bin/esptool/esptool.py
python
esp8266_crc32
(data)
CRC32 algorithm used by 8266 SDK bootloader (and gen_appbin.py).
CRC32 algorithm used by 8266 SDK bootloader (and gen_appbin.py).
[ "CRC32", "algorithm", "used", "by", "8266", "SDK", "bootloader", "(", "and", "gen_appbin", ".", "py", ")", "." ]
def esp8266_crc32(data): """ CRC32 algorithm used by 8266 SDK bootloader (and gen_appbin.py). """ crc = binascii.crc32(data, 0) & 0xFFFFFFFF if crc & 0x80000000: return crc ^ 0xFFFFFFFF else: return crc + 1
[ "def", "esp8266_crc32", "(", "data", ")", ":", "crc", "=", "binascii", ".", "crc32", "(", "data", ",", "0", ")", "&", "0xFFFFFFFF", "if", "crc", "&", "0x80000000", ":", "return", "crc", "^", "0xFFFFFFFF", "else", ":", "return", "crc", "+", "1" ]
https://github.com/forkineye/ESPixelStick/blob/22926f1c0d1131f1369fc7cad405689a095ae3cb/dist/bin/esptool/esptool.py#L1691-L1699
apache/incubator-mxnet
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
python/mxnet/numpy/random.py
python
shuffle
(x)
Modify a sequence in-place by shuffling its contents. This function only shuffles the array along the first axis of a multi-dimensional array. The order of sub-arrays is changed but their contents remain the same. Parameters ---------- x: ndarray The array or list to be shuffled. Examples -------- >>> arr = np.arange(10) >>> np.random.shuffle(arr) >>> arr array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.]) # random Multi-dimensional arrays are only shuffled along the first axis: >>> arr = np.arange(9).reshape((3, 3)) >>> np.random.shuffle(arr) >>> arr array([[6., 7., 8.], # random [3., 4., 5.], [0., 1., 2.]])
Modify a sequence in-place by shuffling its contents.
[ "Modify", "a", "sequence", "in", "-", "place", "by", "shuffling", "its", "contents", "." ]
def shuffle(x): """ Modify a sequence in-place by shuffling its contents. This function only shuffles the array along the first axis of a multi-dimensional array. The order of sub-arrays is changed but their contents remain the same. Parameters ---------- x: ndarray The array or list to be shuffled. Examples -------- >>> arr = np.arange(10) >>> np.random.shuffle(arr) >>> arr array([5., 1., 0., 6., 7., 3., 9., 8., 4., 2.]) # random Multi-dimensional arrays are only shuffled along the first axis: >>> arr = np.arange(9).reshape((3, 3)) >>> np.random.shuffle(arr) >>> arr array([[6., 7., 8.], # random [3., 4., 5.], [0., 1., 2.]]) """ _mx_nd_np.random.shuffle(x)
[ "def", "shuffle", "(", "x", ")", ":", "_mx_nd_np", ".", "random", ".", "shuffle", "(", "x", ")" ]
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/numpy/random.py#L768-L797
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/ros/rosunit/src/rosunit/junitxml.py
python
TestCaseResult.add_failure
(self, failure)
@param failure TestFailure
[]
def add_failure(self, failure): """ @param failure TestFailure """ self.failures.append(failure)
[ "def", "add_failure", "(", "self", ",", "failure", ")", ":", "self", ".", "failures", ".", "append", "(", "failure", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/ros/rosunit/src/rosunit/junitxml.py#L159-L163
deepmind/open_spiel
4ca53bea32bb2875c7385d215424048ae92f78c8
open_spiel/python/games/tic_tac_toe.py
python
TicTacToeState.is_terminal
(self)
return self._is_terminal
Returns True if the game is over.
Returns True if the game is over.
[ "Returns", "True", "if", "the", "game", "is", "over", "." ]
def is_terminal(self): """Returns True if the game is over.""" return self._is_terminal
[ "def", "is_terminal", "(", "self", ")", ":", "return", "self", ".", "_is_terminal" ]
https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/games/tic_tac_toe.py#L121-L123
NeoGeographyToolkit/StereoPipeline
eedf54a919fb5cce1ab0e280bb0df4050763aa11
src/asp/IceBridge/icebridge_common.py
python
updateValidFilesListFromDisk
(filesList, filesSet)
return filesSet
Update the current set of valid files with any new info from disk.
Update the current set of valid files with any new info from disk.
[ "Update", "the", "current", "set", "of", "valid", "files", "with", "any", "new", "info", "from", "disk", "." ]
def updateValidFilesListFromDisk(filesList, filesSet): '''Update the current set of valid files with any new info from disk.''' # Nothing to if not os.path.exists(filesList): return filesSet print("Reading: " + filesList) with open(filesList, 'r') as f: for line in f: line = line.strip() filesSet.add(line) return filesSet
[ "def", "updateValidFilesListFromDisk", "(", "filesList", ",", "filesSet", ")", ":", "# Nothing to ", "if", "not", "os", ".", "path", ".", "exists", "(", "filesList", ")", ":", "return", "filesSet", "print", "(", "\"Reading: \"", "+", "filesList", ")", "with", "open", "(", "filesList", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "line", "=", "line", ".", "strip", "(", ")", "filesSet", ".", "add", "(", "line", ")", "return", "filesSet" ]
https://github.com/NeoGeographyToolkit/StereoPipeline/blob/eedf54a919fb5cce1ab0e280bb0df4050763aa11/src/asp/IceBridge/icebridge_common.py#L306-L319
Tencent/CMONGO
c40380caa14e05509f46993aa8b8da966b09b0b5
src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Scanner/Prog.py
python
_subst_libs
(env, libs)
return libs
Substitute environment variables and split into list.
Substitute environment variables and split into list.
[ "Substitute", "environment", "variables", "and", "split", "into", "list", "." ]
def _subst_libs(env, libs): """ Substitute environment variables and split into list. """ if SCons.Util.is_String(libs): libs = env.subst(libs) if SCons.Util.is_String(libs): libs = libs.split() elif SCons.Util.is_Sequence(libs): _libs = [] for l in libs: _libs += _subst_libs(env, l) libs = _libs else: # libs is an object (Node, for example) libs = [libs] return libs
[ "def", "_subst_libs", "(", "env", ",", "libs", ")", ":", "if", "SCons", ".", "Util", ".", "is_String", "(", "libs", ")", ":", "libs", "=", "env", ".", "subst", "(", "libs", ")", "if", "SCons", ".", "Util", ".", "is_String", "(", "libs", ")", ":", "libs", "=", "libs", ".", "split", "(", ")", "elif", "SCons", ".", "Util", ".", "is_Sequence", "(", "libs", ")", ":", "_libs", "=", "[", "]", "for", "l", "in", "libs", ":", "_libs", "+=", "_subst_libs", "(", "env", ",", "l", ")", "libs", "=", "_libs", "else", ":", "# libs is an object (Node, for example)", "libs", "=", "[", "libs", "]", "return", "libs" ]
https://github.com/Tencent/CMONGO/blob/c40380caa14e05509f46993aa8b8da966b09b0b5/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Scanner/Prog.py#L41-L57
etotheipi/BitcoinArmory
2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98
armoryd.py
python
Armory_Json_Rpc_Server.jsonrpc_getledgersimple
(self, inB58ID, tx_count=10, from_tx=0)
return self.jsonrpc_getledger(inB58ID, tx_count, from_tx, True)
DESCRIPTION: Get a simple version of a wallet or lockbox ledger. PARAMETERS: inB58ID - The Base58 ID of the wallet or lockbox from which to obtain the ledger. The wallet or lockbox must already be loaded. tx_count - (Default=10) The number of entries to get. from_tx - (Default=0) The first entry to get. RETURN: A dictionary with a wallet ledger of type "simple".
DESCRIPTION: Get a simple version of a wallet or lockbox ledger. PARAMETERS: inB58ID - The Base58 ID of the wallet or lockbox from which to obtain the ledger. The wallet or lockbox must already be loaded. tx_count - (Default=10) The number of entries to get. from_tx - (Default=0) The first entry to get. RETURN: A dictionary with a wallet ledger of type "simple".
[ "DESCRIPTION", ":", "Get", "a", "simple", "version", "of", "a", "wallet", "or", "lockbox", "ledger", ".", "PARAMETERS", ":", "inB58ID", "-", "The", "Base58", "ID", "of", "the", "wallet", "or", "lockbox", "from", "which", "to", "obtain", "the", "ledger", ".", "The", "wallet", "or", "lockbox", "must", "already", "be", "loaded", ".", "tx_count", "-", "(", "Default", "=", "10", ")", "The", "number", "of", "entries", "to", "get", ".", "from_tx", "-", "(", "Default", "=", "0", ")", "The", "first", "entry", "to", "get", ".", "RETURN", ":", "A", "dictionary", "with", "a", "wallet", "ledger", "of", "type", "simple", "." ]
def jsonrpc_getledgersimple(self, inB58ID, tx_count=10, from_tx=0): """ DESCRIPTION: Get a simple version of a wallet or lockbox ledger. PARAMETERS: inB58ID - The Base58 ID of the wallet or lockbox from which to obtain the ledger. The wallet or lockbox must already be loaded. tx_count - (Default=10) The number of entries to get. from_tx - (Default=0) The first entry to get. RETURN: A dictionary with a wallet ledger of type "simple". """ return self.jsonrpc_getledger(inB58ID, tx_count, from_tx, True)
[ "def", "jsonrpc_getledgersimple", "(", "self", ",", "inB58ID", ",", "tx_count", "=", "10", ",", "from_tx", "=", "0", ")", ":", "return", "self", ".", "jsonrpc_getledger", "(", "inB58ID", ",", "tx_count", ",", "from_tx", ",", "True", ")" ]
https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/armoryd.py#L1209-L1222
assimp/assimp
97c7e084c2f7f8c9355ea42f73605890481bddc5
port/PyAssimp/scripts/fixed_pipeline_3d_viewer.py
python
GLRenderer.prepare_gl_buffers
(self, mesh)
Creates 3 buffer objets for each mesh, to store the vertices, the normals, and the faces indices.
Creates 3 buffer objets for each mesh, to store the vertices, the normals, and the faces indices.
[ "Creates", "3", "buffer", "objets", "for", "each", "mesh", "to", "store", "the", "vertices", "the", "normals", "and", "the", "faces", "indices", "." ]
def prepare_gl_buffers(self, mesh): """ Creates 3 buffer objets for each mesh, to store the vertices, the normals, and the faces indices. """ mesh.gl = {} # Fill the buffer for vertex positions mesh.gl["vertices"] = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["vertices"]) glBufferData(GL_ARRAY_BUFFER, mesh.vertices, GL_STATIC_DRAW) # Fill the buffer for normals mesh.gl["normals"] = glGenBuffers(1) glBindBuffer(GL_ARRAY_BUFFER, mesh.gl["normals"]) glBufferData(GL_ARRAY_BUFFER, mesh.normals, GL_STATIC_DRAW) # Fill the buffer for vertex positions mesh.gl["triangles"] = glGenBuffers(1) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mesh.gl["triangles"]) glBufferData(GL_ELEMENT_ARRAY_BUFFER, mesh.faces, GL_STATIC_DRAW) # Unbind buffers glBindBuffer(GL_ARRAY_BUFFER,0) glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0)
[ "def", "prepare_gl_buffers", "(", "self", ",", "mesh", ")", ":", "mesh", ".", "gl", "=", "{", "}", "# Fill the buffer for vertex positions", "mesh", ".", "gl", "[", "\"vertices\"", "]", "=", "glGenBuffers", "(", "1", ")", "glBindBuffer", "(", "GL_ARRAY_BUFFER", ",", "mesh", ".", "gl", "[", "\"vertices\"", "]", ")", "glBufferData", "(", "GL_ARRAY_BUFFER", ",", "mesh", ".", "vertices", ",", "GL_STATIC_DRAW", ")", "# Fill the buffer for normals", "mesh", ".", "gl", "[", "\"normals\"", "]", "=", "glGenBuffers", "(", "1", ")", "glBindBuffer", "(", "GL_ARRAY_BUFFER", ",", "mesh", ".", "gl", "[", "\"normals\"", "]", ")", "glBufferData", "(", "GL_ARRAY_BUFFER", ",", "mesh", ".", "normals", ",", "GL_STATIC_DRAW", ")", "# Fill the buffer for vertex positions", "mesh", ".", "gl", "[", "\"triangles\"", "]", "=", "glGenBuffers", "(", "1", ")", "glBindBuffer", "(", "GL_ELEMENT_ARRAY_BUFFER", ",", "mesh", ".", "gl", "[", "\"triangles\"", "]", ")", "glBufferData", "(", "GL_ELEMENT_ARRAY_BUFFER", ",", "mesh", ".", "faces", ",", "GL_STATIC_DRAW", ")", "# Unbind buffers", "glBindBuffer", "(", "GL_ARRAY_BUFFER", ",", "0", ")", "glBindBuffer", "(", "GL_ELEMENT_ARRAY_BUFFER", ",", "0", ")" ]
https://github.com/assimp/assimp/blob/97c7e084c2f7f8c9355ea42f73605890481bddc5/port/PyAssimp/scripts/fixed_pipeline_3d_viewer.py#L64-L96
YunzhuLi/InfoGAIL
68058c17d9ca9d959435082f1c48cd7b637d6f40
wgail_info_0/snakeoil_gym.py
python
Client.get_servers_input
(self)
Server's input is stored in a ServerState object
Server's input is stored in a ServerState object
[ "Server", "s", "input", "is", "stored", "in", "a", "ServerState", "object" ]
def get_servers_input(self): '''Server's input is stored in a ServerState object''' if not self.so: return sockdata = str() while True: try: # Receive server data sockdata,addr= self.so.recvfrom(1024) except socket.error, emsg: print "Waiting for data .............." if '***identified***' in sockdata: print "Client connected .............." continue elif '***shutdown***' in sockdata: print "Server has stopped the race. You were in %d place." % self.S.d['racePos'] self.shutdown() return elif '***restart***' in sockdata: # What do I do here? print "Server has restarted the race." # I haven't actually caught the server doing this. self.shutdown() return elif not sockdata: # Empty? continue # Try again. else: self.S.parse_server_str(sockdata) if self.debug: print self.S break
[ "def", "get_servers_input", "(", "self", ")", ":", "if", "not", "self", ".", "so", ":", "return", "sockdata", "=", "str", "(", ")", "while", "True", ":", "try", ":", "# Receive server data ", "sockdata", ",", "addr", "=", "self", ".", "so", ".", "recvfrom", "(", "1024", ")", "except", "socket", ".", "error", ",", "emsg", ":", "print", "\"Waiting for data ..............\"", "if", "'***identified***'", "in", "sockdata", ":", "print", "\"Client connected ..............\"", "continue", "elif", "'***shutdown***'", "in", "sockdata", ":", "print", "\"Server has stopped the race. You were in %d place.\"", "%", "self", ".", "S", ".", "d", "[", "'racePos'", "]", "self", ".", "shutdown", "(", ")", "return", "elif", "'***restart***'", "in", "sockdata", ":", "# What do I do here?", "print", "\"Server has restarted the race.\"", "# I haven't actually caught the server doing this.", "self", ".", "shutdown", "(", ")", "return", "elif", "not", "sockdata", ":", "# Empty?", "continue", "# Try again.", "else", ":", "self", ".", "S", ".", "parse_server_str", "(", "sockdata", ")", "if", "self", ".", "debug", ":", "print", "self", ".", "S", "break" ]
https://github.com/YunzhuLi/InfoGAIL/blob/68058c17d9ca9d959435082f1c48cd7b637d6f40/wgail_info_0/snakeoil_gym.py#L193-L221
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/concurrent/futures/process.py
python
_chain_from_iterable_of_lists
(iterable)
Specialized implementation of itertools.chain.from_iterable. Each item in *iterable* should be a list. This function is careful not to keep references to yielded objects.
Specialized implementation of itertools.chain.from_iterable. Each item in *iterable* should be a list. This function is careful not to keep references to yielded objects.
[ "Specialized", "implementation", "of", "itertools", ".", "chain", ".", "from_iterable", ".", "Each", "item", "in", "*", "iterable", "*", "should", "be", "a", "list", ".", "This", "function", "is", "careful", "not", "to", "keep", "references", "to", "yielded", "objects", "." ]
def _chain_from_iterable_of_lists(iterable): """ Specialized implementation of itertools.chain.from_iterable. Each item in *iterable* should be a list. This function is careful not to keep references to yielded objects. """ for element in iterable: element.reverse() while element: yield element.pop()
[ "def", "_chain_from_iterable_of_lists", "(", "iterable", ")", ":", "for", "element", "in", "iterable", ":", "element", ".", "reverse", "(", ")", "while", "element", ":", "yield", "element", ".", "pop", "(", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/concurrent/futures/process.py#L553-L562
rampageX/firmware-mod-kit
c94cd6aeee50d92ec5280a6dba6d74828fd3606b
src/binwalk-2.1.1/src/binwalk/modules/hashmatch.py
python
HashMatch.hash_files
(self, needle, haystack)
Compare one file against a list of other files. Returns a list of tuple results.
Compare one file against a list of other files. Returns a list of tuple results.
[ "Compare", "one", "file", "against", "a", "list", "of", "other", "files", ".", "Returns", "a", "list", "of", "tuple", "results", "." ]
def hash_files(self, needle, haystack): ''' Compare one file against a list of other files. Returns a list of tuple results. ''' self.total = 0 for f in haystack: m = self._compare_files(needle, f) if m is not None and self.is_match(m): self._show_result(m, f) self.total += 1 if self.max_results and self.total >= self.max_results: break
[ "def", "hash_files", "(", "self", ",", "needle", ",", "haystack", ")", ":", "self", ".", "total", "=", "0", "for", "f", "in", "haystack", ":", "m", "=", "self", ".", "_compare_files", "(", "needle", ",", "f", ")", "if", "m", "is", "not", "None", "and", "self", ".", "is_match", "(", "m", ")", ":", "self", ".", "_show_result", "(", "m", ",", "f", ")", "self", ".", "total", "+=", "1", "if", "self", ".", "max_results", "and", "self", ".", "total", ">=", "self", ".", "max_results", ":", "break" ]
https://github.com/rampageX/firmware-mod-kit/blob/c94cd6aeee50d92ec5280a6dba6d74828fd3606b/src/binwalk-2.1.1/src/binwalk/modules/hashmatch.py#L233-L248
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py
python
_ValidateSourcesForMSVSProject
(spec, version)
Makes sure if duplicate basenames are not specified in the source list. Arguments: spec: The target dictionary containing the properties of the target. version: The VisualStudioVersion object.
Makes sure if duplicate basenames are not specified in the source list.
[ "Makes", "sure", "if", "duplicate", "basenames", "are", "not", "specified", "in", "the", "source", "list", "." ]
def _ValidateSourcesForMSVSProject(spec, version): """Makes sure if duplicate basenames are not specified in the source list. Arguments: spec: The target dictionary containing the properties of the target. version: The VisualStudioVersion object. """ # This validation should not be applied to MSVC2010 and later. assert not version.UsesVcxproj() # TODO: Check if MSVC allows this for loadable_module targets. if spec.get('type', None) not in ('static_library', 'shared_library'): return sources = spec.get('sources', []) basenames = {} for source in sources: name, ext = os.path.splitext(source) is_compiled_file = ext in [ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] if not is_compiled_file: continue basename = os.path.basename(name) # Don't include extension. basenames.setdefault(basename, []).append(source) error = '' for basename, files in basenames.iteritems(): if len(files) > 1: error += ' %s: %s\n' % (basename, ' '.join(files)) if error: print('static library %s has several files with the same basename:\n' % spec['target_name'] + error + 'MSVC08 cannot handle that.') raise GypError('Duplicate basenames in sources section, see list above')
[ "def", "_ValidateSourcesForMSVSProject", "(", "spec", ",", "version", ")", ":", "# This validation should not be applied to MSVC2010 and later.", "assert", "not", "version", ".", "UsesVcxproj", "(", ")", "# TODO: Check if MSVC allows this for loadable_module targets.", "if", "spec", ".", "get", "(", "'type'", ",", "None", ")", "not", "in", "(", "'static_library'", ",", "'shared_library'", ")", ":", "return", "sources", "=", "spec", ".", "get", "(", "'sources'", ",", "[", "]", ")", "basenames", "=", "{", "}", "for", "source", "in", "sources", ":", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "source", ")", "is_compiled_file", "=", "ext", "in", "[", "'.c'", ",", "'.cc'", ",", "'.cpp'", ",", "'.cxx'", ",", "'.m'", ",", "'.mm'", ",", "'.s'", ",", "'.S'", "]", "if", "not", "is_compiled_file", ":", "continue", "basename", "=", "os", ".", "path", ".", "basename", "(", "name", ")", "# Don't include extension.", "basenames", ".", "setdefault", "(", "basename", ",", "[", "]", ")", ".", "append", "(", "source", ")", "error", "=", "''", "for", "basename", ",", "files", "in", "basenames", ".", "iteritems", "(", ")", ":", "if", "len", "(", "files", ")", ">", "1", ":", "error", "+=", "' %s: %s\\n'", "%", "(", "basename", ",", "' '", ".", "join", "(", "files", ")", ")", "if", "error", ":", "print", "(", "'static library %s has several files with the same basename:\\n'", "%", "spec", "[", "'target_name'", "]", "+", "error", "+", "'MSVC08 cannot handle that.'", ")", "raise", "GypError", "(", "'Duplicate basenames in sources section, see list above'", ")" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py#L947-L979
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/receptive_field/python/util/receptive_field.py
python
_pool_kernel_size
(node)
return kernel_size_x, kernel_size_y
Computes kernel size given a TF pooling node. Args: node: Tensorflow node (NodeDef proto). Returns: kernel_size_x: Kernel size for horizontal direction (integer). kernel_size_y: Kernel size for vertical direction (integer). Raises: ValueError: If pooling is invalid.
Computes kernel size given a TF pooling node.
[ "Computes", "kernel", "size", "given", "a", "TF", "pooling", "node", "." ]
def _pool_kernel_size(node): """Computes kernel size given a TF pooling node. Args: node: Tensorflow node (NodeDef proto). Returns: kernel_size_x: Kernel size for horizontal direction (integer). kernel_size_y: Kernel size for vertical direction (integer). Raises: ValueError: If pooling is invalid. """ ksize = node.attr["ksize"] kernel_size_y = ksize.list.i[1] kernel_size_x = ksize.list.i[2] if ksize.list.i[0] != 1: raise ValueError("pool ksize for first dim is not 1") if ksize.list.i[3] != 1: raise ValueError("pool ksize for last dim is not 1") return kernel_size_x, kernel_size_y
[ "def", "_pool_kernel_size", "(", "node", ")", ":", "ksize", "=", "node", ".", "attr", "[", "\"ksize\"", "]", "kernel_size_y", "=", "ksize", ".", "list", ".", "i", "[", "1", "]", "kernel_size_x", "=", "ksize", ".", "list", ".", "i", "[", "2", "]", "if", "ksize", ".", "list", ".", "i", "[", "0", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"pool ksize for first dim is not 1\"", ")", "if", "ksize", ".", "list", ".", "i", "[", "3", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"pool ksize for last dim is not 1\"", ")", "return", "kernel_size_x", ",", "kernel_size_y" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/receptive_field/python/util/receptive_field.py#L131-L151
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
net/tools/quic/benchmark/run_client.py
python
PageloadExperiment.__init__
(self, use_wget, quic_binary_dir, quic_server_address, quic_server_port)
Initialize PageloadExperiment. Args: use_wget: Whether to use wget. quic_binary_dir: Directory for quic_binary. quic_server_address: IP address of quic server. quic_server_port: Port of the quic server.
Initialize PageloadExperiment.
[ "Initialize", "PageloadExperiment", "." ]
def __init__(self, use_wget, quic_binary_dir, quic_server_address, quic_server_port): """Initialize PageloadExperiment. Args: use_wget: Whether to use wget. quic_binary_dir: Directory for quic_binary. quic_server_address: IP address of quic server. quic_server_port: Port of the quic server. """ self.use_wget = use_wget self.quic_binary_dir = quic_binary_dir self.quic_server_address = quic_server_address self.quic_server_port = quic_server_port if not use_wget and not os.path.isfile(quic_binary_dir + '/quic_client'): raise IOError('There is no quic_client in the given dir: %s.' % quic_binary_dir)
[ "def", "__init__", "(", "self", ",", "use_wget", ",", "quic_binary_dir", ",", "quic_server_address", ",", "quic_server_port", ")", ":", "self", ".", "use_wget", "=", "use_wget", "self", ".", "quic_binary_dir", "=", "quic_binary_dir", "self", ".", "quic_server_address", "=", "quic_server_address", "self", ".", "quic_server_port", "=", "quic_server_port", "if", "not", "use_wget", "and", "not", "os", ".", "path", ".", "isfile", "(", "quic_binary_dir", "+", "'/quic_client'", ")", ":", "raise", "IOError", "(", "'There is no quic_client in the given dir: %s.'", "%", "quic_binary_dir", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/net/tools/quic/benchmark/run_client.py#L44-L60
OPAE/opae-sdk
221124343c8275243a249eb72d69e0ea2d568d1b
python/opae.admin/opae/admin/tools/fpgaotsu.py
python
otsu_manifest_loader.load_and_validate
(self)
return obj
Load and Verify contents Verify that the required keys are present. Verify that all "filename" keys correspond to files that are present on the system. Verify that all address fields are hex numbers.
Load and Verify contents
[ "Load", "and", "Verify", "contents" ]
def load_and_validate(self): """Load and Verify contents Verify that the required keys are present. Verify that all "filename" keys correspond to files that are present on the system. Verify that all address fields are hex numbers. """ try: obj = json.load(self._fp) except ValueError: msg = 'Invalid JSON format in {}'.format(self._fp.name) LOG.exception(msg) return None try: self.validate_mandatory_keys(obj) self.validate_requires_section(obj) except (KeyError, TypeError, ValueError) as exc: LOG.exception(exc) return None directory = os.path.dirname(self._fp.name) for item in obj['flash']: try: self.validate_flash_section(item, directory) except (KeyError, TypeError, OSError, ValueError) as flsh_exc: LOG.exception(flsh_exc) return None return obj
[ "def", "load_and_validate", "(", "self", ")", ":", "try", ":", "obj", "=", "json", ".", "load", "(", "self", ".", "_fp", ")", "except", "ValueError", ":", "msg", "=", "'Invalid JSON format in {}'", ".", "format", "(", "self", ".", "_fp", ".", "name", ")", "LOG", ".", "exception", "(", "msg", ")", "return", "None", "try", ":", "self", ".", "validate_mandatory_keys", "(", "obj", ")", "self", ".", "validate_requires_section", "(", "obj", ")", "except", "(", "KeyError", ",", "TypeError", ",", "ValueError", ")", "as", "exc", ":", "LOG", ".", "exception", "(", "exc", ")", "return", "None", "directory", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "_fp", ".", "name", ")", "for", "item", "in", "obj", "[", "'flash'", "]", ":", "try", ":", "self", ".", "validate_flash_section", "(", "item", ",", "directory", ")", "except", "(", "KeyError", ",", "TypeError", ",", "OSError", ",", "ValueError", ")", "as", "flsh_exc", ":", "LOG", ".", "exception", "(", "flsh_exc", ")", "return", "None", "return", "obj" ]
https://github.com/OPAE/opae-sdk/blob/221124343c8275243a249eb72d69e0ea2d568d1b/python/opae.admin/opae/admin/tools/fpgaotsu.py#L220-L250
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_aarch64/python2.7/dist-packages/geographic_msgs/msg/_GeographicMap.py
python
GeographicMap.deserialize
(self, str)
unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str``
unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str``
[ "unpack", "serialized", "message", "in", "str", "into", "this", "message", "instance", ":", "param", "str", ":", "byte", "array", "of", "serialized", "message", "str" ]
def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: if self.header is None: self.header = std_msgs.msg.Header() if self.id is None: self.id = uuid_msgs.msg.UniqueID() if self.bounds is None: self.bounds = geographic_msgs.msg.BoundingBox() if self.points is None: self.points = None if self.features is None: self.features = None if self.props is None: self.props = None end = 0 _x = self start = end end += 12 (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.header.frame_id = str[start:end].decode('utf-8') else: self.header.frame_id = str[start:end] start = end end += 16 self.id.uuid = str[start:end] _x = self start = end end += 48 (_x.bounds.min_pt.latitude, _x.bounds.min_pt.longitude, _x.bounds.min_pt.altitude, _x.bounds.max_pt.latitude, _x.bounds.max_pt.longitude, _x.bounds.max_pt.altitude,) = _struct_6d.unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.points = [] for i in range(0, length): val1 = geographic_msgs.msg.WayPoint() _v4 = val1.id start = end end += 16 _v4.uuid = str[start:end] _v5 = val1.position _x = _v5 start = end end += 24 (_x.latitude, _x.longitude, _x.altitude,) = _struct_3d.unpack(str[start:end]) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.props = [] for i in range(0, length): val2 = geographic_msgs.msg.KeyValue() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.key = str[start:end].decode('utf-8') else: val2.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.value = str[start:end].decode('utf-8') else: val2.value = str[start:end] val1.props.append(val2) self.points.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.features = [] for i in range(0, length): val1 = geographic_msgs.msg.MapFeature() _v6 = val1.id start = end end += 16 _v6.uuid = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.components = [] for i in range(0, length): val2 = uuid_msgs.msg.UniqueID() start = end end += 16 val2.uuid = str[start:end] val1.components.append(val2) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) val1.props = [] for i in range(0, length): val2 = geographic_msgs.msg.KeyValue() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.key = str[start:end].decode('utf-8') else: val2.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val2.value = str[start:end].decode('utf-8') else: val2.value = str[start:end] val1.props.append(val2) self.features.append(val1) start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) self.props = [] for i in range(0, length): val1 = geographic_msgs.msg.KeyValue() start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.key = str[start:end].decode('utf-8') else: val1.key = str[start:end] start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: val1.value = str[start:end].decode('utf-8') else: val1.value = str[start:end] self.props.append(val1) return self except struct.error as e: raise genpy.DeserializationError(e)
[ "def", "deserialize", "(", "self", ",", "str", ")", ":", "try", ":", "if", "self", ".", "header", "is", "None", ":", "self", ".", "header", "=", "std_msgs", ".", "msg", ".", "Header", "(", ")", "if", "self", ".", "id", "is", "None", ":", "self", ".", "id", "=", "uuid_msgs", ".", "msg", ".", "UniqueID", "(", ")", "if", "self", ".", "bounds", "is", "None", ":", "self", ".", "bounds", "=", "geographic_msgs", ".", "msg", ".", "BoundingBox", "(", ")", "if", "self", ".", "points", "is", "None", ":", "self", ".", "points", "=", "None", "if", "self", ".", "features", "is", "None", ":", "self", ".", "features", "=", "None", "if", "self", ".", "props", "is", "None", ":", "self", ".", "props", "=", "None", "end", "=", "0", "_x", "=", "self", "start", "=", "end", "end", "+=", "12", "(", "_x", ".", "header", ".", "seq", ",", "_x", ".", "header", ".", "stamp", ".", "secs", ",", "_x", ".", "header", ".", "stamp", ".", "nsecs", ",", ")", "=", "_struct_3I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "self", ".", "header", ".", "frame_id", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "self", ".", "header", ".", "frame_id", "=", "str", "[", "start", ":", "end", "]", "start", "=", "end", "end", "+=", "16", "self", ".", "id", ".", "uuid", "=", "str", "[", "start", ":", "end", "]", "_x", "=", "self", "start", "=", "end", "end", "+=", "48", "(", "_x", ".", "bounds", ".", "min_pt", ".", "latitude", ",", "_x", ".", "bounds", ".", "min_pt", ".", "longitude", ",", "_x", ".", "bounds", ".", "min_pt", ".", "altitude", ",", "_x", ".", "bounds", ".", "max_pt", ".", "latitude", ",", "_x", ".", "bounds", ".", "max_pt", ".", "longitude", ",", "_x", ".", "bounds", ".", "max_pt", ".", "altitude", ",", ")", "=", "_struct_6d", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "self", ".", "points", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "val1", "=", "geographic_msgs", ".", "msg", ".", "WayPoint", "(", ")", "_v4", "=", "val1", ".", "id", "start", "=", "end", "end", "+=", "16", "_v4", ".", "uuid", "=", "str", "[", "start", ":", "end", "]", "_v5", "=", "val1", ".", "position", "_x", "=", "_v5", "start", "=", "end", "end", "+=", "24", "(", "_x", ".", "latitude", ",", "_x", ".", "longitude", ",", "_x", ".", "altitude", ",", ")", "=", "_struct_3d", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "val1", ".", "props", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "val2", "=", "geographic_msgs", ".", "msg", ".", "KeyValue", "(", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "val2", ".", "key", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "val2", ".", "key", "=", "str", "[", "start", ":", "end", "]", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "val2", ".", "value", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "val2", ".", "value", "=", "str", "[", "start", ":", "end", "]", "val1", ".", "props", ".", "append", "(", "val2", ")", "self", ".", "points", ".", "append", "(", "val1", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "self", ".", "features", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "val1", "=", "geographic_msgs", ".", "msg", ".", "MapFeature", "(", ")", "_v6", "=", "val1", ".", "id", "start", "=", "end", "end", "+=", "16", "_v6", ".", "uuid", "=", "str", "[", "start", ":", "end", "]", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "val1", ".", "components", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "val2", "=", "uuid_msgs", ".", "msg", ".", "UniqueID", "(", ")", "start", "=", "end", "end", "+=", "16", "val2", ".", "uuid", "=", "str", "[", "start", ":", "end", "]", "val1", ".", "components", ".", "append", "(", "val2", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "val1", ".", "props", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "val2", "=", "geographic_msgs", ".", "msg", ".", "KeyValue", "(", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "val2", ".", "key", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "val2", ".", "key", "=", "str", "[", "start", ":", "end", "]", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "val2", ".", "value", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "val2", ".", "value", "=", "str", "[", "start", ":", "end", "]", "val1", ".", "props", ".", "append", "(", "val2", ")", "self", ".", "features", ".", "append", "(", "val1", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "self", ".", "props", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "length", ")", ":", "val1", "=", "geographic_msgs", ".", "msg", ".", "KeyValue", "(", ")", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "val1", ".", "key", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "val1", ".", "key", "=", "str", "[", "start", ":", "end", "]", "start", "=", "end", "end", "+=", "4", "(", "length", ",", ")", "=", "_struct_I", ".", "unpack", "(", "str", "[", "start", ":", "end", "]", ")", "start", "=", "end", "end", "+=", "length", "if", "python3", ":", "val1", ".", "value", "=", "str", "[", "start", ":", "end", "]", ".", "decode", "(", "'utf-8'", ")", "else", ":", "val1", ".", "value", "=", "str", "[", "start", ":", "end", "]", "self", ".", "props", ".", "append", "(", "val1", ")", "return", "self", "except", "struct", ".", "error", "as", "e", ":", "raise", "genpy", ".", "DeserializationError", "(", "e", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_aarch64/python2.7/dist-packages/geographic_msgs/msg/_GeographicMap.py#L289-L442
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/config.py
python
_get_option
(target_obj, key)
return getter()
Given a target object and option key, get that option from the target object, either through a get_{key} method or from an attribute directly.
Given a target object and option key, get that option from the target object, either through a get_{key} method or from an attribute directly.
[ "Given", "a", "target", "object", "and", "option", "key", "get", "that", "option", "from", "the", "target", "object", "either", "through", "a", "get_", "{", "key", "}", "method", "or", "from", "an", "attribute", "directly", "." ]
def _get_option(target_obj, key): """ Given a target object and option key, get that option from the target object, either through a get_{key} method or from an attribute directly. """ getter_name = 'get_{key}'.format(**locals()) by_attribute = functools.partial(getattr, target_obj, key) getter = getattr(target_obj, getter_name, by_attribute) return getter()
[ "def", "_get_option", "(", "target_obj", ",", "key", ")", ":", "getter_name", "=", "'get_{key}'", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "by_attribute", "=", "functools", ".", "partial", "(", "getattr", ",", "target_obj", ",", "key", ")", "getter", "=", "getattr", "(", "target_obj", ",", "getter_name", ",", "by_attribute", ")", "return", "getter", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/setuptools/config.py#L109-L118
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/_extends/graph_kernel/expanders/square_sum_all.py
python
SquareSumAll._expand
(self, graph_builder)
return result0, result1
do expand
do expand
[ "do", "expand" ]
def _expand(self, graph_builder): """do expand""" x0 = self.inputs[0] x1 = self.inputs[1] ori_shape = x0.shape axis = [] for i, _ in enumerate(ori_shape): axis.append(i) square_res0 = graph_builder.emit('Mul', [x0, x0]) square_res1 = graph_builder.emit('Mul', [x1, x1]) result0 = graph_builder.emit('ReduceSum', [square_res0], attrs={'reduce_axis': axis, 'keep_dims': False}) result1 = graph_builder.emit('ReduceSum', [square_res1], attrs={'reduce_axis': axis, 'keep_dims': False}) return result0, result1
[ "def", "_expand", "(", "self", ",", "graph_builder", ")", ":", "x0", "=", "self", ".", "inputs", "[", "0", "]", "x1", "=", "self", ".", "inputs", "[", "1", "]", "ori_shape", "=", "x0", ".", "shape", "axis", "=", "[", "]", "for", "i", ",", "_", "in", "enumerate", "(", "ori_shape", ")", ":", "axis", ".", "append", "(", "i", ")", "square_res0", "=", "graph_builder", ".", "emit", "(", "'Mul'", ",", "[", "x0", ",", "x0", "]", ")", "square_res1", "=", "graph_builder", ".", "emit", "(", "'Mul'", ",", "[", "x1", ",", "x1", "]", ")", "result0", "=", "graph_builder", ".", "emit", "(", "'ReduceSum'", ",", "[", "square_res0", "]", ",", "attrs", "=", "{", "'reduce_axis'", ":", "axis", ",", "'keep_dims'", ":", "False", "}", ")", "result1", "=", "graph_builder", ".", "emit", "(", "'ReduceSum'", ",", "[", "square_res1", "]", ",", "attrs", "=", "{", "'reduce_axis'", ":", "axis", ",", "'keep_dims'", ":", "False", "}", ")", "return", "result0", ",", "result1" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/_extends/graph_kernel/expanders/square_sum_all.py#L28-L43
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/attr/_make.py
python
fields_dict
(cls)
return ordered_dict(((a.name, a) for a in attrs))
Return an ordered dictionary of ``attrs`` attributes for a class, whose keys are the attribute names. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` class. :rtype: an ordered dict where keys are attribute names and values are `attr.Attribute`\\ s. This will be a `dict` if it's naturally ordered like on Python 3.6+ or an :class:`~collections.OrderedDict` otherwise. .. versionadded:: 18.1.0
Return an ordered dictionary of ``attrs`` attributes for a class, whose keys are the attribute names.
[ "Return", "an", "ordered", "dictionary", "of", "attrs", "attributes", "for", "a", "class", "whose", "keys", "are", "the", "attribute", "names", "." ]
def fields_dict(cls): """ Return an ordered dictionary of ``attrs`` attributes for a class, whose keys are the attribute names. :param type cls: Class to introspect. :raise TypeError: If *cls* is not a class. :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` class. :rtype: an ordered dict where keys are attribute names and values are `attr.Attribute`\\ s. This will be a `dict` if it's naturally ordered like on Python 3.6+ or an :class:`~collections.OrderedDict` otherwise. .. versionadded:: 18.1.0 """ if not isclass(cls): raise TypeError("Passed object must be a class.") attrs = getattr(cls, "__attrs_attrs__", None) if attrs is None: raise NotAnAttrsClassError( "{cls!r} is not an attrs-decorated class.".format(cls=cls) ) return ordered_dict(((a.name, a) for a in attrs))
[ "def", "fields_dict", "(", "cls", ")", ":", "if", "not", "isclass", "(", "cls", ")", ":", "raise", "TypeError", "(", "\"Passed object must be a class.\"", ")", "attrs", "=", "getattr", "(", "cls", ",", "\"__attrs_attrs__\"", ",", "None", ")", "if", "attrs", "is", "None", ":", "raise", "NotAnAttrsClassError", "(", "\"{cls!r} is not an attrs-decorated class.\"", ".", "format", "(", "cls", "=", "cls", ")", ")", "return", "ordered_dict", "(", "(", "(", "a", ".", "name", ",", "a", ")", "for", "a", "in", "attrs", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/attr/_make.py#L1405-L1430
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/integrate/_ivp/common.py
python
validate_tol
(rtol, atol, n)
return rtol, atol
Validate tolerance values.
Validate tolerance values.
[ "Validate", "tolerance", "values", "." ]
def validate_tol(rtol, atol, n): """Validate tolerance values.""" if rtol < 100 * EPS: warn("`rtol` is too low, setting to {}".format(100 * EPS)) rtol = 100 * EPS atol = np.asarray(atol) if atol.ndim > 0 and atol.shape != (n,): raise ValueError("`atol` has wrong shape.") if np.any(atol < 0): raise ValueError("`atol` must be positive.") return rtol, atol
[ "def", "validate_tol", "(", "rtol", ",", "atol", ",", "n", ")", ":", "if", "rtol", "<", "100", "*", "EPS", ":", "warn", "(", "\"`rtol` is too low, setting to {}\"", ".", "format", "(", "100", "*", "EPS", ")", ")", "rtol", "=", "100", "*", "EPS", "atol", "=", "np", ".", "asarray", "(", "atol", ")", "if", "atol", ".", "ndim", ">", "0", "and", "atol", ".", "shape", "!=", "(", "n", ",", ")", ":", "raise", "ValueError", "(", "\"`atol` has wrong shape.\"", ")", "if", "np", ".", "any", "(", "atol", "<", "0", ")", ":", "raise", "ValueError", "(", "\"`atol` must be positive.\"", ")", "return", "rtol", ",", "atol" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/integrate/_ivp/common.py#L44-L57
fluffos/fluffos
bf54d5d4acef4de49dbed7d184849a7b7b354156
src/thirdparty/widecharwidth/generate.py
python
set_emoji_widths
(emoji_data_lines, cps)
Read from emoji-data.txt, set codepoint widths
Read from emoji-data.txt, set codepoint widths
[ "Read", "from", "emoji", "-", "data", ".", "txt", "set", "codepoint", "widths" ]
def set_emoji_widths(emoji_data_lines, cps): """ Read from emoji-data.txt, set codepoint widths """ for line in emoji_data_lines: for (cp, version) in parse_emoji_line(line): # Don't consider <=1F000 values as emoji. These can only be made # emoji through the variation selector which interacts terribly # with wcwidth(). if cp < 0x1F000: continue # Skip codepoints that have a version of 0.0 as they were marked # in the emoji-data file as reserved/unused: if version <= 1.0: continue # Skip codepoints that are explicitly not wide. # For example U+1F336 ("Hot Pepper") renders like any emoji but is # marked as neutral in EAW so has width 1 for some reason. if cps[cp].width == 1: continue # If this emoji was introduced before Unicode 9, then it was widened in 9. cps[cp].width = 2 if version >= 9.0 else WIDTH_WIDENED_IN_9
[ "def", "set_emoji_widths", "(", "emoji_data_lines", ",", "cps", ")", ":", "for", "line", "in", "emoji_data_lines", ":", "for", "(", "cp", ",", "version", ")", "in", "parse_emoji_line", "(", "line", ")", ":", "# Don't consider <=1F000 values as emoji. These can only be made", "# emoji through the variation selector which interacts terribly", "# with wcwidth().", "if", "cp", "<", "0x1F000", ":", "continue", "# Skip codepoints that have a version of 0.0 as they were marked", "# in the emoji-data file as reserved/unused:", "if", "version", "<=", "1.0", ":", "continue", "# Skip codepoints that are explicitly not wide.", "# For example U+1F336 (\"Hot Pepper\") renders like any emoji but is", "# marked as neutral in EAW so has width 1 for some reason.", "if", "cps", "[", "cp", "]", ".", "width", "==", "1", ":", "continue", "# If this emoji was introduced before Unicode 9, then it was widened in 9.", "cps", "[", "cp", "]", ".", "width", "=", "2", "if", "version", ">=", "9.0", "else", "WIDTH_WIDENED_IN_9" ]
https://github.com/fluffos/fluffos/blob/bf54d5d4acef4de49dbed7d184849a7b7b354156/src/thirdparty/widecharwidth/generate.py#L451-L473
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py
python
MH.get_sequences
(self)
return results
Return a name-to-key-list dictionary to define each sequence.
Return a name-to-key-list dictionary to define each sequence.
[ "Return", "a", "name", "-", "to", "-", "key", "-", "list", "dictionary", "to", "define", "each", "sequence", "." ]
def get_sequences(self): """Return a name-to-key-list dictionary to define each sequence.""" results = {} f = open(os.path.join(self._path, '.mh_sequences'), 'r') try: all_keys = set(self.keys()) for line in f: try: name, contents = line.split(':') keys = set() for spec in contents.split(): if spec.isdigit(): keys.add(int(spec)) else: start, stop = (int(x) for x in spec.split('-')) keys.update(range(start, stop + 1)) results[name] = [key for key in sorted(keys) \ if key in all_keys] if len(results[name]) == 0: del results[name] except ValueError: raise FormatError('Invalid sequence specification: %s' % line.rstrip()) finally: f.close() return results
[ "def", "get_sequences", "(", "self", ")", ":", "results", "=", "{", "}", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_path", ",", "'.mh_sequences'", ")", ",", "'r'", ")", "try", ":", "all_keys", "=", "set", "(", "self", ".", "keys", "(", ")", ")", "for", "line", "in", "f", ":", "try", ":", "name", ",", "contents", "=", "line", ".", "split", "(", "':'", ")", "keys", "=", "set", "(", ")", "for", "spec", "in", "contents", ".", "split", "(", ")", ":", "if", "spec", ".", "isdigit", "(", ")", ":", "keys", ".", "add", "(", "int", "(", "spec", ")", ")", "else", ":", "start", ",", "stop", "=", "(", "int", "(", "x", ")", "for", "x", "in", "spec", ".", "split", "(", "'-'", ")", ")", "keys", ".", "update", "(", "range", "(", "start", ",", "stop", "+", "1", ")", ")", "results", "[", "name", "]", "=", "[", "key", "for", "key", "in", "sorted", "(", "keys", ")", "if", "key", "in", "all_keys", "]", "if", "len", "(", "results", "[", "name", "]", ")", "==", "0", ":", "del", "results", "[", "name", "]", "except", "ValueError", ":", "raise", "FormatError", "(", "'Invalid sequence specification: %s'", "%", "line", ".", "rstrip", "(", ")", ")", "finally", ":", "f", ".", "close", "(", ")", "return", "results" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py#L1122-L1147
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/pydoc.py
python
ispackage
(path)
return False
Guess whether a path refers to a package directory.
Guess whether a path refers to a package directory.
[ "Guess", "whether", "a", "path", "refers", "to", "a", "package", "directory", "." ]
def ispackage(path): """Guess whether a path refers to a package directory.""" if os.path.isdir(path): for ext in ('.py', '.pyc'): if os.path.isfile(os.path.join(path, '__init__' + ext)): return True return False
[ "def", "ispackage", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "ext", "in", "(", "'.py'", ",", "'.pyc'", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'__init__'", "+", "ext", ")", ")", ":", "return", "True", "return", "False" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/pydoc.py#L230-L236
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/database.py
python
BaseInstalledDistribution.__init__
(self, metadata, path, env=None)
Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found.
Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found.
[ "Initialise", "an", "instance", ".", ":", "param", "metadata", ":", "An", "instance", "of", ":", "class", ":", "Metadata", "which", "describes", "the", "distribution", ".", "This", "will", "normally", "have", "been", "initialised", "from", "a", "metadata", "file", "in", "the", "path", ".", ":", "param", "path", ":", "The", "path", "of", "the", ".", "dist", "-", "info", "or", ".", "egg", "-", "info", "directory", "for", "the", "distribution", ".", ":", "param", "env", ":", "This", "is", "normally", "the", ":", "class", ":", "DistributionPath", "instance", "where", "this", "distribution", "was", "found", "." ]
def __init__(self, metadata, path, env=None): """ Initialise an instance. :param metadata: An instance of :class:`Metadata` which describes the distribution. This will normally have been initialised from a metadata file in the ``path``. :param path: The path of the ``.dist-info`` or ``.egg-info`` directory for the distribution. :param env: This is normally the :class:`DistributionPath` instance where this distribution was found. """ super(BaseInstalledDistribution, self).__init__(metadata) self.path = path self.dist_path = env
[ "def", "__init__", "(", "self", ",", "metadata", ",", "path", ",", "env", "=", "None", ")", ":", "super", "(", "BaseInstalledDistribution", ",", "self", ")", ".", "__init__", "(", "metadata", ")", "self", ".", "path", "=", "path", "self", ".", "dist_path", "=", "env" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/pip/vendor/distlib/database.py#L463-L476
SoarGroup/Soar
a1c5e249499137a27da60533c72969eef3b8ab6b
scons/scons-local-4.1.0/SCons/Tool/javac.py
python
emit_java_classes
(target, source, env)
return full_tlist, slist
Create and return lists of source java files and their corresponding target class files.
Create and return lists of source java files and their corresponding target class files.
[ "Create", "and", "return", "lists", "of", "source", "java", "files", "and", "their", "corresponding", "target", "class", "files", "." ]
def emit_java_classes(target, source, env): """Create and return lists of source java files and their corresponding target class files. """ java_suffix = env.get('JAVASUFFIX', '.java') class_suffix = env.get('JAVACLASSSUFFIX', '.class') target[0].must_be_same(SCons.Node.FS.Dir) classdir = target[0] s = source[0].rentry().disambiguate() if isinstance(s, SCons.Node.FS.File): sourcedir = s.dir.rdir() elif isinstance(s, SCons.Node.FS.Dir): sourcedir = s.rdir() else: raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__) slist = [] js = _my_normcase(java_suffix) for entry in source: entry = entry.rentry().disambiguate() if isinstance(entry, SCons.Node.FS.File): slist.append(entry) elif isinstance(entry, SCons.Node.FS.Dir): result = OrderedDict() dirnode = entry.rdir() def find_java_files(arg, dirpath, filenames): java_files = sorted([n for n in filenames if _my_normcase(n).endswith(js)]) mydir = dirnode.Dir(dirpath) java_paths = [mydir.File(f) for f in java_files] for jp in java_paths: arg[jp] = True for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()): find_java_files(result, dirpath, filenames) entry.walk(find_java_files, result) slist.extend(list(result.keys())) else: raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__) version = env.get('JAVAVERSION', '1.4') full_tlist = [] for f in slist: tlist = [] source_file_based = True pkg_dir = None if not f.is_derived(): pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version) if classes: source_file_based = False if pkg_dir: d = target[0].Dir(pkg_dir) p = pkg_dir + os.sep else: d = target[0] p = '' for c in classes: t = d.File(c + class_suffix) t.attributes.java_classdir = classdir t.attributes.java_sourcedir = sourcedir t.attributes.java_classname = classname(p + c) tlist.append(t) if source_file_based: base = f.name[:-len(java_suffix)] if pkg_dir: t = target[0].Dir(pkg_dir).File(base + class_suffix) else: t = target[0].File(base + class_suffix) t.attributes.java_classdir = classdir t.attributes.java_sourcedir = f.dir t.attributes.java_classname = classname(base) tlist.append(t) for t in tlist: t.set_specific_source([f]) full_tlist.extend(tlist) return full_tlist, slist
[ "def", "emit_java_classes", "(", "target", ",", "source", ",", "env", ")", ":", "java_suffix", "=", "env", ".", "get", "(", "'JAVASUFFIX'", ",", "'.java'", ")", "class_suffix", "=", "env", ".", "get", "(", "'JAVACLASSSUFFIX'", ",", "'.class'", ")", "target", "[", "0", "]", ".", "must_be_same", "(", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", "classdir", "=", "target", "[", "0", "]", "s", "=", "source", "[", "0", "]", ".", "rentry", "(", ")", ".", "disambiguate", "(", ")", "if", "isinstance", "(", "s", ",", "SCons", ".", "Node", ".", "FS", ".", "File", ")", ":", "sourcedir", "=", "s", ".", "dir", ".", "rdir", "(", ")", "elif", "isinstance", "(", "s", ",", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", ":", "sourcedir", "=", "s", ".", "rdir", "(", ")", "else", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Java source must be File or Dir, not '%s'\"", "%", "s", ".", "__class__", ")", "slist", "=", "[", "]", "js", "=", "_my_normcase", "(", "java_suffix", ")", "for", "entry", "in", "source", ":", "entry", "=", "entry", ".", "rentry", "(", ")", ".", "disambiguate", "(", ")", "if", "isinstance", "(", "entry", ",", "SCons", ".", "Node", ".", "FS", ".", "File", ")", ":", "slist", ".", "append", "(", "entry", ")", "elif", "isinstance", "(", "entry", ",", "SCons", ".", "Node", ".", "FS", ".", "Dir", ")", ":", "result", "=", "OrderedDict", "(", ")", "dirnode", "=", "entry", ".", "rdir", "(", ")", "def", "find_java_files", "(", "arg", ",", "dirpath", ",", "filenames", ")", ":", "java_files", "=", "sorted", "(", "[", "n", "for", "n", "in", "filenames", "if", "_my_normcase", "(", "n", ")", ".", "endswith", "(", "js", ")", "]", ")", "mydir", "=", "dirnode", ".", "Dir", "(", "dirpath", ")", "java_paths", "=", "[", "mydir", ".", "File", "(", "f", ")", "for", "f", "in", "java_files", "]", "for", "jp", "in", "java_paths", ":", "arg", "[", "jp", "]", "=", "True", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "dirnode", ".", "get_abspath", "(", ")", ")", ":", "find_java_files", "(", "result", ",", "dirpath", ",", "filenames", ")", "entry", ".", "walk", "(", "find_java_files", ",", "result", ")", "slist", ".", "extend", "(", "list", "(", "result", ".", "keys", "(", ")", ")", ")", "else", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Java source must be File or Dir, not '%s'\"", "%", "entry", ".", "__class__", ")", "version", "=", "env", ".", "get", "(", "'JAVAVERSION'", ",", "'1.4'", ")", "full_tlist", "=", "[", "]", "for", "f", "in", "slist", ":", "tlist", "=", "[", "]", "source_file_based", "=", "True", "pkg_dir", "=", "None", "if", "not", "f", ".", "is_derived", "(", ")", ":", "pkg_dir", ",", "classes", "=", "parse_java_file", "(", "f", ".", "rfile", "(", ")", ".", "get_abspath", "(", ")", ",", "version", ")", "if", "classes", ":", "source_file_based", "=", "False", "if", "pkg_dir", ":", "d", "=", "target", "[", "0", "]", ".", "Dir", "(", "pkg_dir", ")", "p", "=", "pkg_dir", "+", "os", ".", "sep", "else", ":", "d", "=", "target", "[", "0", "]", "p", "=", "''", "for", "c", "in", "classes", ":", "t", "=", "d", ".", "File", "(", "c", "+", "class_suffix", ")", "t", ".", "attributes", ".", "java_classdir", "=", "classdir", "t", ".", "attributes", ".", "java_sourcedir", "=", "sourcedir", "t", ".", "attributes", ".", "java_classname", "=", "classname", "(", "p", "+", "c", ")", "tlist", ".", "append", "(", "t", ")", "if", "source_file_based", ":", "base", "=", "f", ".", "name", "[", ":", "-", "len", "(", "java_suffix", ")", "]", "if", "pkg_dir", ":", "t", "=", "target", "[", "0", "]", ".", "Dir", "(", "pkg_dir", ")", ".", "File", "(", "base", "+", "class_suffix", ")", "else", ":", "t", "=", "target", "[", "0", "]", ".", "File", "(", "base", "+", "class_suffix", ")", "t", ".", "attributes", ".", "java_classdir", "=", "classdir", "t", ".", "attributes", ".", "java_sourcedir", "=", "f", ".", "dir", "t", ".", "attributes", ".", "java_classname", "=", "classname", "(", "base", ")", "tlist", ".", "append", "(", "t", ")", "for", "t", "in", "tlist", ":", "t", ".", "set_specific_source", "(", "[", "f", "]", ")", "full_tlist", ".", "extend", "(", "tlist", ")", "return", "full_tlist", ",", "slist" ]
https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Tool/javac.py#L49-L130
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/lib-tk/ttk.py
python
_convert_stringval
(value)
return value
Converts a value to, hopefully, a more appropriate Python object.
Converts a value to, hopefully, a more appropriate Python object.
[ "Converts", "a", "value", "to", "hopefully", "a", "more", "appropriate", "Python", "object", "." ]
def _convert_stringval(value): """Converts a value to, hopefully, a more appropriate Python object.""" value = unicode(value) try: value = int(value) except (ValueError, TypeError): pass return value
[ "def", "_convert_stringval", "(", "value", ")", ":", "value", "=", "unicode", "(", "value", ")", "try", ":", "value", "=", "int", "(", "value", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "return", "value" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/ttk.py#L306-L314
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2class.py
python
catalog.convertSGMLCatalog
(self)
return ret
Convert all the SGML catalog entries as XML ones
Convert all the SGML catalog entries as XML ones
[ "Convert", "all", "the", "SGML", "catalog", "entries", "as", "XML", "ones" ]
def convertSGMLCatalog(self): """Convert all the SGML catalog entries as XML ones """ ret = libxml2mod.xmlConvertSGMLCatalog(self._o) return ret
[ "def", "convertSGMLCatalog", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlConvertSGMLCatalog", "(", "self", ".", "_o", ")", "return", "ret" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L4886-L4889
neoml-lib/neoml
a0d370fba05269a1b2258cef126f77bbd2054a3e
NeoML/Python/neoml/Dnn/Solver.py
python
AdaptiveGradient.epsilon
(self)
return self._internal.get_epsilon()
Gets the small value used to avoid division by zero when calculating second moment.
Gets the small value used to avoid division by zero when calculating second moment.
[ "Gets", "the", "small", "value", "used", "to", "avoid", "division", "by", "zero", "when", "calculating", "second", "moment", "." ]
def epsilon(self): """Gets the small value used to avoid division by zero when calculating second moment. """ return self._internal.get_epsilon()
[ "def", "epsilon", "(", "self", ")", ":", "return", "self", ".", "_internal", ".", "get_epsilon", "(", ")" ]
https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/Solver.py#L211-L215
apple/swift
469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893
utils/swift_build_support/swift_build_support/shell.py
python
call_without_sleeping
(command, env=None, dry_run=False, echo=False)
Execute a command during which system sleep is disabled. By default, this ignores the state of the `shell.dry_run` flag.
Execute a command during which system sleep is disabled.
[ "Execute", "a", "command", "during", "which", "system", "sleep", "is", "disabled", "." ]
def call_without_sleeping(command, env=None, dry_run=False, echo=False): """ Execute a command during which system sleep is disabled. By default, this ignores the state of the `shell.dry_run` flag. """ # Disable system sleep, if possible. if platform.system() == 'Darwin': # Don't mutate the caller's copy of the arguments. command = ["caffeinate"] + list(command) call(command, env=env, dry_run=dry_run, echo=echo)
[ "def", "call_without_sleeping", "(", "command", ",", "env", "=", "None", ",", "dry_run", "=", "False", ",", "echo", "=", "False", ")", ":", "# Disable system sleep, if possible.", "if", "platform", ".", "system", "(", ")", "==", "'Darwin'", ":", "# Don't mutate the caller's copy of the arguments.", "command", "=", "[", "\"caffeinate\"", "]", "+", "list", "(", "command", ")", "call", "(", "command", ",", "env", "=", "env", ",", "dry_run", "=", "dry_run", ",", "echo", "=", "echo", ")" ]
https://github.com/apple/swift/blob/469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893/utils/swift_build_support/swift_build_support/shell.py#L101-L113
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/grid.py
python
Grid.HideRow
(*args, **kwargs)
return _grid.Grid_HideRow(*args, **kwargs)
HideRow(self, int row)
HideRow(self, int row)
[ "HideRow", "(", "self", "int", "row", ")" ]
def HideRow(*args, **kwargs): """HideRow(self, int row)""" return _grid.Grid_HideRow(*args, **kwargs)
[ "def", "HideRow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "Grid_HideRow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/grid.py#L1822-L1824
etternagame/etterna
8775f74ac9c353320128609d4b4150672e9a6d04
extern/SQLiteCpp/cpplint.py
python
_IncludeState.CanonicalizeAlphabeticalOrder
(self, header_path)
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path.
Returns a path canonicalized for alphabetical comparison.
[ "Returns", "a", "path", "canonicalized", "for", "alphabetical", "comparison", "." ]
def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. """ return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
[ "def", "CanonicalizeAlphabeticalOrder", "(", "self", ",", "header_path", ")", ":", "return", "header_path", ".", "replace", "(", "'-inl.h'", ",", "'.h'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "lower", "(", ")" ]
https://github.com/etternagame/etterna/blob/8775f74ac9c353320128609d4b4150672e9a6d04/extern/SQLiteCpp/cpplint.py#L585-L598
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/lmbrwaflib/build_configurations.py
python
preprocess_target_platforms
(ctx, platforms, auto_populate_empty=False)
return processed_platforms
Preprocess a list of platforms from user input and list the concrete platform(s). The resulting list will contain concrete platforms expanded from platform aliases if any :param ctx: Context :param platforms: The list of platforms to preprocess :param auto_populate_empty: Option- If the list of platforms is empty, then fill the platforms with all of the enabled platforms :return List of concrete platforms from the input platform
Preprocess a list of platforms from user input and list the concrete platform(s). The resulting list will contain concrete platforms expanded from platform aliases if any :param ctx: Context :param platforms: The list of platforms to preprocess :param auto_populate_empty: Option- If the list of platforms is empty, then fill the platforms with all of the enabled platforms :return List of concrete platforms from the input platform
[ "Preprocess", "a", "list", "of", "platforms", "from", "user", "input", "and", "list", "the", "concrete", "platform", "(", "s", ")", ".", "The", "resulting", "list", "will", "contain", "concrete", "platforms", "expanded", "from", "platform", "aliases", "if", "any", ":", "param", "ctx", ":", "Context", ":", "param", "platforms", ":", "The", "list", "of", "platforms", "to", "preprocess", ":", "param", "auto_populate_empty", ":", "Option", "-", "If", "the", "list", "of", "platforms", "is", "empty", "then", "fill", "the", "platforms", "with", "all", "of", "the", "enabled", "platforms", ":", "return", "List", "of", "concrete", "platforms", "from", "the", "input", "platform" ]
def preprocess_target_platforms(ctx, platforms, auto_populate_empty=False): """ Preprocess a list of platforms from user input and list the concrete platform(s). The resulting list will contain concrete platforms expanded from platform aliases if any :param ctx: Context :param platforms: The list of platforms to preprocess :param auto_populate_empty: Option- If the list of platforms is empty, then fill the platforms with all of the enabled platforms :return List of concrete platforms from the input platform """ processed_platforms = set() if (auto_populate_empty and len(platforms) == 0) or 'all' in platforms: for platform in list(PLATFORM_MAP.keys()): processed_platforms.add(platform) else: for platform in platforms: if platform in PLATFORM_MAP: processed_platforms.add(platform) elif platform in ALIAS_TO_PLATFORMS_MAP: aliases_platforms = ALIAS_TO_PLATFORMS_MAP[platform] for aliased_platform in aliases_platforms: processed_platforms.add(aliased_platform) return processed_platforms
[ "def", "preprocess_target_platforms", "(", "ctx", ",", "platforms", ",", "auto_populate_empty", "=", "False", ")", ":", "processed_platforms", "=", "set", "(", ")", "if", "(", "auto_populate_empty", "and", "len", "(", "platforms", ")", "==", "0", ")", "or", "'all'", "in", "platforms", ":", "for", "platform", "in", "list", "(", "PLATFORM_MAP", ".", "keys", "(", ")", ")", ":", "processed_platforms", ".", "add", "(", "platform", ")", "else", ":", "for", "platform", "in", "platforms", ":", "if", "platform", "in", "PLATFORM_MAP", ":", "processed_platforms", ".", "add", "(", "platform", ")", "elif", "platform", "in", "ALIAS_TO_PLATFORMS_MAP", ":", "aliases_platforms", "=", "ALIAS_TO_PLATFORMS_MAP", "[", "platform", "]", "for", "aliased_platform", "in", "aliases_platforms", ":", "processed_platforms", ".", "add", "(", "aliased_platform", ")", "return", "processed_platforms" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/build_configurations.py#L882-L903
WeitaoVan/L-GM-loss
598582f0631bac876b3eeb8d6c4cd1d780269e03
scripts/cpp_lint.py
python
CheckLanguage
(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error)
Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
Checks rules from the 'C++ language rules' section of cppguide.html.
[ "Checks", "rules", "from", "the", "C", "++", "language", "rules", "section", "of", "cppguide", ".", "html", "." ]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line): include_state.ResetSection() # Make Windows paths like Unix. fullname = os.path.abspath(filename).replace('\\', '/') # TODO(unknown): figure out if they're using default arguments in fn proto. # Check to see if they're using an conversion function cast. # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. match = Search( r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) if match: matched_new = match.group(1) matched_type = match.group(2) matched_funcptr = match.group(3) # gMock methods are defined using some variant of MOCK_METHODx(name, type) # where type may be float(), int(string), etc. Without context they are # virtually indistinguishable from int(x) casts. Likewise, gMock's # MockCallback takes a template parameter of the form return_type(arg_type), # which looks much like the cast we're trying to detect. # # std::function<> wrapper has a similar problem. # # Return types for function pointers also look like casts if they # don't have an extra space. if (matched_new is None and # If new operator, then this isn't a cast not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or Search(r'\bMockCallback<.*>', line) or Search(r'\bstd::function<.*>', line)) and not (matched_funcptr and Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr))): # Try a bit harder to catch gmock lines: the only place where # something looks like an old-style cast is where we declare the # return type of the mocked method, and the only time when we # are missing context is if MOCK_METHOD was split across # multiple lines. The missing MOCK_METHOD is usually one or two # lines back, so scan back one or two lines. # # It's not possible for gmock macros to appear in the first 2 # lines, since the class head + section name takes up 2 lines. if (linenum < 2 or not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]))): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. match = Search( r'(?:&\(([^)]+)\)[\w(])|' r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line) if match and match.group(1) != '*': error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) # Create an extended_line, which is the concatenation of the current and # next lines, for more effective checking of code that may span more than one # line. if linenum + 1 < clean_lines.NumLines(): extended_line = line + clean_lines.elided[linenum + 1] else: extended_line = line # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access. match = Match( r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', line) # Make sure it's not a function. # Function template specialization looks like: "string foo<Type>(...". # Class template definitions look like: "string Foo<Type>::Method(...". # # Also ignore things that look like operators. These are matched separately # because operator names cross non-word boundaries. If we change the pattern # above, we would decrease the accuracy of matching identifiers. if (match and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (match.group(1), match.group(2))) if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') if file_extension == 'h': # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\b', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\b', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1)) # Check if some verboten operator overloading is going on # TODO(unknown): catch out-of-line unary operator&: # class X {}; # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& if Search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { if Search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') # Check for potential format string bugs like printf(foo). # We constrain the pattern not to pick things like DocidForPrintf(foo). # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) # TODO(sugawarayu): Catch the following case. Need to change the calling # convention of the whole function to process multiple line to handle it. # printf( # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') if printf_args: match = Match(r'([\w.\->()]+)$', printf_args) if match and match.group(1) != '__VA_ARGS__': function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1) error(filename, linenum, 'runtime/printf', 4, 'Potential format string bug. Do %s("%%s", %s) instead.' % (function_name, match.group(1))) # Check for potential memset bugs like memset(buf, sizeof(buf), 0). match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): error(filename, linenum, 'runtime/memset', 4, 'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2))) if Search(r'\busing namespace\b', line): error(filename, linenum, 'build/namespaces', 5, 'Do not use namespace using-directives. ' 'Use using-declarations instead.') # Detect variable-length arrays. match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) if (match and match.group(2) != 'return' and match.group(2) != 'delete' and match.group(3).find(']') == -1): # Split the size using space and arithmetic operators as delimiters. # If any of the resulting tokens are not compile time constants then # report the error. tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) is_const = True skip_next = False for tok in tokens: if skip_next: skip_next = False continue if Search(r'sizeof\(.+\)', tok): continue if Search(r'arraysize\(\w+\)', tok): continue tok = tok.lstrip('(') tok = tok.rstrip(')') if not tok: continue if Match(r'\d+', tok): continue if Match(r'0[xX][0-9a-fA-F]+', tok): continue if Match(r'k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue # A catch all for tricky sizeof cases, including 'sizeof expression', # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' # requires skipping the next token because we split on ' ' and '*'. if tok.startswith('sizeof'): skip_next = True continue is_const = False break if not is_const: error(filename, linenum, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing # in the class declaration. match = Match( (r'\s*' r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' r'\(.*\);$'), line) if match and linenum + 1 < clean_lines.NumLines(): next_line = clean_lines.elided[linenum + 1] # We allow some, but not all, declarations of variables to be present # in the statement that defines the class. The [\w\*,\s]* fragment of # the regular expression below allows users to declare instances of # the class or pointers to instances, but not less common types such # as function pointers or arrays. It's a tradeoff between allowing # reasonable code and avoiding trying to parse more C++ using regexps. if not Search(r'^\s*}[\w\*,\s]*;', next_line): error(filename, linenum, 'readability/constructors', 3, match.group(1) + ' should be the last thing in the class') # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. if (file_extension == 'h' and Search(r'\bnamespace\s*{', line) and line[-1] != '\\'): error(filename, linenum, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.')
[ "def", "CheckLanguage", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "file_extension", ",", "include_state", ",", "nesting_state", ",", "error", ")", ":", "# If the line is empty or consists of entirely a comment, no need to", "# check it.", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "if", "not", "line", ":", "return", "match", "=", "_RE_PATTERN_INCLUDE", ".", "search", "(", "line", ")", "if", "match", ":", "CheckIncludeLine", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "include_state", ",", "error", ")", "return", "# Reset include state across preprocessor directives. This is meant", "# to silence warnings for conditional includes.", "if", "Match", "(", "r'^\\s*#\\s*(?:ifdef|elif|else|endif)\\b'", ",", "line", ")", ":", "include_state", ".", "ResetSection", "(", ")", "# Make Windows paths like Unix.", "fullname", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "# TODO(unknown): figure out if they're using default arguments in fn proto.", "# Check to see if they're using an conversion function cast.", "# I just try to capture the most common basic types, though there are more.", "# Parameterless conversion functions, such as bool(), are allowed as they are", "# probably a member operator declaration or default constructor.", "match", "=", "Search", "(", "r'(\\bnew\\s+)?\\b'", "# Grab 'new' operator, if it's there", "r'(int|float|double|bool|char|int32|uint32|int64|uint64)'", "r'(\\([^)].*)'", ",", "line", ")", "if", "match", ":", "matched_new", "=", "match", ".", "group", "(", "1", ")", "matched_type", "=", "match", ".", "group", "(", "2", ")", "matched_funcptr", "=", "match", ".", "group", "(", "3", ")", "# gMock methods are defined using some variant of MOCK_METHODx(name, type)", "# where type may be float(), int(string), etc. Without context they are", "# virtually indistinguishable from int(x) casts. Likewise, gMock's", "# MockCallback takes a template parameter of the form return_type(arg_type),", "# which looks much like the cast we're trying to detect.", "#", "# std::function<> wrapper has a similar problem.", "#", "# Return types for function pointers also look like casts if they", "# don't have an extra space.", "if", "(", "matched_new", "is", "None", "and", "# If new operator, then this isn't a cast", "not", "(", "Match", "(", "r'^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\('", ",", "line", ")", "or", "Search", "(", "r'\\bMockCallback<.*>'", ",", "line", ")", "or", "Search", "(", "r'\\bstd::function<.*>'", ",", "line", ")", ")", "and", "not", "(", "matched_funcptr", "and", "Match", "(", "r'\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\('", ",", "matched_funcptr", ")", ")", ")", ":", "# Try a bit harder to catch gmock lines: the only place where", "# something looks like an old-style cast is where we declare the", "# return type of the mocked method, and the only time when we", "# are missing context is if MOCK_METHOD was split across", "# multiple lines. The missing MOCK_METHOD is usually one or two", "# lines back, so scan back one or two lines.", "#", "# It's not possible for gmock macros to appear in the first 2", "# lines, since the class head + section name takes up 2 lines.", "if", "(", "linenum", "<", "2", "or", "not", "(", "Match", "(", "r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", "or", "Match", "(", "r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "2", "]", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/casting'", ",", "4", ",", "'Using deprecated casting style. '", "'Use static_cast<%s>(...) instead'", "%", "matched_type", ")", "CheckCStyleCast", "(", "filename", ",", "linenum", ",", "line", ",", "clean_lines", ".", "raw_lines", "[", "linenum", "]", ",", "'static_cast'", ",", "r'\\((int|float|double|bool|char|u?int(16|32|64))\\)'", ",", "error", ")", "# This doesn't catch all cases. Consider (const char * const)\"hello\".", "#", "# (char *) \"foo\" should always be a const_cast (reinterpret_cast won't", "# compile).", "if", "CheckCStyleCast", "(", "filename", ",", "linenum", ",", "line", ",", "clean_lines", ".", "raw_lines", "[", "linenum", "]", ",", "'const_cast'", ",", "r'\\((char\\s?\\*+\\s?)\\)\\s*\"'", ",", "error", ")", ":", "pass", "else", ":", "# Check pointer casts for other than string constants", "CheckCStyleCast", "(", "filename", ",", "linenum", ",", "line", ",", "clean_lines", ".", "raw_lines", "[", "linenum", "]", ",", "'reinterpret_cast'", ",", "r'\\((\\w+\\s?\\*+\\s?)\\)'", ",", "error", ")", "# In addition, we look for people taking the address of a cast. This", "# is dangerous -- casts can assign to temporaries, so the pointer doesn't", "# point where you think.", "match", "=", "Search", "(", "r'(?:&\\(([^)]+)\\)[\\w(])|'", "r'(?:&(static|dynamic|down|reinterpret)_cast\\b)'", ",", "line", ")", "if", "match", "and", "match", ".", "group", "(", "1", ")", "!=", "'*'", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/casting'", ",", "4", ",", "(", "'Are you taking an address of a cast? '", "'This is dangerous: could be a temp var. '", "'Take the address before doing the cast, rather than after'", ")", ")", "# Create an extended_line, which is the concatenation of the current and", "# next lines, for more effective checking of code that may span more than one", "# line.", "if", "linenum", "+", "1", "<", "clean_lines", ".", "NumLines", "(", ")", ":", "extended_line", "=", "line", "+", "clean_lines", ".", "elided", "[", "linenum", "+", "1", "]", "else", ":", "extended_line", "=", "line", "# Check for people declaring static/global STL strings at the top level.", "# This is dangerous because the C++ language does not guarantee that", "# globals with constructors are initialized before the first access.", "match", "=", "Match", "(", "r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\b(.*)'", ",", "line", ")", "# Make sure it's not a function.", "# Function template specialization looks like: \"string foo<Type>(...\".", "# Class template definitions look like: \"string Foo<Type>::Method(...\".", "#", "# Also ignore things that look like operators. These are matched separately", "# because operator names cross non-word boundaries. If we change the pattern", "# above, we would decrease the accuracy of matching identifiers.", "if", "(", "match", "and", "not", "Search", "(", "r'\\boperator\\W'", ",", "line", ")", "and", "not", "Match", "(", "r'\\s*(<.*>)?(::[a-zA-Z0-9_]+)?\\s*\\(([^\"]|$)'", ",", "match", ".", "group", "(", "3", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/string'", ",", "4", ",", "'For a static/global string constant, use a C style string instead: '", "'\"%schar %s[]\".'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "if", "Search", "(", "r'\\b([A-Za-z0-9_]*_)\\(\\1\\)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/init'", ",", "4", ",", "'You seem to be initializing a member variable with itself.'", ")", "if", "file_extension", "==", "'h'", ":", "# TODO(unknown): check that 1-arg constructors are explicit.", "# How to tell it's a constructor?", "# (handled in CheckForNonStandardConstructs for now)", "# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS", "# (level 1 error)", "pass", "# Check if people are using the verboten C basic types. The only exception", "# we regularly allow is \"unsigned short port\" for port.", "if", "Search", "(", "r'\\bshort port\\b'", ",", "line", ")", ":", "if", "not", "Search", "(", "r'\\bunsigned short port\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/int'", ",", "4", ",", "'Use \"unsigned short\" for ports, not \"short\"'", ")", "else", ":", "match", "=", "Search", "(", "r'\\b(short|long(?! +double)|long long)\\b'", ",", "line", ")", "if", "match", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/int'", ",", "4", ",", "'Use int16/int64/etc, rather than the C type %s'", "%", "match", ".", "group", "(", "1", ")", ")", "# When snprintf is used, the second argument shouldn't be a literal.", "match", "=", "Search", "(", "r'snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,'", ",", "line", ")", "if", "match", "and", "match", ".", "group", "(", "2", ")", "!=", "'0'", ":", "# If 2nd arg is zero, snprintf is used to calculate size.", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "3", ",", "'If you can, use sizeof(%s) instead of %s as the 2nd arg '", "'to snprintf.'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "# Check if some verboten C functions are being used.", "if", "Search", "(", "r'\\bsprintf\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "5", ",", "'Never use sprintf. Use snprintf instead.'", ")", "match", "=", "Search", "(", "r'\\b(strcpy|strcat)\\b'", ",", "line", ")", "if", "match", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "4", ",", "'Almost always, snprintf is better than %s'", "%", "match", ".", "group", "(", "1", ")", ")", "# Check if some verboten operator overloading is going on", "# TODO(unknown): catch out-of-line unary operator&:", "# class X {};", "# int operator&(const X& x) { return 42; } // unary operator&", "# The trick is it's hard to tell apart from binary operator&:", "# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&", "if", "Search", "(", "r'\\boperator\\s*&\\s*\\(\\s*\\)'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/operator'", ",", "4", ",", "'Unary operator& is dangerous. Do not use it.'", ")", "# Check for suspicious usage of \"if\" like", "# } if (a == b) {", "if", "Search", "(", "r'\\}\\s*if\\s*\\('", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/braces'", ",", "4", ",", "'Did you mean \"else if\"? If not, start a new line for \"if\".'", ")", "# Check for potential format string bugs like printf(foo).", "# We constrain the pattern not to pick things like DocidForPrintf(foo).", "# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())", "# TODO(sugawarayu): Catch the following case. Need to change the calling", "# convention of the whole function to process multiple line to handle it.", "# printf(", "# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);", "printf_args", "=", "_GetTextInside", "(", "line", ",", "r'(?i)\\b(string)?printf\\s*\\('", ")", "if", "printf_args", ":", "match", "=", "Match", "(", "r'([\\w.\\->()]+)$'", ",", "printf_args", ")", "if", "match", "and", "match", ".", "group", "(", "1", ")", "!=", "'__VA_ARGS__'", ":", "function_name", "=", "re", ".", "search", "(", "r'\\b((?:string)?printf)\\s*\\('", ",", "line", ",", "re", ".", "I", ")", ".", "group", "(", "1", ")", "error", "(", "filename", ",", "linenum", ",", "'runtime/printf'", ",", "4", ",", "'Potential format string bug. Do %s(\"%%s\", %s) instead.'", "%", "(", "function_name", ",", "match", ".", "group", "(", "1", ")", ")", ")", "# Check for potential memset bugs like memset(buf, sizeof(buf), 0).", "match", "=", "Search", "(", "r'memset\\s*\\(([^,]*),\\s*([^,]*),\\s*0\\s*\\)'", ",", "line", ")", "if", "match", "and", "not", "Match", "(", "r\"^''|-?[0-9]+|0x[0-9A-Fa-f]$\"", ",", "match", ".", "group", "(", "2", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/memset'", ",", "4", ",", "'Did you mean \"memset(%s, 0, %s)\"?'", "%", "(", "match", ".", "group", "(", "1", ")", ",", "match", ".", "group", "(", "2", ")", ")", ")", "if", "Search", "(", "r'\\busing namespace\\b'", ",", "line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/namespaces'", ",", "5", ",", "'Do not use namespace using-directives. '", "'Use using-declarations instead.'", ")", "# Detect variable-length arrays.", "match", "=", "Match", "(", "r'\\s*(.+::)?(\\w+) [a-z]\\w*\\[(.+)];'", ",", "line", ")", "if", "(", "match", "and", "match", ".", "group", "(", "2", ")", "!=", "'return'", "and", "match", ".", "group", "(", "2", ")", "!=", "'delete'", "and", "match", ".", "group", "(", "3", ")", ".", "find", "(", "']'", ")", "==", "-", "1", ")", ":", "# Split the size using space and arithmetic operators as delimiters.", "# If any of the resulting tokens are not compile time constants then", "# report the error.", "tokens", "=", "re", ".", "split", "(", "r'\\s|\\+|\\-|\\*|\\/|<<|>>]'", ",", "match", ".", "group", "(", "3", ")", ")", "is_const", "=", "True", "skip_next", "=", "False", "for", "tok", "in", "tokens", ":", "if", "skip_next", ":", "skip_next", "=", "False", "continue", "if", "Search", "(", "r'sizeof\\(.+\\)'", ",", "tok", ")", ":", "continue", "if", "Search", "(", "r'arraysize\\(\\w+\\)'", ",", "tok", ")", ":", "continue", "tok", "=", "tok", ".", "lstrip", "(", "'('", ")", "tok", "=", "tok", ".", "rstrip", "(", "')'", ")", "if", "not", "tok", ":", "continue", "if", "Match", "(", "r'\\d+'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'0[xX][0-9a-fA-F]+'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'k[A-Z0-9]\\w*'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'(.+::)?k[A-Z0-9]\\w*'", ",", "tok", ")", ":", "continue", "if", "Match", "(", "r'(.+::)?[A-Z][A-Z0-9_]*'", ",", "tok", ")", ":", "continue", "# A catch all for tricky sizeof cases, including 'sizeof expression',", "# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'", "# requires skipping the next token because we split on ' ' and '*'.", "if", "tok", ".", "startswith", "(", "'sizeof'", ")", ":", "skip_next", "=", "True", "continue", "is_const", "=", "False", "break", "if", "not", "is_const", ":", "error", "(", "filename", ",", "linenum", ",", "'runtime/arrays'", ",", "1", ",", "'Do not use variable-length arrays. Use an appropriately named '", "\"('k' followed by CamelCase) compile-time constant for the size.\"", ")", "# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or", "# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing", "# in the class declaration.", "match", "=", "Match", "(", "(", "r'\\s*'", "r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'", "r'\\(.*\\);$'", ")", ",", "line", ")", "if", "match", "and", "linenum", "+", "1", "<", "clean_lines", ".", "NumLines", "(", ")", ":", "next_line", "=", "clean_lines", ".", "elided", "[", "linenum", "+", "1", "]", "# We allow some, but not all, declarations of variables to be present", "# in the statement that defines the class. The [\\w\\*,\\s]* fragment of", "# the regular expression below allows users to declare instances of", "# the class or pointers to instances, but not less common types such", "# as function pointers or arrays. It's a tradeoff between allowing", "# reasonable code and avoiding trying to parse more C++ using regexps.", "if", "not", "Search", "(", "r'^\\s*}[\\w\\*,\\s]*;'", ",", "next_line", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/constructors'", ",", "3", ",", "match", ".", "group", "(", "1", ")", "+", "' should be the last thing in the class'", ")", "# Check for use of unnamed namespaces in header files. Registration", "# macros are typically OK, so we allow use of \"namespace {\" on lines", "# that end with backslashes.", "if", "(", "file_extension", "==", "'h'", "and", "Search", "(", "r'\\bnamespace\\s*{'", ",", "line", ")", "and", "line", "[", "-", "1", "]", "!=", "'\\\\'", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'build/namespaces'", ",", "4", ",", "'Do not use unnamed namespaces in header files. See '", "'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'", "' for more information.'", ")" ]
https://github.com/WeitaoVan/L-GM-loss/blob/598582f0631bac876b3eeb8d6c4cd1d780269e03/scripts/cpp_lint.py#L3834-L4132
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
setup/snap.py
python
TMOut.CutBf
(self, *args)
return _snap.TMOut_CutBf(self, *args)
CutBf(TMOut self, int const & CutBfL) Parameters: CutBfL: int const &
CutBf(TMOut self, int const & CutBfL)
[ "CutBf", "(", "TMOut", "self", "int", "const", "&", "CutBfL", ")" ]
def CutBf(self, *args): """ CutBf(TMOut self, int const & CutBfL) Parameters: CutBfL: int const & """ return _snap.TMOut_CutBf(self, *args)
[ "def", "CutBf", "(", "self", ",", "*", "args", ")", ":", "return", "_snap", ".", "TMOut_CutBf", "(", "self", ",", "*", "args", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L3058-L3066
omnisci/omniscidb
b9c95f1bd602b4ffc8b0edf18bfad61031e08d86
python/omnisci/thrift/OmniSci.py
python
Client.execute_query_step
(self, pending_query, subquery_id, start_time_str)
return self.recv_execute_query_step()
Parameters: - pending_query - subquery_id - start_time_str
Parameters: - pending_query - subquery_id - start_time_str
[ "Parameters", ":", "-", "pending_query", "-", "subquery_id", "-", "start_time_str" ]
def execute_query_step(self, pending_query, subquery_id, start_time_str): """ Parameters: - pending_query - subquery_id - start_time_str """ self.send_execute_query_step(pending_query, subquery_id, start_time_str) return self.recv_execute_query_step()
[ "def", "execute_query_step", "(", "self", ",", "pending_query", ",", "subquery_id", ",", "start_time_str", ")", ":", "self", ".", "send_execute_query_step", "(", "pending_query", ",", "subquery_id", ",", "start_time_str", ")", "return", "self", ".", "recv_execute_query_step", "(", ")" ]
https://github.com/omnisci/omniscidb/blob/b9c95f1bd602b4ffc8b0edf18bfad61031e08d86/python/omnisci/thrift/OmniSci.py#L3813-L3822
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/contrib/learn/python/learn/dataframe/series.py
python
Series.build
(self, cache, **kwargs)
Returns a Tensor.
Returns a Tensor.
[ "Returns", "a", "Tensor", "." ]
def build(self, cache, **kwargs): """Returns a Tensor.""" raise NotImplementedError()
[ "def", "build", "(", "self", ",", "cache", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/learn/python/learn/dataframe/series.py#L101-L103
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/estimators/estimator.py
python
_verify_input_args
(x, y, input_fn, feed_fn, batch_size)
Verifies validity of co-existence of input arguments.
Verifies validity of co-existence of input arguments.
[ "Verifies", "validity", "of", "co", "-", "existence", "of", "input", "arguments", "." ]
def _verify_input_args(x, y, input_fn, feed_fn, batch_size): """Verifies validity of co-existence of input arguments.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') else: if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.')
[ "def", "_verify_input_args", "(", "x", ",", "y", ",", "input_fn", ",", "feed_fn", ",", "batch_size", ")", ":", "if", "input_fn", "is", "None", ":", "if", "x", "is", "None", ":", "raise", "ValueError", "(", "'Either x or input_fn must be provided.'", ")", "if", "tensor_util", ".", "is_tensor", "(", "x", ")", "or", "y", "is", "not", "None", "and", "tensor_util", ".", "is_tensor", "(", "y", ")", ":", "raise", "ValueError", "(", "'Inputs cannot be tensors. Please provide input_fn.'", ")", "if", "feed_fn", "is", "not", "None", ":", "raise", "ValueError", "(", "'Can not provide both feed_fn and x or y.'", ")", "else", ":", "if", "(", "x", "is", "not", "None", ")", "or", "(", "y", "is", "not", "None", ")", ":", "raise", "ValueError", "(", "'Can not provide both input_fn and x or y.'", ")", "if", "batch_size", "is", "not", "None", ":", "raise", "ValueError", "(", "'Can not provide both input_fn and batch_size.'", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/learn/python/learn/estimators/estimator.py#L98-L113
nodejs/nan
8db8c8f544f2b6ce1b0859ef6ecdd0a3873a9e62
cpplint.py
python
CheckTrailingSemicolon
(filename, clean_lines, linenum, error)
Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Looks for redundant trailing semicolon.
[ "Looks", "for", "redundant", "trailing", "semicolon", "." ]
def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. # # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas # - alignas specifier with anonymous structs # - decltype closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or Search(r'\bdecltype$', line_prefix) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. # We need to check the line forward for NOLINT raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1, error) ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum, error) error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }")
[ "def", "CheckTrailingSemicolon", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Block bodies should not be followed by a semicolon. Due to C++11", "# brace initialization, there are more places where semicolons are", "# required than not, so we use a whitelist approach to check these", "# rather than a blacklist. These are the places where \"};\" should", "# be replaced by just \"}\":", "# 1. Some flavor of block following closing parenthesis:", "# for (;;) {};", "# while (...) {};", "# switch (...) {};", "# Function(...) {};", "# if (...) {};", "# if (...) else if (...) {};", "#", "# 2. else block:", "# if (...) else {};", "#", "# 3. const member function:", "# Function(...) const {};", "#", "# 4. Block following some statement:", "# x = 42;", "# {};", "#", "# 5. Block at the beginning of a function:", "# Function(...) {", "# {};", "# }", "#", "# Note that naively checking for the preceding \"{\" will also match", "# braces inside multi-dimensional arrays, but this is fine since", "# that expression will not contain semicolons.", "#", "# 6. Block following another block:", "# while (true) {}", "# {};", "#", "# 7. End of namespaces:", "# namespace {};", "#", "# These semicolons seems far more common than other kinds of", "# redundant semicolons, possibly due to people converting classes", "# to namespaces. For now we do not warn for this case.", "#", "# Try matching case 1 first.", "match", "=", "Match", "(", "r'^(.*\\)\\s*)\\{'", ",", "line", ")", "if", "match", ":", "# Matched closing parenthesis (case 1). Check the token before the", "# matching opening parenthesis, and don't warn if it looks like a", "# macro. This avoids these false positives:", "# - macro that defines a base class", "# - multi-line macro that defines a base class", "# - macro that defines the whole class-head", "#", "# But we still issue warnings for macros that we know are safe to", "# warn, specifically:", "# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P", "# - TYPED_TEST", "# - INTERFACE_DEF", "# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:", "#", "# We implement a whitelist of safe macros instead of a blacklist of", "# unsafe macros, even though the latter appears less frequently in", "# google code and would have been easier to implement. This is because", "# the downside for getting the whitelist wrong means some extra", "# semicolons, while the downside for getting the blacklist wrong", "# would result in compile errors.", "#", "# In addition to macros, we also don't want to warn on", "# - Compound literals", "# - Lambdas", "# - alignas specifier with anonymous structs", "# - decltype", "closing_brace_pos", "=", "match", ".", "group", "(", "1", ")", ".", "rfind", "(", "')'", ")", "opening_parenthesis", "=", "ReverseCloseExpression", "(", "clean_lines", ",", "linenum", ",", "closing_brace_pos", ")", "if", "opening_parenthesis", "[", "2", "]", ">", "-", "1", ":", "line_prefix", "=", "opening_parenthesis", "[", "0", "]", "[", "0", ":", "opening_parenthesis", "[", "2", "]", "]", "macro", "=", "Search", "(", "r'\\b([A-Z_][A-Z0-9_]*)\\s*$'", ",", "line_prefix", ")", "func", "=", "Match", "(", "r'^(.*\\])\\s*$'", ",", "line_prefix", ")", "if", "(", "(", "macro", "and", "macro", ".", "group", "(", "1", ")", "not", "in", "(", "'TEST'", ",", "'TEST_F'", ",", "'MATCHER'", ",", "'MATCHER_P'", ",", "'TYPED_TEST'", ",", "'EXCLUSIVE_LOCKS_REQUIRED'", ",", "'SHARED_LOCKS_REQUIRED'", ",", "'LOCKS_EXCLUDED'", ",", "'INTERFACE_DEF'", ")", ")", "or", "(", "func", "and", "not", "Search", "(", "r'\\boperator\\s*\\[\\s*\\]'", ",", "func", ".", "group", "(", "1", ")", ")", ")", "or", "Search", "(", "r'\\b(?:struct|union)\\s+alignas\\s*$'", ",", "line_prefix", ")", "or", "Search", "(", "r'\\bdecltype$'", ",", "line_prefix", ")", "or", "Search", "(", "r'\\s+=\\s*$'", ",", "line_prefix", ")", ")", ":", "match", "=", "None", "if", "(", "match", "and", "opening_parenthesis", "[", "1", "]", ">", "1", "and", "Search", "(", "r'\\]\\s*$'", ",", "clean_lines", ".", "elided", "[", "opening_parenthesis", "[", "1", "]", "-", "1", "]", ")", ")", ":", "# Multi-line lambda-expression", "match", "=", "None", "else", ":", "# Try matching cases 2-3.", "match", "=", "Match", "(", "r'^(.*(?:else|\\)\\s*const)\\s*)\\{'", ",", "line", ")", "if", "not", "match", ":", "# Try matching cases 4-6. These are always matched on separate lines.", "#", "# Note that we can't simply concatenate the previous line to the", "# current line and do a single match, otherwise we may output", "# duplicate warnings for the blank line case:", "# if (cond) {", "# // blank line", "# }", "prevline", "=", "GetPreviousNonBlankLine", "(", "clean_lines", ",", "linenum", ")", "[", "0", "]", "if", "prevline", "and", "Search", "(", "r'[;{}]\\s*$'", ",", "prevline", ")", ":", "match", "=", "Match", "(", "r'^(\\s*)\\{'", ",", "line", ")", "# Check matching closing brace", "if", "match", ":", "(", "endline", ",", "endlinenum", ",", "endpos", ")", "=", "CloseExpression", "(", "clean_lines", ",", "linenum", ",", "len", "(", "match", ".", "group", "(", "1", ")", ")", ")", "if", "endpos", ">", "-", "1", "and", "Match", "(", "r'^\\s*;'", ",", "endline", "[", "endpos", ":", "]", ")", ":", "# Current {} pair is eligible for semicolon check, and we have found", "# the redundant semicolon, output warning here.", "#", "# Note: because we are scanning forward for opening braces, and", "# outputting warnings for the matching closing brace, if there are", "# nested blocks with trailing semicolons, we will get the error", "# messages in reversed order.", "# We need to check the line forward for NOLINT", "raw_lines", "=", "clean_lines", ".", "raw_lines", "ParseNolintSuppressions", "(", "filename", ",", "raw_lines", "[", "endlinenum", "-", "1", "]", ",", "endlinenum", "-", "1", ",", "error", ")", "ParseNolintSuppressions", "(", "filename", ",", "raw_lines", "[", "endlinenum", "]", ",", "endlinenum", ",", "error", ")", "error", "(", "filename", ",", "endlinenum", ",", "'readability/braces'", ",", "4", ",", "\"You don't need a ; after a }\"", ")" ]
https://github.com/nodejs/nan/blob/8db8c8f544f2b6ce1b0859ef6ecdd0a3873a9e62/cpplint.py#L4091-L4235
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_misc.py
python
DropTarget.OnDrop
(*args, **kwargs)
return _misc_.DropTarget_OnDrop(*args, **kwargs)
OnDrop(self, int x, int y) -> bool
OnDrop(self, int x, int y) -> bool
[ "OnDrop", "(", "self", "int", "x", "int", "y", ")", "-", ">", "bool" ]
def OnDrop(*args, **kwargs): """OnDrop(self, int x, int y) -> bool""" return _misc_.DropTarget_OnDrop(*args, **kwargs)
[ "def", "OnDrop", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "DropTarget_OnDrop", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_misc.py#L5587-L5589
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/lib-tk/Tkinter.py
python
Misc.tk_setPalette
(self, *args, **kw)
Set a new color scheme for all widget elements. A single color as argument will cause that all colors of Tk widget elements are derived from this. Alternatively several keyword parameters and its associated colors can be given. The following keywords are valid: activeBackground, foreground, selectColor, activeForeground, highlightBackground, selectBackground, background, highlightColor, selectForeground, disabledForeground, insertBackground, troughColor.
Set a new color scheme for all widget elements.
[ "Set", "a", "new", "color", "scheme", "for", "all", "widget", "elements", "." ]
def tk_setPalette(self, *args, **kw): """Set a new color scheme for all widget elements. A single color as argument will cause that all colors of Tk widget elements are derived from this. Alternatively several keyword parameters and its associated colors can be given. The following keywords are valid: activeBackground, foreground, selectColor, activeForeground, highlightBackground, selectBackground, background, highlightColor, selectForeground, disabledForeground, insertBackground, troughColor.""" self.tk.call(('tk_setPalette',) + _flatten(args) + _flatten(kw.items()))
[ "def", "tk_setPalette", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "self", ".", "tk", ".", "call", "(", "(", "'tk_setPalette'", ",", ")", "+", "_flatten", "(", "args", ")", "+", "_flatten", "(", "kw", ".", "items", "(", ")", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/Tkinter.py#L472-L484
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
android_webview/tools/webview_licenses.py
python
_ReadFile
(path)
return open(os.path.join(REPOSITORY_ROOT, path), 'rb').read()
Reads a file from disk. Args: path: The path of the file to read, relative to the root of the repository. Returns: The contents of the file as a string.
Reads a file from disk. Args: path: The path of the file to read, relative to the root of the repository. Returns: The contents of the file as a string.
[ "Reads", "a", "file", "from", "disk", ".", "Args", ":", "path", ":", "The", "path", "of", "the", "file", "to", "read", "relative", "to", "the", "root", "of", "the", "repository", ".", "Returns", ":", "The", "contents", "of", "the", "file", "as", "a", "string", "." ]
def _ReadFile(path): """Reads a file from disk. Args: path: The path of the file to read, relative to the root of the repository. Returns: The contents of the file as a string. """ return open(os.path.join(REPOSITORY_ROOT, path), 'rb').read()
[ "def", "_ReadFile", "(", "path", ")", ":", "return", "open", "(", "os", ".", "path", ".", "join", "(", "REPOSITORY_ROOT", ",", "path", ")", ",", "'rb'", ")", ".", "read", "(", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/android_webview/tools/webview_licenses.py#L190-L198
hfinkel/llvm-project-cxxjit
91084ef018240bbb8e24235ff5cd8c355a9c1a1e
libcxx/utils/google-benchmark/tools/strip_asm.py
python
process_identifiers
(l)
return new_line
process_identifiers - process all identifiers and modify them to have consistent names across all platforms; specifically across ELF and MachO. For example, MachO inserts an additional understore at the beginning of names. This function removes that.
process_identifiers - process all identifiers and modify them to have consistent names across all platforms; specifically across ELF and MachO. For example, MachO inserts an additional understore at the beginning of names. This function removes that.
[ "process_identifiers", "-", "process", "all", "identifiers", "and", "modify", "them", "to", "have", "consistent", "names", "across", "all", "platforms", ";", "specifically", "across", "ELF", "and", "MachO", ".", "For", "example", "MachO", "inserts", "an", "additional", "understore", "at", "the", "beginning", "of", "names", ".", "This", "function", "removes", "that", "." ]
def process_identifiers(l): """ process_identifiers - process all identifiers and modify them to have consistent names across all platforms; specifically across ELF and MachO. For example, MachO inserts an additional understore at the beginning of names. This function removes that. """ parts = re.split(r'([a-zA-Z0-9_]+)', l) new_line = '' for tk in parts: if is_identifier(tk): if tk.startswith('__Z'): tk = tk[1:] elif tk.startswith('_') and len(tk) > 1 and \ tk[1].isalpha() and tk[1] != 'Z': tk = tk[1:] new_line += tk return new_line
[ "def", "process_identifiers", "(", "l", ")", ":", "parts", "=", "re", ".", "split", "(", "r'([a-zA-Z0-9_]+)'", ",", "l", ")", "new_line", "=", "''", "for", "tk", "in", "parts", ":", "if", "is_identifier", "(", "tk", ")", ":", "if", "tk", ".", "startswith", "(", "'__Z'", ")", ":", "tk", "=", "tk", "[", "1", ":", "]", "elif", "tk", ".", "startswith", "(", "'_'", ")", "and", "len", "(", "tk", ")", ">", "1", "and", "tk", "[", "1", "]", ".", "isalpha", "(", ")", "and", "tk", "[", "1", "]", "!=", "'Z'", ":", "tk", "=", "tk", "[", "1", ":", "]", "new_line", "+=", "tk", "return", "new_line" ]
https://github.com/hfinkel/llvm-project-cxxjit/blob/91084ef018240bbb8e24235ff5cd8c355a9c1a1e/libcxx/utils/google-benchmark/tools/strip_asm.py#L64-L81
fatih/subvim
241b6d170597857105da219c9b7d36059e9f11fb
vim/base/ultisnips/plugin/UltiSnips/text_objects/_lexer.py
python
_parse_number
(stream)
return int(rv)
Expects the stream to contain a number next, returns the number without consuming any more bytes
Expects the stream to contain a number next, returns the number without consuming any more bytes
[ "Expects", "the", "stream", "to", "contain", "a", "number", "next", "returns", "the", "number", "without", "consuming", "any", "more", "bytes" ]
def _parse_number(stream): """ Expects the stream to contain a number next, returns the number without consuming any more bytes """ rv = "" while stream.peek() and stream.peek() in string.digits: rv += stream.next() return int(rv)
[ "def", "_parse_number", "(", "stream", ")", ":", "rv", "=", "\"\"", "while", "stream", ".", "peek", "(", ")", "and", "stream", ".", "peek", "(", ")", "in", "string", ".", "digits", ":", "rv", "+=", "stream", ".", "next", "(", ")", "return", "int", "(", "rv", ")" ]
https://github.com/fatih/subvim/blob/241b6d170597857105da219c9b7d36059e9f11fb/vim/base/ultisnips/plugin/UltiSnips/text_objects/_lexer.py#L72-L81
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/ConfigSet.py
python
ConfigSet.keys
(self)
return keys
Dict interface (unknown purpose)
Dict interface (unknown purpose)
[ "Dict", "interface", "(", "unknown", "purpose", ")" ]
def keys(self): """Dict interface (unknown purpose)""" keys = set() cur = self while cur: keys.update(cur.table.keys()) cur = getattr(cur, 'parent', None) keys = list(keys) keys.sort() return keys
[ "def", "keys", "(", "self", ")", ":", "keys", "=", "set", "(", ")", "cur", "=", "self", "while", "cur", ":", "keys", ".", "update", "(", "cur", ".", "table", ".", "keys", "(", ")", ")", "cur", "=", "getattr", "(", "cur", ",", "'parent'", ",", "None", ")", "keys", "=", "list", "(", "keys", ")", "keys", ".", "sort", "(", ")", "return", "keys" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/ConfigSet.py#L51-L60
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/framework/function.py
python
_parse_kwargs_as_attrs
(func_name, **kwargs)
return attrs
Parses **kwargs into a node's attributes.
Parses **kwargs into a node's attributes.
[ "Parses", "**", "kwargs", "into", "a", "node", "s", "attributes", "." ]
def _parse_kwargs_as_attrs(func_name, **kwargs): """Parses **kwargs into a node's attributes.""" attrs = {} noinline = kwargs.pop("noinline", None) if noinline is not None: attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline)) compiled = kwargs.pop("compiled", None) separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None) if compiled is not None: attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled)) attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue( b=bool(separate_compiled_gradients)) attrs["_XlaScope"] = attr_value_pb2.AttrValue( s=("function_%s" % func_name).encode()) if kwargs: raise ValueError("Unknown keyword arguments: %s" % kwargs.keys()) return attrs
[ "def", "_parse_kwargs_as_attrs", "(", "func_name", ",", "*", "*", "kwargs", ")", ":", "attrs", "=", "{", "}", "noinline", "=", "kwargs", ".", "pop", "(", "\"noinline\"", ",", "None", ")", "if", "noinline", "is", "not", "None", ":", "attrs", "[", "\"_noinline\"", "]", "=", "attr_value_pb2", ".", "AttrValue", "(", "b", "=", "bool", "(", "noinline", ")", ")", "compiled", "=", "kwargs", ".", "pop", "(", "\"compiled\"", ",", "None", ")", "separate_compiled_gradients", "=", "kwargs", ".", "pop", "(", "\"separate_compiled_gradients\"", ",", "None", ")", "if", "compiled", "is", "not", "None", ":", "attrs", "[", "\"_XlaCompile\"", "]", "=", "attr_value_pb2", ".", "AttrValue", "(", "b", "=", "bool", "(", "compiled", ")", ")", "attrs", "[", "\"_XlaSeparateCompiledGradients\"", "]", "=", "attr_value_pb2", ".", "AttrValue", "(", "b", "=", "bool", "(", "separate_compiled_gradients", ")", ")", "attrs", "[", "\"_XlaScope\"", "]", "=", "attr_value_pb2", ".", "AttrValue", "(", "s", "=", "(", "\"function_%s\"", "%", "func_name", ")", ".", "encode", "(", ")", ")", "if", "kwargs", ":", "raise", "ValueError", "(", "\"Unknown keyword arguments: %s\"", "%", "kwargs", ".", "keys", "(", ")", ")", "return", "attrs" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/framework/function.py#L981-L1000
synfig/synfig
a5ec91db5b751dc12e4400ccfb5c063fd6d2d928
synfig-studio/plugins/lottie-exporter/common/Canvas.py
python
Canvas.get_bone
(self, key)
return None
Given a guid, returns the corresponding bone from the canvas
Given a guid, returns the corresponding bone from the canvas
[ "Given", "a", "guid", "returns", "the", "corresponding", "bone", "from", "the", "canvas" ]
def get_bone(self, key): """ Given a guid, returns the corresponding bone from the canvas """ if key in self.bones.keys(): return self.bones[key] return None
[ "def", "get_bone", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "bones", ".", "keys", "(", ")", ":", "return", "self", ".", "bones", "[", "key", "]", "return", "None" ]
https://github.com/synfig/synfig/blob/a5ec91db5b751dc12e4400ccfb5c063fd6d2d928/synfig-studio/plugins/lottie-exporter/common/Canvas.py#L106-L112
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/contrib/distributions/python/ops/binomial.py
python
Binomial.event_shape
(self, name="event_shape")
Shape of a sample from a single distribution as a 1-D int32 `Tensor`. Args: name: name to give to the op Returns: `Tensor` `event_shape`
Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
[ "Shape", "of", "a", "sample", "from", "a", "single", "distribution", "as", "a", "1", "-", "D", "int32", "Tensor", "." ]
def event_shape(self, name="event_shape"): """Shape of a sample from a single distribution as a 1-D int32 `Tensor`. Args: name: name to give to the op Returns: `Tensor` `event_shape` """ with ops.name_scope(self.name): with ops.op_scope([], name): return constant_op.constant([], name=name, dtype=dtypes.int32)
[ "def", "event_shape", "(", "self", ",", "name", "=", "\"event_shape\"", ")", ":", "with", "ops", ".", "name_scope", "(", "self", ".", "name", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "]", ",", "name", ")", ":", "return", "constant_op", ".", "constant", "(", "[", "]", ",", "name", "=", "name", ",", "dtype", "=", "dtypes", ".", "int32", ")" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/distributions/python/ops/binomial.py#L196-L207
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pydoc.py
python
describe
(thing)
return type(thing).__name__
Produce a short description of the given thing.
Produce a short description of the given thing.
[ "Produce", "a", "short", "description", "of", "the", "given", "thing", "." ]
def describe(thing): """Produce a short description of the given thing.""" if inspect.ismodule(thing): if thing.__name__ in sys.builtin_module_names: return 'built-in module ' + thing.__name__ if hasattr(thing, '__path__'): return 'package ' + thing.__name__ else: return 'module ' + thing.__name__ if inspect.isbuiltin(thing): return 'built-in function ' + thing.__name__ if inspect.isgetsetdescriptor(thing): return 'getset descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.ismemberdescriptor(thing): return 'member descriptor %s.%s.%s' % ( thing.__objclass__.__module__, thing.__objclass__.__name__, thing.__name__) if inspect.isclass(thing): return 'class ' + thing.__name__ if inspect.isfunction(thing): return 'function ' + thing.__name__ if inspect.ismethod(thing): return 'method ' + thing.__name__ return type(thing).__name__
[ "def", "describe", "(", "thing", ")", ":", "if", "inspect", ".", "ismodule", "(", "thing", ")", ":", "if", "thing", ".", "__name__", "in", "sys", ".", "builtin_module_names", ":", "return", "'built-in module '", "+", "thing", ".", "__name__", "if", "hasattr", "(", "thing", ",", "'__path__'", ")", ":", "return", "'package '", "+", "thing", ".", "__name__", "else", ":", "return", "'module '", "+", "thing", ".", "__name__", "if", "inspect", ".", "isbuiltin", "(", "thing", ")", ":", "return", "'built-in function '", "+", "thing", ".", "__name__", "if", "inspect", ".", "isgetsetdescriptor", "(", "thing", ")", ":", "return", "'getset descriptor %s.%s.%s'", "%", "(", "thing", ".", "__objclass__", ".", "__module__", ",", "thing", ".", "__objclass__", ".", "__name__", ",", "thing", ".", "__name__", ")", "if", "inspect", ".", "ismemberdescriptor", "(", "thing", ")", ":", "return", "'member descriptor %s.%s.%s'", "%", "(", "thing", ".", "__objclass__", ".", "__module__", ",", "thing", ".", "__objclass__", ".", "__name__", ",", "thing", ".", "__name__", ")", "if", "inspect", ".", "isclass", "(", "thing", ")", ":", "return", "'class '", "+", "thing", ".", "__name__", "if", "inspect", ".", "isfunction", "(", "thing", ")", ":", "return", "'function '", "+", "thing", ".", "__name__", "if", "inspect", ".", "ismethod", "(", "thing", ")", ":", "return", "'method '", "+", "thing", ".", "__name__", "return", "type", "(", "thing", ")", ".", "__name__" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pydoc.py#L1575-L1600
KratosMultiphysics/Kratos
0000833054ed0503424eb28205d6508d9ca6cbbc
applications/ContactStructuralMechanicsApplication/python_scripts/explicit_penalty_contact_process.py
python
ExplicitPenaltyContactProcess.ExecuteBeforeSolutionLoop
(self)
This method is executed before starting the time loop Keyword arguments: self -- It signifies an instance of a class.
This method is executed before starting the time loop
[ "This", "method", "is", "executed", "before", "starting", "the", "time", "loop" ]
def ExecuteBeforeSolutionLoop(self): """ This method is executed before starting the time loop Keyword arguments: self -- It signifies an instance of a class. """ # We call to the base process super().ExecuteBeforeSolutionLoop()
[ "def", "ExecuteBeforeSolutionLoop", "(", "self", ")", ":", "# We call to the base process", "super", "(", ")", ".", "ExecuteBeforeSolutionLoop", "(", ")" ]
https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/applications/ContactStructuralMechanicsApplication/python_scripts/explicit_penalty_contact_process.py#L145-L152
facebook/ThreatExchange
31914a51820c73c8a0daffe62ccca29a6e3d359e
api-reference-examples/python/pytx/pytx/request.py
python
Broker.get_new
(klass, attrs)
return n
Return a new instance of klass. :param klass: The class to create a new instance of. :type klass: :class: :param attrs: The attributes to set for this new instance. :type attrs: dict :returns: new instance of klass
Return a new instance of klass.
[ "Return", "a", "new", "instance", "of", "klass", "." ]
def get_new(klass, attrs): """ Return a new instance of klass. :param klass: The class to create a new instance of. :type klass: :class: :param attrs: The attributes to set for this new instance. :type attrs: dict :returns: new instance of klass """ n = klass(**attrs) n._new = False n._changed = [] return n
[ "def", "get_new", "(", "klass", ",", "attrs", ")", ":", "n", "=", "klass", "(", "*", "*", "attrs", ")", "n", ".", "_new", "=", "False", "n", ".", "_changed", "=", "[", "]", "return", "n" ]
https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/api-reference-examples/python/pytx/pytx/request.py#L35-L49
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/cudadrv/driver.py
python
_is_datetime_dtype
(obj)
return dtype is not None and dtype.char in 'Mm'
Returns True if the obj.dtype is datetime64 or timedelta64
Returns True if the obj.dtype is datetime64 or timedelta64
[ "Returns", "True", "if", "the", "obj", ".", "dtype", "is", "datetime64", "or", "timedelta64" ]
def _is_datetime_dtype(obj): """Returns True if the obj.dtype is datetime64 or timedelta64 """ dtype = getattr(obj, 'dtype', None) return dtype is not None and dtype.char in 'Mm'
[ "def", "_is_datetime_dtype", "(", "obj", ")", ":", "dtype", "=", "getattr", "(", "obj", ",", "'dtype'", ",", "None", ")", "return", "dtype", "is", "not", "None", "and", "dtype", ".", "char", "in", "'Mm'" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/cuda/cudadrv/driver.py#L1787-L1791
facebook/watchman
0917460c71b000b96be9b9575d77f06f2f6053bb
build/fbcode_builder/getdeps/manifest.py
python
ManifestParser.get_section_as_args
(self, section, ctx=None)
return args
Intended for use with the make.[build_args/install_args] and autoconf.args sections, this method collects the entries and returns an array of strings. If the manifest contains conditional sections, ctx is used to evaluate the condition and merge in the values.
Intended for use with the make.[build_args/install_args] and autoconf.args sections, this method collects the entries and returns an array of strings. If the manifest contains conditional sections, ctx is used to evaluate the condition and merge in the values.
[ "Intended", "for", "use", "with", "the", "make", ".", "[", "build_args", "/", "install_args", "]", "and", "autoconf", ".", "args", "sections", "this", "method", "collects", "the", "entries", "and", "returns", "an", "array", "of", "strings", ".", "If", "the", "manifest", "contains", "conditional", "sections", "ctx", "is", "used", "to", "evaluate", "the", "condition", "and", "merge", "in", "the", "values", "." ]
def get_section_as_args(self, section, ctx=None) -> List[str]: """Intended for use with the make.[build_args/install_args] and autoconf.args sections, this method collects the entries and returns an array of strings. If the manifest contains conditional sections, ctx is used to evaluate the condition and merge in the values. """ args = [] ctx = ctx or {} for s in self._config.sections(): if s != section: if not s.startswith(section + "."): continue expr = parse_conditional_section_name(s, section) if not expr.eval(ctx): continue for field in self._config.options(s): value = self._config.get(s, field) if value is None: args.append(field) else: args.append("%s=%s" % (field, value)) return args
[ "def", "get_section_as_args", "(", "self", ",", "section", ",", "ctx", "=", "None", ")", "->", "List", "[", "str", "]", ":", "args", "=", "[", "]", "ctx", "=", "ctx", "or", "{", "}", "for", "s", "in", "self", ".", "_config", ".", "sections", "(", ")", ":", "if", "s", "!=", "section", ":", "if", "not", "s", ".", "startswith", "(", "section", "+", "\".\"", ")", ":", "continue", "expr", "=", "parse_conditional_section_name", "(", "s", ",", "section", ")", "if", "not", "expr", ".", "eval", "(", "ctx", ")", ":", "continue", "for", "field", "in", "self", ".", "_config", ".", "options", "(", "s", ")", ":", "value", "=", "self", ".", "_config", ".", "get", "(", "s", ",", "field", ")", "if", "value", "is", "None", ":", "args", ".", "append", "(", "field", ")", "else", ":", "args", ".", "append", "(", "\"%s=%s\"", "%", "(", "field", ",", "value", ")", ")", "return", "args" ]
https://github.com/facebook/watchman/blob/0917460c71b000b96be9b9575d77f06f2f6053bb/build/fbcode_builder/getdeps/manifest.py#L270-L293
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py
python
Text.tag_nextrange
(self, tagName, index1, index2=None)
return self.tk.splitlist(self.tk.call( self._w, 'tag', 'nextrange', tagName, index1, index2))
Return a list of start and end index for the first sequence of characters between INDEX1 and INDEX2 which all have tag TAGNAME. The text is searched forward from INDEX1.
Return a list of start and end index for the first sequence of characters between INDEX1 and INDEX2 which all have tag TAGNAME. The text is searched forward from INDEX1.
[ "Return", "a", "list", "of", "start", "and", "end", "index", "for", "the", "first", "sequence", "of", "characters", "between", "INDEX1", "and", "INDEX2", "which", "all", "have", "tag", "TAGNAME", ".", "The", "text", "is", "searched", "forward", "from", "INDEX1", "." ]
def tag_nextrange(self, tagName, index1, index2=None): """Return a list of start and end index for the first sequence of characters between INDEX1 and INDEX2 which all have tag TAGNAME. The text is searched forward from INDEX1.""" return self.tk.splitlist(self.tk.call( self._w, 'tag', 'nextrange', tagName, index1, index2))
[ "def", "tag_nextrange", "(", "self", ",", "tagName", ",", "index1", ",", "index2", "=", "None", ")", ":", "return", "self", ".", "tk", ".", "splitlist", "(", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'tag'", ",", "'nextrange'", ",", "tagName", ",", "index1", ",", "index2", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L3383-L3388
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/chat.py
python
Chat.from_bot
(self)
return self._from_bot
Gets the from_bot of this Chat. # noqa: E501 :return: The from_bot of this Chat. # noqa: E501 :rtype: bool
Gets the from_bot of this Chat. # noqa: E501
[ "Gets", "the", "from_bot", "of", "this", "Chat", ".", "#", "noqa", ":", "E501" ]
def from_bot(self): """Gets the from_bot of this Chat. # noqa: E501 :return: The from_bot of this Chat. # noqa: E501 :rtype: bool """ return self._from_bot
[ "def", "from_bot", "(", "self", ")", ":", "return", "self", ".", "_from_bot" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/chat.py#L190-L197
RobotLocomotion/drake
0e18a34604c45ed65bc9018a54f7610f91cdad5b
examples/acrobot/spong_sim.py
python
simulate
(*, initial_state, controller_params, t_final, tape_period)
return x_tape
Simulates an Acrobot + Spong controller from the given initial state and parameters until the given final time. Returns the state sampled at the given tape_period.
Simulates an Acrobot + Spong controller from the given initial state and parameters until the given final time. Returns the state sampled at the given tape_period.
[ "Simulates", "an", "Acrobot", "+", "Spong", "controller", "from", "the", "given", "initial", "state", "and", "parameters", "until", "the", "given", "final", "time", ".", "Returns", "the", "state", "sampled", "at", "the", "given", "tape_period", "." ]
def simulate(*, initial_state, controller_params, t_final, tape_period): """Simulates an Acrobot + Spong controller from the given initial state and parameters until the given final time. Returns the state sampled at the given tape_period. """ builder = DiagramBuilder() plant = builder.AddSystem(AcrobotPlant()) controller = builder.AddSystem(AcrobotSpongController()) builder.Connect(plant.get_output_port(0), controller.get_input_port(0)) builder.Connect(controller.get_output_port(0), plant.get_input_port(0)) state_logger = LogVectorOutput(plant.get_output_port(0), builder, tape_period) diagram = builder.Build() simulator = Simulator(diagram) context = simulator.get_mutable_context() plant_context = diagram.GetMutableSubsystemContext(plant, context) controller_context = diagram.GetMutableSubsystemContext( controller, context) plant_context.SetContinuousState(initial_state) controller_context.get_mutable_numeric_parameter(0).SetFromVector( controller_params) simulator.AdvanceTo(t_final) x_tape = state_logger.FindLog(context).data() return x_tape
[ "def", "simulate", "(", "*", ",", "initial_state", ",", "controller_params", ",", "t_final", ",", "tape_period", ")", ":", "builder", "=", "DiagramBuilder", "(", ")", "plant", "=", "builder", ".", "AddSystem", "(", "AcrobotPlant", "(", ")", ")", "controller", "=", "builder", ".", "AddSystem", "(", "AcrobotSpongController", "(", ")", ")", "builder", ".", "Connect", "(", "plant", ".", "get_output_port", "(", "0", ")", ",", "controller", ".", "get_input_port", "(", "0", ")", ")", "builder", ".", "Connect", "(", "controller", ".", "get_output_port", "(", "0", ")", ",", "plant", ".", "get_input_port", "(", "0", ")", ")", "state_logger", "=", "LogVectorOutput", "(", "plant", ".", "get_output_port", "(", "0", ")", ",", "builder", ",", "tape_period", ")", "diagram", "=", "builder", ".", "Build", "(", ")", "simulator", "=", "Simulator", "(", "diagram", ")", "context", "=", "simulator", ".", "get_mutable_context", "(", ")", "plant_context", "=", "diagram", ".", "GetMutableSubsystemContext", "(", "plant", ",", "context", ")", "controller_context", "=", "diagram", ".", "GetMutableSubsystemContext", "(", "controller", ",", "context", ")", "plant_context", ".", "SetContinuousState", "(", "initial_state", ")", "controller_context", ".", "get_mutable_numeric_parameter", "(", "0", ")", ".", "SetFromVector", "(", "controller_params", ")", "simulator", ".", "AdvanceTo", "(", "t_final", ")", "x_tape", "=", "state_logger", ".", "FindLog", "(", "context", ")", ".", "data", "(", ")", "return", "x_tape" ]
https://github.com/RobotLocomotion/drake/blob/0e18a34604c45ed65bc9018a54f7610f91cdad5b/examples/acrobot/spong_sim.py#L20-L48
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/contrib/learn/python/learn/datasets/base.py
python
load_csv_with_header
(filename, target_dtype, features_dtype, target_column=-1)
return Dataset(data=data, target=target)
Load dataset from CSV file with a header row.
Load dataset from CSV file with a header row.
[ "Load", "dataset", "from", "CSV", "file", "with", "a", "header", "row", "." ]
def load_csv_with_header(filename, target_dtype, features_dtype, target_column=-1): """Load dataset from CSV file with a header row.""" with gfile.Open(filename) as csv_file: data_file = csv.reader(csv_file) header = next(data_file) n_samples = int(header[0]) n_features = int(header[1]) data = np.zeros((n_samples, n_features)) target = np.zeros((n_samples,), dtype=target_dtype) for i, row in enumerate(data_file): target[i] = np.asarray(row.pop(target_column), dtype=target_dtype) data[i] = np.asarray(row, dtype=features_dtype) return Dataset(data=data, target=target)
[ "def", "load_csv_with_header", "(", "filename", ",", "target_dtype", ",", "features_dtype", ",", "target_column", "=", "-", "1", ")", ":", "with", "gfile", ".", "Open", "(", "filename", ")", "as", "csv_file", ":", "data_file", "=", "csv", ".", "reader", "(", "csv_file", ")", "header", "=", "next", "(", "data_file", ")", "n_samples", "=", "int", "(", "header", "[", "0", "]", ")", "n_features", "=", "int", "(", "header", "[", "1", "]", ")", "data", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "n_features", ")", ")", "target", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", ")", ",", "dtype", "=", "target_dtype", ")", "for", "i", ",", "row", "in", "enumerate", "(", "data_file", ")", ":", "target", "[", "i", "]", "=", "np", ".", "asarray", "(", "row", ".", "pop", "(", "target_column", ")", ",", "dtype", "=", "target_dtype", ")", "data", "[", "i", "]", "=", "np", ".", "asarray", "(", "row", ",", "dtype", "=", "features_dtype", ")", "return", "Dataset", "(", "data", "=", "data", ",", "target", "=", "target", ")" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/contrib/learn/python/learn/datasets/base.py#L53-L69
klzgrad/naiveproxy
ed2c513637c77b18721fe428d7ed395b4d284c83
src/build/config/ios/codesign.py
python
Entitlements.__init__
(self, entitlements_path)
Initializes Entitlements object from entitlement file.
Initializes Entitlements object from entitlement file.
[ "Initializes", "Entitlements", "object", "from", "entitlement", "file", "." ]
def __init__(self, entitlements_path): """Initializes Entitlements object from entitlement file.""" self._path = entitlements_path self._data = LoadPlistFile(self._path)
[ "def", "__init__", "(", "self", ",", "entitlements_path", ")", ":", "self", ".", "_path", "=", "entitlements_path", "self", ".", "_data", "=", "LoadPlistFile", "(", "self", ".", "_path", ")" ]
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/config/ios/codesign.py#L234-L237
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/speedmeter.py
python
SpeedMeter.GetAngleRange
(self)
return self._anglerange
Returns the range of existence for :class:`SpeedMeter`. The returned values are in radians.
Returns the range of existence for :class:`SpeedMeter`. The returned values are in radians.
[ "Returns", "the", "range", "of", "existence", "for", ":", "class", ":", "SpeedMeter", ".", "The", "returned", "values", "are", "in", "radians", "." ]
def GetAngleRange(self): """ Returns the range of existence for :class:`SpeedMeter`. The returned values are in radians. """ return self._anglerange
[ "def", "GetAngleRange", "(", "self", ")", ":", "return", "self", ".", "_anglerange" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/speedmeter.py#L1194-L1200
google/or-tools
2cb85b4eead4c38e1c54b48044f92087cf165bce
ortools/sat/python/cp_model.py
python
CpModel.AddMinEquality
(self, target, exprs)
return ct
Adds `target == Min(variables)`.
Adds `target == Min(variables)`.
[ "Adds", "target", "==", "Min", "(", "variables", ")", "." ]
def AddMinEquality(self, target, exprs): """Adds `target == Min(variables)`.""" ct = Constraint(self.__model.constraints) model_ct = self.__model.constraints[ct.Index()] model_ct.lin_max.exprs.extend( [self.ParseLinearExpression(x, True) for x in exprs]) model_ct.lin_max.target.CopyFrom( self.ParseLinearExpression(target, True)) return ct
[ "def", "AddMinEquality", "(", "self", ",", "target", ",", "exprs", ")", ":", "ct", "=", "Constraint", "(", "self", ".", "__model", ".", "constraints", ")", "model_ct", "=", "self", ".", "__model", ".", "constraints", "[", "ct", ".", "Index", "(", ")", "]", "model_ct", ".", "lin_max", ".", "exprs", ".", "extend", "(", "[", "self", ".", "ParseLinearExpression", "(", "x", ",", "True", ")", "for", "x", "in", "exprs", "]", ")", "model_ct", ".", "lin_max", ".", "target", ".", "CopyFrom", "(", "self", ".", "ParseLinearExpression", "(", "target", ",", "True", ")", ")", "return", "ct" ]
https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/ortools/sat/python/cp_model.py#L1495-L1503
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/tools/jinja2/nodes.py
python
Node.set_environment
(self, environment)
return self
Set the environment for all nodes.
Set the environment for all nodes.
[ "Set", "the", "environment", "for", "all", "nodes", "." ]
def set_environment(self, environment): """Set the environment for all nodes.""" todo = deque([self]) while todo: node = todo.popleft() node.environment = environment todo.extend(node.iter_child_nodes()) return self
[ "def", "set_environment", "(", "self", ",", "environment", ")", ":", "todo", "=", "deque", "(", "[", "self", "]", ")", "while", "todo", ":", "node", "=", "todo", ".", "popleft", "(", ")", "node", ".", "environment", "=", "environment", "todo", ".", "extend", "(", "node", ".", "iter_child_nodes", "(", ")", ")", "return", "self" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/tools/jinja2/nodes.py#L219-L226
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
build/android/install_emulator_deps.py
python
CheckKVM
()
return os.path.exists('/dev/kvm')
Quickly check whether KVM is enabled. Returns: True iff /dev/kvm exists (Linux only).
Quickly check whether KVM is enabled.
[ "Quickly", "check", "whether", "KVM", "is", "enabled", "." ]
def CheckKVM(): """Quickly check whether KVM is enabled. Returns: True iff /dev/kvm exists (Linux only). """ return os.path.exists('/dev/kvm')
[ "def", "CheckKVM", "(", ")", ":", "return", "os", ".", "path", ".", "exists", "(", "'/dev/kvm'", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/build/android/install_emulator_deps.py#L97-L103
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2.py
python
loadCatalog
(filename)
return ret
Load the catalog and makes its definitions effective for the default external entity loader. It will recurse in SGML CATALOG entries. this function is not thread safe, catalog initialization should preferably be done once at startup
Load the catalog and makes its definitions effective for the default external entity loader. It will recurse in SGML CATALOG entries. this function is not thread safe, catalog initialization should preferably be done once at startup
[ "Load", "the", "catalog", "and", "makes", "its", "definitions", "effective", "for", "the", "default", "external", "entity", "loader", ".", "It", "will", "recurse", "in", "SGML", "CATALOG", "entries", ".", "this", "function", "is", "not", "thread", "safe", "catalog", "initialization", "should", "preferably", "be", "done", "once", "at", "startup" ]
def loadCatalog(filename): """Load the catalog and makes its definitions effective for the default external entity loader. It will recurse in SGML CATALOG entries. this function is not thread safe, catalog initialization should preferably be done once at startup """ ret = libxml2mod.xmlLoadCatalog(filename) return ret
[ "def", "loadCatalog", "(", "filename", ")", ":", "ret", "=", "libxml2mod", ".", "xmlLoadCatalog", "(", "filename", ")", "return", "ret" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2.py#L984-L990