column              type             min    max
nwo                 stringlengths    5      86
sha                 stringlengths    40     40
path                stringlengths    4      189
language            stringclasses    1 value
identifier          stringlengths    1      94
parameters          stringlengths    2      4.03k
argument_list       stringclasses    1 value
return_statement    stringlengths    0      11.5k
docstring           stringlengths    1      33.2k
docstring_summary   stringlengths    0      5.15k
docstring_tokens    sequence
function            stringlengths    34     151k
function_tokens     sequence
url                 stringlengths    90     278
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Code/Tools/waf-1.7.13/waflib/ConfigSet.py
python
ConfigSet.prepend_value
(self, var, val)
Prepends a value to the specified item:: def configure(conf): conf.env.prepend_value('CFLAGS', ['-O2']) The value must be a list or a tuple
Prepends a value to the specified item::
[ "Prepends", "a", "value", "to", "the", "specified", "item", "::" ]
def prepend_value(self, var, val):
    """
    Prepends a value to the specified item::

        def configure(conf):
            conf.env.prepend_value('CFLAGS', ['-O2'])

    The value must be a list or a tuple
    """
    if isinstance(val, str):
        val = [val]
    self.table[var] = val + self._get_list_value_for_modification(var)
[ "def", "prepend_value", "(", "self", ",", "var", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "str", ")", ":", "val", "=", "[", "val", "]", "self", ".", "table", "[", "var", "]", "=", "val", "+", "self", ".", "_get_list_value_for_modification", "(", "var", ")" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/ConfigSet.py#L219-L230
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/core/ops/common.py
python
get_op_result_name
(left, right)
return name
Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string
Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series.
[ "Find", "the", "appropriate", "name", "to", "pin", "to", "an", "operation", "result", ".", "This", "result", "should", "always", "be", "either", "an", "Index", "or", "a", "Series", "." ]
def get_op_result_name(left, right):
    """
    Find the appropriate name to pin to an operation result.
    This result should always be either an Index or a Series.

    Parameters
    ----------
    left : {Series, Index}
    right : object

    Returns
    -------
    name : object
        Usually a string
    """
    if isinstance(right, (ABCSeries, ABCIndex)):
        name = _maybe_match_name(left, right)
    else:
        name = left.name
    return name
[ "def", "get_op_result_name", "(", "left", ",", "right", ")", ":", "if", "isinstance", "(", "right", ",", "(", "ABCSeries", ",", "ABCIndex", ")", ")", ":", "name", "=", "_maybe_match_name", "(", "left", ",", "right", ")", "else", ":", "name", "=", "left", ".", "name", "return", "name" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/core/ops/common.py#L74-L93
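The name-pinning rule above is easy to observe from public pandas (a quick check, not part of the record; behaviour as of recent pandas versions):

import pandas as pd

a = pd.Series([1, 2], name="x")
b = pd.Series([3, 4], name="x")
c = pd.Series([5, 6], name="y")

print((a + b).name)  # 'x'  -> matching names are kept
print((a + c).name)  # None -> mismatched names fall back to None
print((a + 1).name)  # 'x'  -> right operand is not a Series/Index, left name wins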
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/control_flow_util_v2.py
python
unique_fn_name
(scope, name)
return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_")
Returns a unique name to use for a control flow function. Args: scope: A name scope string. name: An identifier for this function (e.g. "true", "body"). Returns: A string, the name to use for the function.
Returns a unique name to use for a control flow function.
[ "Returns", "a", "unique", "name", "to", "use", "for", "a", "control", "flow", "function", "." ]
def unique_fn_name(scope, name):
  """Returns a unique name to use for a control flow function.

  Args:
    scope: A name scope string.
    name: An identifier for this function (e.g. "true", "body").

  Returns:
    A string, the name to use for the function.
  """
  return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_")
[ "def", "unique_fn_name", "(", "scope", ",", "name", ")", ":", "return", "(", "\"%s%s_%s\"", "%", "(", "scope", ",", "name", ",", "ops", ".", "uid", "(", ")", ")", ")", ".", "replace", "(", "\"/\"", ",", "\"_\"", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/control_flow_util_v2.py#L75-L85
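As a rough illustration of the string this produces, with a local counter standing in for ops.uid() (which returns a process-unique integer) and showing the "/" sanitization:

import itertools

_uid = itertools.count(1)  # stand-in for ops.uid()

def unique_fn_name(scope, name):
    return ("%s%s_%s" % (scope, name, next(_uid))).replace("/", "_")

print(unique_fn_name("while/", "body"))  # while_body_1
print(unique_fn_name("while/", "cond"))  # while_cond_2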
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/arrays/timedeltas.py
python
ints_to_td64ns
(data, unit="ns")
return data, copy_made
Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating the integers as multiples of the given timedelta unit. Parameters ---------- data : numpy.ndarray with integer-dtype unit : str, default "ns" The timedelta unit to treat integers as multiples of. Returns ------- numpy.ndarray : timedelta64[ns] array converted from data bool : whether a copy was made
Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating the integers as multiples of the given timedelta unit.
[ "Convert", "an", "ndarray", "with", "integer", "-", "dtype", "to", "timedelta64", "[", "ns", "]", "dtype", "treating", "the", "integers", "as", "multiples", "of", "the", "given", "timedelta", "unit", "." ]
def ints_to_td64ns(data, unit="ns"):
    """
    Convert an ndarray with integer-dtype to timedelta64[ns] dtype,
    treating the integers as multiples of the given timedelta unit.

    Parameters
    ----------
    data : numpy.ndarray with integer-dtype
    unit : str, default "ns"
        The timedelta unit to treat integers as multiples of.

    Returns
    -------
    numpy.ndarray : timedelta64[ns] array converted from data
    bool : whether a copy was made
    """
    copy_made = False
    unit = unit if unit is not None else "ns"

    if data.dtype != np.int64:
        # converting to int64 makes a copy, so we can avoid
        # re-copying later
        data = data.astype(np.int64)
        copy_made = True

    if unit != "ns":
        dtype_str = "timedelta64[{unit}]".format(unit=unit)
        data = data.view(dtype_str)

        # TODO: watch out for overflows when converting from lower-resolution
        data = data.astype("timedelta64[ns]")
        # the astype conversion makes a copy, so we can avoid re-copying later
        copy_made = True
    else:
        data = data.view("timedelta64[ns]")

    return data, copy_made
[ "def", "ints_to_td64ns", "(", "data", ",", "unit", "=", "\"ns\"", ")", ":", "copy_made", "=", "False", "unit", "=", "unit", "if", "unit", "is", "not", "None", "else", "\"ns\"", "if", "data", ".", "dtype", "!=", "np", ".", "int64", ":", "# converting to int64 makes a copy, so we can avoid", "# re-copying later", "data", "=", "data", ".", "astype", "(", "np", ".", "int64", ")", "copy_made", "=", "True", "if", "unit", "!=", "\"ns\"", ":", "dtype_str", "=", "\"timedelta64[{unit}]\"", ".", "format", "(", "unit", "=", "unit", ")", "data", "=", "data", ".", "view", "(", "dtype_str", ")", "# TODO: watch out for overflows when converting from lower-resolution", "data", "=", "data", ".", "astype", "(", "\"timedelta64[ns]\"", ")", "# the astype conversion makes a copy, so we can avoid re-copying later", "copy_made", "=", "True", "else", ":", "data", "=", "data", ".", "view", "(", "\"timedelta64[ns]\"", ")", "return", "data", ",", "copy_made" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/arrays/timedeltas.py#L960-L997
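The view-then-astype dance can be reproduced with plain NumPy, which may make the unit handling easier to follow (a sketch independent of pandas):

import numpy as np

data = np.array([1, 2, 3], dtype=np.int64)

as_s = data.view("timedelta64[s]")      # no copy: reinterpret the int64 buffer as seconds
as_ns = as_s.astype("timedelta64[ns]")  # copy: rescales, 1 s -> 1_000_000_000 ns

print(as_ns.dtype)           # timedelta64[ns]
print(as_ns.view(np.int64))  # [1000000000 2000000000 3000000000]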
francinexue/xuefu
b6ff79747a42e020588c0c0a921048e08fe4680c
cnx/bar.py
python
Bars.getBar
(self, instrument)
return self.__barDict.get(instrument, None)
Returns the :class:`pyalgotrade.bar.Bar` for the given instrument or None if the instrument is not found.
Returns the :class:`pyalgotrade.bar.Bar` for the given instrument or None if the instrument is not found.
[ "Returns", "the", ":", "class", ":", "pyalgotrade", ".", "bar", ".", "Bar", "for", "the", "given", "instrument", "or", "None", "if", "the", "instrument", "is", "not", "found", "." ]
def getBar(self, instrument):
    """Returns the :class:`pyalgotrade.bar.Bar` for the given instrument or None if the instrument is not found."""
    return self.__barDict.get(instrument, None)
[ "def", "getBar", "(", "self", ",", "instrument", ")", ":", "return", "self", ".", "__barDict", ".", "get", "(", "instrument", ",", "None", ")" ]
https://github.com/francinexue/xuefu/blob/b6ff79747a42e020588c0c0a921048e08fe4680c/cnx/bar.py#L298-L300
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/api_key.py
python
APIKey.id
(self)
return self._id
Gets the id of this APIKey. # noqa: E501 :return: The id of this APIKey. # noqa: E501 :rtype: str
Gets the id of this APIKey. # noqa: E501
[ "Gets", "the", "id", "of", "this", "APIKey", ".", "#", "noqa", ":", "E501" ]
def id(self):
    """Gets the id of this APIKey.  # noqa: E501

    :return: The id of this APIKey.  # noqa: E501
    :rtype: str
    """
    return self._id
[ "def", "id", "(", "self", ")", ":", "return", "self", ".", "_id" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/api_key.py#L86-L93
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemWebCommunicator/AWS/common-code/lib/AWSIoTPythonSDK/MQTTLib.py
python
AWSIoTMQTTClient.onMessage
(self, message)
**Description** Callback that gets called when the client receives a new message. The callback registration should happen before calling connect/connectAsync. This callback, if present, will always be triggered regardless of whether there is any message callback registered upon subscribe API call. It is for the purpose to aggregating the processing of received messages in one function. **Syntax** .. code:: python # Register an onMessage callback myAWSIoTMQTTClient.onMessage = myOnMessageCallback **Parameters** *message* - Received MQTT message. It contains the source topic as :code:`message.topic`, and the payload as :code:`message.payload`. **Returns** None
**Description**
[ "**", "Description", "**" ]
def onMessage(self, message):
    """
    **Description**

    Callback that gets called when the client receives a new message. The callback registration should
    happen before calling connect/connectAsync. This callback, if present, will always be triggered
    regardless of whether there is any message callback registered upon subscribe API call. It is for
    the purpose to aggregating the processing of received messages in one function.

    **Syntax**

    .. code:: python

      # Register an onMessage callback
      myAWSIoTMQTTClient.onMessage = myOnMessageCallback

    **Parameters**

    *message* - Received MQTT message. It contains the source topic as :code:`message.topic`, and the
    payload as :code:`message.payload`.

    **Returns**

    None
    """
    pass
[ "def", "onMessage", "(", "self", ",", "message", ")", ":", "pass" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemWebCommunicator/AWS/common-code/lib/AWSIoTPythonSDK/MQTTLib.py#L835-L861
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/plat-mac/videoreader.py
python
_Reader._getpixmapcontent
(self)
return ''.join(rv)
Shuffle the offscreen PixMap data, because it may have funny stride values
Shuffle the offscreen PixMap data, because it may have funny stride values
[ "Shuffle", "the", "offscreen", "PixMap", "data", "because", "it", "may", "have", "funny", "stride", "values" ]
def _getpixmapcontent(self):
    """Shuffle the offscreen PixMap data, because it may have funny stride values"""
    rowbytes = Qdoffs.GetPixRowBytes(self.pixmap)
    width = self.videodescr['width']
    height = self.videodescr['height']
    start = 0
    rv = []
    for i in range(height):
        nextline = Qdoffs.GetPixMapBytes(self.pixmap, start, width*4)
        start = start + rowbytes
        rv.append(nextline)
    return ''.join(rv)
[ "def", "_getpixmapcontent", "(", "self", ")", ":", "rowbytes", "=", "Qdoffs", ".", "GetPixRowBytes", "(", "self", ".", "pixmap", ")", "width", "=", "self", ".", "videodescr", "[", "'width'", "]", "height", "=", "self", ".", "videodescr", "[", "'height'", "]", "start", "=", "0", "rv", "=", "[", "]", "for", "i", "in", "range", "(", "height", ")", ":", "nextline", "=", "Qdoffs", ".", "GetPixMapBytes", "(", "self", ".", "pixmap", ",", "start", ",", "width", "*", "4", ")", "start", "=", "start", "+", "rowbytes", "rv", ".", "append", "(", "nextline", ")", "return", "''", ".", "join", "(", "rv", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/plat-mac/videoreader.py#L235-L246
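The same row-by-row copy appears whenever a framebuffer's stride (bytes per row) exceeds width times bytes-per-pixel; a generic sketch of the idea on a plain bytes buffer (the names are illustrative, nothing here is from the Mac Qdoffs API):

def destride(buf, width, height, bytes_per_pixel, rowbytes):
    # keep only the meaningful width*bpp prefix of each stride-aligned row
    out = []
    for row in range(height):
        start = row * rowbytes
        out.append(buf[start:start + width * bytes_per_pixel])
    return b"".join(out)

# 2x2 RGBA image stored with 12-byte rows (4 padding bytes per row)
buf = bytes(range(24))
print(destride(buf, 2, 2, 4, 12).hex())  # rows 00..07 and 0c..13, padding dropped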
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/fourcircle_utility.py
python
get_spice_group_name
(exp_number)
return 'HB3A_Exp{0}_SPICES'.format(exp_number)
get SPICE TableWorkspaces group name :param exp_number: :param scan_number: :return:
get SPICE TableWorkspaces group name :param exp_number: :param scan_number: :return:
[ "get", "SPICE", "TableWorkspaces", "group", "name", ":", "param", "exp_number", ":", ":", "param", "scan_number", ":", ":", "return", ":" ]
def get_spice_group_name(exp_number):
    """
    get SPICE TableWorkspaces group name
    :param exp_number:
    :param scan_number:
    :return:
    """
    return 'HB3A_Exp{0}_SPICES'.format(exp_number)
[ "def", "get_spice_group_name", "(", "exp_number", ")", ":", "return", "'HB3A_Exp{0}_SPICES'", ".", "format", "(", "exp_number", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/fourcircle_utility.py#L443-L450
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/setuptools/_distutils/command/sdist.py
python
sdist.make_distribution
(self)
Create the source distribution(s). First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'.
Create the source distribution(s). First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'.
[ "Create", "the", "source", "distribution", "(", "s", ")", ".", "First", "we", "create", "the", "release", "tree", "with", "make_release_tree", "()", ";", "then", "we", "create", "all", "required", "archive", "files", "(", "according", "to", "self", ".", "formats", ")", "from", "the", "release", "tree", ".", "Finally", "we", "clean", "up", "by", "blowing", "away", "the", "release", "tree", "(", "unless", "self", ".", "keep_temp", "is", "true", ")", ".", "The", "list", "of", "archive", "files", "created", "is", "stored", "so", "it", "can", "be", "retrieved", "later", "by", "get_archive_files", "()", "." ]
def make_distribution(self):
    """Create the source distribution(s).  First, we create the release
    tree with 'make_release_tree()'; then, we create all required
    archive files (according to 'self.formats') from the release tree.
    Finally, we clean up by blowing away the release tree (unless
    'self.keep_temp' is true).  The list of archive files created is
    stored so it can be retrieved later by 'get_archive_files()'.
    """
    # Don't warn about missing meta-data here -- should be (and is!)
    # done elsewhere.
    base_dir = self.distribution.get_fullname()
    base_name = os.path.join(self.dist_dir, base_dir)

    self.make_release_tree(base_dir, self.filelist.files)
    archive_files = []  # remember names of files we create

    # tar archive must be created last to avoid overwrite and remove
    if 'tar' in self.formats:
        self.formats.append(self.formats.pop(self.formats.index('tar')))

    for fmt in self.formats:
        file = self.make_archive(base_name, fmt, base_dir=base_dir,
                                 owner=self.owner, group=self.group)
        archive_files.append(file)
        self.distribution.dist_files.append(('sdist', '', file))

    self.archive_files = archive_files

    if not self.keep_temp:
        dir_util.remove_tree(base_dir, dry_run=self.dry_run)
[ "def", "make_distribution", "(", "self", ")", ":", "# Don't warn about missing meta-data here -- should be (and is!)", "# done elsewhere.", "base_dir", "=", "self", ".", "distribution", ".", "get_fullname", "(", ")", "base_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dist_dir", ",", "base_dir", ")", "self", ".", "make_release_tree", "(", "base_dir", ",", "self", ".", "filelist", ".", "files", ")", "archive_files", "=", "[", "]", "# remember names of files we create", "# tar archive must be created last to avoid overwrite and remove", "if", "'tar'", "in", "self", ".", "formats", ":", "self", ".", "formats", ".", "append", "(", "self", ".", "formats", ".", "pop", "(", "self", ".", "formats", ".", "index", "(", "'tar'", ")", ")", ")", "for", "fmt", "in", "self", ".", "formats", ":", "file", "=", "self", ".", "make_archive", "(", "base_name", ",", "fmt", ",", "base_dir", "=", "base_dir", ",", "owner", "=", "self", ".", "owner", ",", "group", "=", "self", ".", "group", ")", "archive_files", ".", "append", "(", "file", ")", "self", ".", "distribution", ".", "dist_files", ".", "append", "(", "(", "'sdist'", ",", "''", ",", "file", ")", ")", "self", ".", "archive_files", "=", "archive_files", "if", "not", "self", ".", "keep_temp", ":", "dir_util", ".", "remove_tree", "(", "base_dir", ",", "dry_run", "=", "self", ".", "dry_run", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/setuptools/_distutils/command/sdist.py#L460-L488
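The one non-obvious step above is the reordering that forces the 'tar' format to be archived last; in isolation it is a plain move-to-end on a Python list:

formats = ['tar', 'gztar', 'zip']
if 'tar' in formats:
    # pop 'tar' from wherever it sits and re-append it so it is processed last
    formats.append(formats.pop(formats.index('tar')))
print(formats)  # ['gztar', 'zip', 'tar']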
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/dataview.py
python
DataViewTreeCtrl.GetChildCount
(*args, **kwargs)
return _dataview.DataViewTreeCtrl_GetChildCount(*args, **kwargs)
GetChildCount(self, DataViewItem parent) -> int
GetChildCount(self, DataViewItem parent) -> int
[ "GetChildCount", "(", "self", "DataViewItem", "parent", ")", "-", ">", "int" ]
def GetChildCount(*args, **kwargs):
    """GetChildCount(self, DataViewItem parent) -> int"""
    return _dataview.DataViewTreeCtrl_GetChildCount(*args, **kwargs)
[ "def", "GetChildCount", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_dataview", ".", "DataViewTreeCtrl_GetChildCount", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/dataview.py#L2528-L2530
Slicer/SlicerGitSVNArchive
65e92bb16c2b32ea47a1a66bee71f238891ee1ca
Base/Python/slicer/util.py
python
loadNodeFromFile
(filename, filetype, properties={}, returnNode=False)
return loadedNode
Load node into the scene from a file. :param filename: full path of the file to load. :param filetype: specifies the file type, which determines which IO class will load the file. :param properties: map containing additional parameters for the loading. :param returnNode: Deprecated. If set to true then the method returns status flag and node instead of signalling error by throwing an exception. :return: loaded node (if multiple nodes are loaded then a list of nodes). If returnNode is True then a status flag and loaded node are returned.
Load node into the scene from a file. :param filename: full path of the file to load. :param filetype: specifies the file type, which determines which IO class will load the file. :param properties: map containing additional parameters for the loading. :param returnNode: Deprecated. If set to true then the method returns status flag and node instead of signalling error by throwing an exception. :return: loaded node (if multiple nodes are loaded then a list of nodes). If returnNode is True then a status flag and loaded node are returned.
[ "Load", "node", "into", "the", "scene", "from", "a", "file", ".", ":", "param", "filename", ":", "full", "path", "of", "the", "file", "to", "load", ".", ":", "param", "filetype", ":", "specifies", "the", "file", "type", "which", "determines", "which", "IO", "class", "will", "load", "the", "file", ".", ":", "param", "properties", ":", "map", "containing", "additional", "parameters", "for", "the", "loading", ".", ":", "param", "returnNode", ":", "Deprecated", ".", "If", "set", "to", "true", "then", "the", "method", "returns", "status", "flag", "and", "node", "instead", "of", "signalling", "error", "by", "throwing", "an", "exception", ".", ":", "return", ":", "loaded", "node", "(", "if", "multiple", "nodes", "are", "loaded", "then", "a", "list", "of", "nodes", ")", ".", "If", "returnNode", "is", "True", "then", "a", "status", "flag", "and", "loaded", "node", "are", "returned", "." ]
def loadNodeFromFile(filename, filetype, properties={}, returnNode=False):
    """Load node into the scene from a file.
    :param filename: full path of the file to load.
    :param filetype: specifies the file type, which determines which IO class will load the file.
    :param properties: map containing additional parameters for the loading.
    :param returnNode: Deprecated. If set to true then the method returns status flag and node
      instead of signalling error by throwing an exception.
    :return: loaded node (if multiple nodes are loaded then a list of nodes).
      If returnNode is True then a status flag and loaded node are returned.
    """
    from slicer import app
    from vtk import vtkCollection
    properties['fileName'] = filename

    loadedNodesCollection = vtkCollection()
    success = app.coreIOManager().loadNodes(filetype, properties, loadedNodesCollection)
    loadedNode = loadedNodesCollection.GetItemAsObject(0) if loadedNodesCollection.GetNumberOfItems() > 0 else None

    # Deprecated way of returning status and node
    if returnNode:
        import logging
        logging.warning("loadNodeFromFile `returnNode` argument is deprecated. Loaded node is now returned directly if `returnNode` is not specified.")
        return success, loadedNode

    if not success:
        errorMessage = "Failed to load node from file: " + str(filename)
        raise RuntimeError(errorMessage)

    return loadedNode
[ "def", "loadNodeFromFile", "(", "filename", ",", "filetype", ",", "properties", "=", "{", "}", ",", "returnNode", "=", "False", ")", ":", "from", "slicer", "import", "app", "from", "vtk", "import", "vtkCollection", "properties", "[", "'fileName'", "]", "=", "filename", "loadedNodesCollection", "=", "vtkCollection", "(", ")", "success", "=", "app", ".", "coreIOManager", "(", ")", ".", "loadNodes", "(", "filetype", ",", "properties", ",", "loadedNodesCollection", ")", "loadedNode", "=", "loadedNodesCollection", ".", "GetItemAsObject", "(", "0", ")", "if", "loadedNodesCollection", ".", "GetNumberOfItems", "(", ")", ">", "0", "else", "None", "# Deprecated way of returning status and node", "if", "returnNode", ":", "import", "logging", "logging", ".", "warning", "(", "\"loadNodeFromFile `returnNode` argument is deprecated. Loaded node is now returned directly if `returnNode` is not specified.\"", ")", "return", "success", ",", "loadedNode", "if", "not", "success", ":", "errorMessage", "=", "\"Failed to load node from file: \"", "+", "str", "(", "filename", ")", "raise", "RuntimeError", "(", "errorMessage", ")", "return", "loadedNode" ]
https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Base/Python/slicer/util.py#L367-L395
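Typical use, per the docstring (runnable only inside Slicer's embedded Python; the path and the 'VolumeFile' file type here are illustrative):

import slicer

volumeNode = slicer.util.loadNodeFromFile("/path/to/MRHead.nrrd", "VolumeFile")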
PixarAnimationStudios/USD
faed18ce62c8736b02413635b584a2f637156bad
pxr/usd/usdUtils/complianceChecker.py
python
BaseRuleChecker.CheckLayer
(self, layer)
Check the given SdfLayer.
Check the given SdfLayer.
[ "Check", "the", "given", "SdfLayer", "." ]
def CheckLayer(self, layer):
    """ Check the given SdfLayer. """
    pass
[ "def", "CheckLayer", "(", "self", ",", "layer", ")", ":", "pass" ]
https://github.com/PixarAnimationStudios/USD/blob/faed18ce62c8736b02413635b584a2f637156bad/pxr/usd/usdUtils/complianceChecker.py#L115-L117
NVIDIA/TensorRT
42805f078052daad1a98bc5965974fcffaad0960
demo/BERT/helpers/tokenization.py
python
BasicTokenizer._run_strip_accents
(self, text)
return "".join(output)
Strips accents from a piece of text.
Strips accents from a piece of text.
[ "Strips", "accents", "from", "a", "piece", "of", "text", "." ]
def _run_strip_accents(self, text):
    """Strips accents from a piece of text."""
    text = unicodedata.normalize("NFD", text)
    output = []
    for char in text:
        cat = unicodedata.category(char)
        if cat == "Mn":
            continue
        output.append(char)
    return "".join(output)
[ "def", "_run_strip_accents", "(", "self", ",", "text", ")", ":", "text", "=", "unicodedata", ".", "normalize", "(", "\"NFD\"", ",", "text", ")", "output", "=", "[", "]", "for", "char", "in", "text", ":", "cat", "=", "unicodedata", ".", "category", "(", "char", ")", "if", "cat", "==", "\"Mn\"", ":", "continue", "output", ".", "append", "(", "char", ")", "return", "\"\"", ".", "join", "(", "output", ")" ]
https://github.com/NVIDIA/TensorRT/blob/42805f078052daad1a98bc5965974fcffaad0960/demo/BERT/helpers/tokenization.py#L250-L259
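The NFD-then-drop-combining-marks trick is pure stdlib and easy to check at a REPL (nothing TensorRT-specific):

import unicodedata

def strip_accents(text):
    decomposed = unicodedata.normalize("NFD", text)  # 'é' -> 'e' + combining U+0301
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

print(strip_accents("café naïve"))  # cafe naive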
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib2to3/pytree.py
python
Node._prefix_getter
(self)
return self.children[0].prefix
The whitespace and comments preceding this node in the input.
The whitespace and comments preceding this node in the input.
[ "The", "whitespace", "and", "comments", "preceding", "this", "node", "in", "the", "input", "." ]
def _prefix_getter(self):
    """
    The whitespace and comments preceding this node in the input.
    """
    if not self.children:
        return ""
    return self.children[0].prefix
[ "def", "_prefix_getter", "(", "self", ")", ":", "if", "not", "self", ".", "children", ":", "return", "\"\"", "return", "self", ".", "children", "[", "0", "]", ".", "prefix" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib2to3/pytree.py#L308-L314
GoSSIP-SJTU/TripleDoggy
03648d6b19c812504b14e8b98c8c7b3f443f4e54
examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
python
KScriptGenerator.updateFunctionCallMap
(self, caller, callee)
Maintains a map of functions that are called from other functions
Maintains a map of functions that are called from other functions
[ "Maintains", "a", "map", "of", "functions", "that", "are", "called", "from", "other", "functions" ]
def updateFunctionCallMap(self, caller, callee):
    """Maintains a map of functions that are called from other functions"""
    if not caller in self.calledFunctionTable:
        self.calledFunctionTable[caller] = []
    if not callee in self.calledFunctionTable[caller]:
        self.calledFunctionTable[caller].append(callee)
    if not caller in self.comprehensiveCalledFunctionTable:
        self.comprehensiveCalledFunctionTable[caller] = []
    self.comprehensiveCalledFunctionTable[caller].append(callee)
[ "def", "updateFunctionCallMap", "(", "self", ",", "caller", ",", "callee", ")", ":", "if", "not", "caller", "in", "self", ".", "calledFunctionTable", ":", "self", ".", "calledFunctionTable", "[", "caller", "]", "=", "[", "]", "if", "not", "callee", "in", "self", ".", "calledFunctionTable", "[", "caller", "]", ":", "self", ".", "calledFunctionTable", "[", "caller", "]", ".", "append", "(", "callee", ")", "if", "not", "caller", "in", "self", ".", "comprehensiveCalledFunctionTable", ":", "self", ".", "comprehensiveCalledFunctionTable", "[", "caller", "]", "=", "[", "]", "self", ".", "comprehensiveCalledFunctionTable", "[", "caller", "]", ".", "append", "(", "callee", ")" ]
https://github.com/GoSSIP-SJTU/TripleDoggy/blob/03648d6b19c812504b14e8b98c8c7b3f443f4e54/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py#L56-L64
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/supertooltip.py
python
SuperToolTip.OnWidgetEnter
(self, event)
Starts the :class:`SuperToolTip` timer for creation, handles the ``wx.EVT_ENTER_WINDOW`` event. :param `event`: a :class:`MouseEvent` event to be processed.
Starts the :class:`SuperToolTip` timer for creation, handles the ``wx.EVT_ENTER_WINDOW`` event.
[ "Starts", "the", ":", "class", ":", "SuperToolTip", "timer", "for", "creation", "handles", "the", "wx", ".", "EVT_ENTER_WINDOW", "event", "." ]
def OnWidgetEnter(self, event):
    """
    Starts the :class:`SuperToolTip` timer for creation, handles the ``wx.EVT_ENTER_WINDOW`` event.

    :param `event`: a :class:`MouseEvent` event to be processed.
    """
    if self._superToolTip:
        # Not yet created
        return

    if not self._runningApp.__superToolTip:
        # The running app doesn't want tooltips...
        return

    if not self._widget.GetTopLevelParent().IsActive():
        self._startTimer.Stop()
        return

    if self._startTimer.IsRunning():
        # We are already running
        event.Skip()
        return

    self._startTimer.Start(self._startDelayTime*1000)
    event.Skip()
[ "def", "OnWidgetEnter", "(", "self", ",", "event", ")", ":", "if", "self", ".", "_superToolTip", ":", "# Not yet created", "return", "if", "not", "self", ".", "_runningApp", ".", "__superToolTip", ":", "# The running app doesn't want tooltips...", "return", "if", "not", "self", ".", "_widget", ".", "GetTopLevelParent", "(", ")", ".", "IsActive", "(", ")", ":", "self", ".", "_startTimer", ".", "Stop", "(", ")", "return", "if", "self", ".", "_startTimer", ".", "IsRunning", "(", ")", ":", "# We are already running", "event", ".", "Skip", "(", ")", "return", "self", ".", "_startTimer", ".", "Start", "(", "self", ".", "_startDelayTime", "*", "1000", ")", "event", ".", "Skip", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/supertooltip.py#L840-L864
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/transpose_benchmark.py
python
TransposeBenchmark._run_graph
(self, device, input_shape, perm, num_iters, datatype)
return duration
runs the graph and print its execution time. Args: device: String, the device to run on. input_shape: Shape of the input tensor. perm: A list of ints with the same length as input tensor's dimension. num_iters: Number of iterations to run the benchmark. datatype: numpy data type of the input tensor. Returns: The duration of the run in seconds.
runs the graph and print its execution time.
[ "runs", "the", "graph", "and", "print", "its", "execution", "time", "." ]
def _run_graph(self, device, input_shape, perm, num_iters, datatype):
  """runs the graph and print its execution time.

  Args:
    device: String, the device to run on.
    input_shape: Shape of the input tensor.
    perm: A list of ints with the same length as input tensor's dimension.
    num_iters: Number of iterations to run the benchmark.
    datatype: numpy data type of the input tensor.

  Returns:
    The duration of the run in seconds.
  """
  graph = ops.Graph()
  with graph.as_default():
    outputs = build_graph(device, input_shape, perm, datatype, num_iters)
    with session_lib.Session(graph=graph) as session:
      variables.global_variables_initializer().run()
      # warmup runs
      session.run(outputs)

      start_time = time.time()
      session.run(outputs)
      duration = (time.time() - start_time) / num_iters
      throughput = np.prod(
          np.array(input_shape)) * datatype().itemsize * 2 / duration / 1e9
      print("%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s." %
            (device, str(datatype), str(input_shape).replace(" ", ""),
             str(perm).replace(" ", ""), num_iters, duration, throughput))

  name_template = (
      "transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}")

  self.report_benchmark(
      name=name_template.format(
          device=device,
          dtype=str(datatype).replace(" ", ""),
          inputshape=str(input_shape).replace(" ", ""),
          perm=str(perm).replace(" ", "")).replace(" ", ""),
      iters=num_iters,
      wall_time=duration)

  return duration
[ "def", "_run_graph", "(", "self", ",", "device", ",", "input_shape", ",", "perm", ",", "num_iters", ",", "datatype", ")", ":", "graph", "=", "ops", ".", "Graph", "(", ")", "with", "graph", ".", "as_default", "(", ")", ":", "outputs", "=", "build_graph", "(", "device", ",", "input_shape", ",", "perm", ",", "datatype", ",", "num_iters", ")", "with", "session_lib", ".", "Session", "(", "graph", "=", "graph", ")", "as", "session", ":", "variables", ".", "global_variables_initializer", "(", ")", ".", "run", "(", ")", "# warmup runs", "session", ".", "run", "(", "outputs", ")", "start_time", "=", "time", ".", "time", "(", ")", "session", ".", "run", "(", "outputs", ")", "duration", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "/", "num_iters", "throughput", "=", "np", ".", "prod", "(", "np", ".", "array", "(", "input_shape", ")", ")", "*", "datatype", "(", ")", ".", "itemsize", "*", "2", "/", "duration", "/", "1e9", "print", "(", "\"%s %s inputshape:%s perm:%s %d %.6fsec, %.4fGB/s.\"", "%", "(", "device", ",", "str", "(", "datatype", ")", ",", "str", "(", "input_shape", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "str", "(", "perm", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "num_iters", ",", "duration", ",", "throughput", ")", ")", "name_template", "=", "(", "\"transpose_{device}_{dtype}_input_shape_{inputshape}_perm_{perm}\"", ")", "self", ".", "report_benchmark", "(", "name", "=", "name_template", ".", "format", "(", "device", "=", "device", ",", "dtype", "=", "str", "(", "datatype", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "inputshape", "=", "str", "(", "input_shape", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "perm", "=", "str", "(", "perm", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ",", "iters", "=", "num_iters", ",", "wall_time", "=", "duration", ")", "return", "duration" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/transpose_benchmark.py#L65-L108
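Stripped of the TensorFlow plumbing, the measurement follows the usual warmup-then-time benchmark shape (a generic sketch; fn is assumed to run num_iters iterations internally, as build_graph does above):

import time

def benchmark(fn, num_iters):
    fn()  # warmup: exclude one-time setup and caching costs from the timing
    start = time.time()
    fn()
    return (time.time() - start) / num_iters  # seconds per iteration

print("%.6f sec/iter" % benchmark(lambda: sum(range(1_000_000)), num_iters=1))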
Constellation/iv
64c3a9c7c517063f29d90d449180ea8f6f4d946f
tools/cpplint.py
python
FileInfo.IsSource
(self)
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
File has a source file extension.
File has a source file extension.
[ "File", "has", "a", "source", "file", "extension", "." ]
def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
[ "def", "IsSource", "(", "self", ")", ":", "return", "self", ".", "Extension", "(", ")", "[", "1", ":", "]", "in", "(", "'c'", ",", "'cc'", ",", "'cpp'", ",", "'cxx'", ")" ]
https://github.com/Constellation/iv/blob/64c3a9c7c517063f29d90d449180ea8f6f4d946f/tools/cpplint.py#L944-L946
apache/incubator-mxnet
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py
python
convert_contrib_box_nms
(node, **kwargs)
return nodes
Map MXNet's _contrib_box_nms operator to ONNX
Map MXNet's _contrib_box_nms operator to ONNX
[ "Map", "MXNet", "s", "_contrib_box_nms", "operator", "to", "ONNX" ]
def convert_contrib_box_nms(node, **kwargs):
    """Map MXNet's _contrib_box_nms operator to ONNX
    """
    from onnx.helper import make_node
    name, input_nodes, attrs = get_inputs(node, kwargs)
    input_dtypes = get_input_dtypes(node, kwargs)

    dtype = input_dtypes[0]
    #dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]

    opset_version = kwargs['opset_version']
    if opset_version < 11:
        raise AttributeError('ONNX opset 11 or greater is required to export this operator')

    overlap_thresh = float(attrs.get('overlap_thresh', '0.5'))
    valid_thresh = float(attrs.get('valid_thresh', '0'))
    topk = int(attrs.get('topk', '-1'))
    coord_start = int(attrs.get('coord_start', '2'))
    score_index = int(attrs.get('score_index', '1'))
    id_index = int(attrs.get('id_index', '-1'))
    force_suppress = attrs.get('force_suppress', 'True')
    background_id = int(attrs.get('background_id', '-1'))
    in_format = attrs.get('in_format', 'corner')
    out_format = attrs.get('out_format', 'corner')

    center_point_box = 0 if in_format == 'corner' else 1

    if topk == -1:
        topk = 2**31-1

    if in_format != out_format:
        raise NotImplementedError('box_nms does not currently support in_fomat != out_format')

    if background_id != -1:
        raise NotImplementedError('box_nms does not currently support background_id != -1')

    if id_index != -1 or force_suppress == 'False':
        logging.warning('box_nms: id_idex != -1 or/and force_suppress == False detected. '
                        'However, due to ONNX limitations, boxes of different categories will NOT '
                        'be exempted from suppression. This might lead to different behavior than '
                        'native MXNet')

    create_tensor([coord_start], name+'_cs', kwargs['initializer'])
    create_tensor([coord_start+4], name+'_cs_p4', kwargs['initializer'])
    create_tensor([score_index], name+'_si', kwargs['initializer'])
    create_tensor([score_index+1], name+'_si_p1', kwargs['initializer'])
    create_tensor([topk], name+'_topk', kwargs['initializer'])
    create_tensor([overlap_thresh], name+'_ot', kwargs['initializer'], dtype=np.float32)
    create_tensor([valid_thresh], name+'_vt', kwargs['initializer'], dtype=np.float32)
    create_tensor([-1], name+'_m1', kwargs['initializer'])
    create_tensor([-1], name+'_m1_f', kwargs['initializer'], dtype=dtype)
    create_tensor([0], name+'_0', kwargs['initializer'])
    create_tensor([1], name+'_1', kwargs['initializer'])
    create_tensor([2], name+'_2', kwargs['initializer'])
    create_tensor([3], name+'_3', kwargs['initializer'])
    create_tensor([0, 1, -1], name+'_scores_shape', kwargs['initializer'])
    create_tensor([0, 0, 1, 0], name+'_pad', kwargs['initializer'])
    create_tensor([0, -1], name+'_bat_spat_helper', kwargs['initializer'])
    create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
    create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)

    nodes = [
        make_node('Shape', [input_nodes[0]], [name+'_shape']),
        make_node('Shape', [name+'_shape'], [name+'_dim']),
        make_node('Sub', [name+'_dim', name+'_2'], [name+'_dim_m2']),
        make_node('Slice', [name+'_shape', name+'_dim_m2', name+'_dim'], [name+'_shape_last2']),
        make_node('Concat', [name+'_m1', name+'_shape_last2'], [name+'_shape_3d'], axis=0),
        make_node('Reshape', [input_nodes[0], name+'_shape_3d'], [name+'_data_3d']),
        make_node('Slice', [name+'_data_3d', name+'_cs', name+'_cs_p4', name+'_m1'],
                  [name+'_boxes']),
        make_node('Slice', [name+'_data_3d', name+'_si', name+'_si_p1', name+'_m1'],
                  [name+'_scores_raw']),
        make_node('Reshape', [name+'_scores_raw', name+'_scores_shape'], [name+'_scores']),
        make_node('Shape', [name+'_scores'], [name+'_scores_shape_actual']),
        make_node('NonMaxSuppression',
                  [name+'_boxes', name+'_scores', name+'_topk', name+'_ot', name+'_vt'],
                  [name+'_nms'], center_point_box=center_point_box),
        make_node('Slice', [name+'_nms', name+'_0', name+'_3', name+'_m1', name+'_2'],
                  [name+'_nms_sliced']),
        make_node('GatherND', [name+'_data_3d', name+'_nms_sliced'], [name+'_candidates']),
        make_node('Pad', [name+'_candidates', name+'_pad', name+'_m1_f'], [name+'_cand_padded']),
        make_node('Shape', [name+'_nms'], [name+'_nms_shape']),
        make_node('Slice', [name+'_nms_shape', name+'_0', name+'_1'], [name+'_cand_cnt']),
        make_node('Squeeze', [name+'_cand_cnt'], [name+'_cc_s'], axes=[0]),
        make_node('Range', [name+'_0_s', name+'_cc_s', name+'_1_s'], [name+'_cand_indices']),
        make_node('Slice', [name+'_scores_shape_actual', name+'_0', name+'_3',
                            name+'_m1', name+'_2'], [name+'_shape_bat_spat']),
        make_node('Slice', [name+'_shape_bat_spat', name+'_1', name+'_2'], [name+'_spat_dim']),
        make_node('Expand', [name+'_cand_cnt', name+'_shape_bat_spat'], [name+'_base_indices']),
        make_node('ScatterND', [name+'_base_indices', name+'_nms_sliced', name+'_cand_indices'],
                  [name+'_indices']),
        make_node('TopK', [name+'_indices', name+'_spat_dim'],
                  [name+'_indices_sorted', name+'__'], largest=0, axis=-1, sorted=1),
        make_node('Gather', [name+'_cand_padded', name+'_indices_sorted'], [name+'_gather']),
        make_node('Reshape', [name+'_gather', name+'_shape'], [name+'0'])
    ]

    return nodes
[ "def", "convert_contrib_box_nms", "(", "node", ",", "*", "*", "kwargs", ")", ":", "from", "onnx", ".", "helper", "import", "make_node", "name", ",", "input_nodes", ",", "attrs", "=", "get_inputs", "(", "node", ",", "kwargs", ")", "input_dtypes", "=", "get_input_dtypes", "(", "node", ",", "kwargs", ")", "dtype", "=", "input_dtypes", "[", "0", "]", "#dtype_t = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]", "opset_version", "=", "kwargs", "[", "'opset_version'", "]", "if", "opset_version", "<", "11", ":", "raise", "AttributeError", "(", "'ONNX opset 11 or greater is required to export this operator'", ")", "overlap_thresh", "=", "float", "(", "attrs", ".", "get", "(", "'overlap_thresh'", ",", "'0.5'", ")", ")", "valid_thresh", "=", "float", "(", "attrs", ".", "get", "(", "'valid_thresh'", ",", "'0'", ")", ")", "topk", "=", "int", "(", "attrs", ".", "get", "(", "'topk'", ",", "'-1'", ")", ")", "coord_start", "=", "int", "(", "attrs", ".", "get", "(", "'coord_start'", ",", "'2'", ")", ")", "score_index", "=", "int", "(", "attrs", ".", "get", "(", "'score_index'", ",", "'1'", ")", ")", "id_index", "=", "int", "(", "attrs", ".", "get", "(", "'id_index'", ",", "'-1'", ")", ")", "force_suppress", "=", "attrs", ".", "get", "(", "'force_suppress'", ",", "'True'", ")", "background_id", "=", "int", "(", "attrs", ".", "get", "(", "'background_id'", ",", "'-1'", ")", ")", "in_format", "=", "attrs", ".", "get", "(", "'in_format'", ",", "'corner'", ")", "out_format", "=", "attrs", ".", "get", "(", "'out_format'", ",", "'corner'", ")", "center_point_box", "=", "0", "if", "in_format", "==", "'corner'", "else", "1", "if", "topk", "==", "-", "1", ":", "topk", "=", "2", "**", "31", "-", "1", "if", "in_format", "!=", "out_format", ":", "raise", "NotImplementedError", "(", "'box_nms does not currently support in_fomat != out_format'", ")", "if", "background_id", "!=", "-", "1", ":", "raise", "NotImplementedError", "(", "'box_nms does not currently support background_id != -1'", ")", "if", "id_index", "!=", "-", "1", "or", "force_suppress", "==", "'False'", ":", "logging", ".", "warning", "(", "'box_nms: id_idex != -1 or/and force_suppress == False detected. '", "'However, due to ONNX limitations, boxes of different categories will NOT '", "'be exempted from suppression. 
This might lead to different behavior than '", "'native MXNet'", ")", "create_tensor", "(", "[", "coord_start", "]", ",", "name", "+", "'_cs'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "coord_start", "+", "4", "]", ",", "name", "+", "'_cs_p4'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "score_index", "]", ",", "name", "+", "'_si'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "score_index", "+", "1", "]", ",", "name", "+", "'_si_p1'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "topk", "]", ",", "name", "+", "'_topk'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "overlap_thresh", "]", ",", "name", "+", "'_ot'", ",", "kwargs", "[", "'initializer'", "]", ",", "dtype", "=", "np", ".", "float32", ")", "create_tensor", "(", "[", "valid_thresh", "]", ",", "name", "+", "'_vt'", ",", "kwargs", "[", "'initializer'", "]", ",", "dtype", "=", "np", ".", "float32", ")", "create_tensor", "(", "[", "-", "1", "]", ",", "name", "+", "'_m1'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "-", "1", "]", ",", "name", "+", "'_m1_f'", ",", "kwargs", "[", "'initializer'", "]", ",", "dtype", "=", "dtype", ")", "create_tensor", "(", "[", "0", "]", ",", "name", "+", "'_0'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "1", "]", ",", "name", "+", "'_1'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "2", "]", ",", "name", "+", "'_2'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "3", "]", ",", "name", "+", "'_3'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "0", ",", "1", ",", "-", "1", "]", ",", "name", "+", "'_scores_shape'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "0", ",", "0", ",", "1", ",", "0", "]", ",", "name", "+", "'_pad'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_tensor", "(", "[", "0", ",", "-", "1", "]", ",", "name", "+", "'_bat_spat_helper'", ",", "kwargs", "[", "'initializer'", "]", ")", "create_const_scalar_node", "(", "name", "+", "\"_0_s\"", ",", "np", ".", "int64", "(", "0", ")", ",", "kwargs", ")", "create_const_scalar_node", "(", "name", "+", "\"_1_s\"", ",", "np", ".", "int64", "(", "1", ")", ",", "kwargs", ")", "nodes", "=", "[", "make_node", "(", "'Shape'", ",", "[", "input_nodes", "[", "0", "]", "]", ",", "[", "name", "+", "'_shape'", "]", ")", ",", "make_node", "(", "'Shape'", ",", "[", "name", "+", "'_shape'", "]", ",", "[", "name", "+", "'_dim'", "]", ")", ",", "make_node", "(", "'Sub'", ",", "[", "name", "+", "'_dim'", ",", "name", "+", "'_2'", "]", ",", "[", "name", "+", "'_dim_m2'", "]", ")", ",", "make_node", "(", "'Slice'", ",", "[", "name", "+", "'_shape'", ",", "name", "+", "'_dim_m2'", ",", "name", "+", "'_dim'", "]", ",", "[", "name", "+", "'_shape_last2'", "]", ")", ",", "make_node", "(", "'Concat'", ",", "[", "name", "+", "'_m1'", ",", "name", "+", "'_shape_last2'", "]", ",", "[", "name", "+", "'_shape_3d'", "]", ",", "axis", "=", "0", ")", ",", "make_node", "(", "'Reshape'", ",", "[", "input_nodes", "[", "0", "]", ",", "name", "+", "'_shape_3d'", "]", ",", "[", "name", "+", "'_data_3d'", "]", ")", ",", "make_node", "(", "'Slice'", ",", "[", "name", "+", "'_data_3d'", ",", "name", "+", "'_cs'", ",", "name", "+", "'_cs_p4'", ",", "name", "+", "'_m1'", "]", ",", "[", "name", "+", "'_boxes'", "]", ")", ",", "make_node", "(", "'Slice'", ",", 
"[", "name", "+", "'_data_3d'", ",", "name", "+", "'_si'", ",", "name", "+", "'_si_p1'", ",", "name", "+", "'_m1'", "]", ",", "[", "name", "+", "'_scores_raw'", "]", ")", ",", "make_node", "(", "'Reshape'", ",", "[", "name", "+", "'_scores_raw'", ",", "name", "+", "'_scores_shape'", "]", ",", "[", "name", "+", "'_scores'", "]", ")", ",", "make_node", "(", "'Shape'", ",", "[", "name", "+", "'_scores'", "]", ",", "[", "name", "+", "'_scores_shape_actual'", "]", ")", ",", "make_node", "(", "'NonMaxSuppression'", ",", "[", "name", "+", "'_boxes'", ",", "name", "+", "'_scores'", ",", "name", "+", "'_topk'", ",", "name", "+", "'_ot'", ",", "name", "+", "'_vt'", "]", ",", "[", "name", "+", "'_nms'", "]", ",", "center_point_box", "=", "center_point_box", ")", ",", "make_node", "(", "'Slice'", ",", "[", "name", "+", "'_nms'", ",", "name", "+", "'_0'", ",", "name", "+", "'_3'", ",", "name", "+", "'_m1'", ",", "name", "+", "'_2'", "]", ",", "[", "name", "+", "'_nms_sliced'", "]", ")", ",", "make_node", "(", "'GatherND'", ",", "[", "name", "+", "'_data_3d'", ",", "name", "+", "'_nms_sliced'", "]", ",", "[", "name", "+", "'_candidates'", "]", ")", ",", "make_node", "(", "'Pad'", ",", "[", "name", "+", "'_candidates'", ",", "name", "+", "'_pad'", ",", "name", "+", "'_m1_f'", "]", ",", "[", "name", "+", "'_cand_padded'", "]", ")", ",", "make_node", "(", "'Shape'", ",", "[", "name", "+", "'_nms'", "]", ",", "[", "name", "+", "'_nms_shape'", "]", ")", ",", "make_node", "(", "'Slice'", ",", "[", "name", "+", "'_nms_shape'", ",", "name", "+", "'_0'", ",", "name", "+", "'_1'", "]", ",", "[", "name", "+", "'_cand_cnt'", "]", ")", ",", "make_node", "(", "'Squeeze'", ",", "[", "name", "+", "'_cand_cnt'", "]", ",", "[", "name", "+", "'_cc_s'", "]", ",", "axes", "=", "[", "0", "]", ")", ",", "make_node", "(", "'Range'", ",", "[", "name", "+", "'_0_s'", ",", "name", "+", "'_cc_s'", ",", "name", "+", "'_1_s'", "]", ",", "[", "name", "+", "'_cand_indices'", "]", ")", ",", "make_node", "(", "'Slice'", ",", "[", "name", "+", "'_scores_shape_actual'", ",", "name", "+", "'_0'", ",", "name", "+", "'_3'", ",", "name", "+", "'_m1'", ",", "name", "+", "'_2'", "]", ",", "[", "name", "+", "'_shape_bat_spat'", "]", ")", ",", "make_node", "(", "'Slice'", ",", "[", "name", "+", "'_shape_bat_spat'", ",", "name", "+", "'_1'", ",", "name", "+", "'_2'", "]", ",", "[", "name", "+", "'_spat_dim'", "]", ")", ",", "make_node", "(", "'Expand'", ",", "[", "name", "+", "'_cand_cnt'", ",", "name", "+", "'_shape_bat_spat'", "]", ",", "[", "name", "+", "'_base_indices'", "]", ")", ",", "make_node", "(", "'ScatterND'", ",", "[", "name", "+", "'_base_indices'", ",", "name", "+", "'_nms_sliced'", ",", "name", "+", "'_cand_indices'", "]", ",", "[", "name", "+", "'_indices'", "]", ")", ",", "make_node", "(", "'TopK'", ",", "[", "name", "+", "'_indices'", ",", "name", "+", "'_spat_dim'", "]", ",", "[", "name", "+", "'_indices_sorted'", ",", "name", "+", "'__'", "]", ",", "largest", "=", "0", ",", "axis", "=", "-", "1", ",", "sorted", "=", "1", ")", ",", "make_node", "(", "'Gather'", ",", "[", "name", "+", "'_cand_padded'", ",", "name", "+", "'_indices_sorted'", "]", ",", "[", "name", "+", "'_gather'", "]", ")", ",", "make_node", "(", "'Reshape'", ",", "[", "name", "+", "'_gather'", ",", "name", "+", "'_shape'", "]", ",", "[", "name", "+", "'0'", "]", ")", "]", "return", "nodes" ]
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/onnx/mx2onnx/_op_translations/_op_translations_opset12.py#L3596-L3693
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/dateutil/parser/isoparser.py
python
isoparser.parse_isodate
(self, datestr)
return date(*components)
Parse the date portion of an ISO string. :param datestr: The string portion of an ISO string, without a separator :return: Returns a :class:`datetime.date` object
Parse the date portion of an ISO string.
[ "Parse", "the", "date", "portion", "of", "an", "ISO", "string", "." ]
def parse_isodate(self, datestr):
    """
    Parse the date portion of an ISO string.

    :param datestr:
        The string portion of an ISO string, without a separator

    :return:
        Returns a :class:`datetime.date` object
    """
    components, pos = self._parse_isodate(datestr)
    if pos < len(datestr):
        raise ValueError('String contains unknown ISO ' +
                         'components: {}'.format(datestr))
    return date(*components)
[ "def", "parse_isodate", "(", "self", ",", "datestr", ")", ":", "components", ",", "pos", "=", "self", ".", "_parse_isodate", "(", "datestr", ")", "if", "pos", "<", "len", "(", "datestr", ")", ":", "raise", "ValueError", "(", "'String contains unknown ISO '", "+", "'components: {}'", ".", "format", "(", "datestr", ")", ")", "return", "date", "(", "*", "components", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/dateutil/parser/isoparser.py#L149-L163
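This is part of dateutil's public API, so the behaviour is easy to check directly (dateutil >= 2.7; the ISO week-date form is also accepted):

from dateutil.parser import isoparser

p = isoparser()
print(p.parse_isodate("2014-02-05"))  # 2014-02-05 (a datetime.date)
print(p.parse_isodate("2014-W06-3"))  # 2014-02-05, the same day via an ISO week date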
lmb-freiburg/flownet2
b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc
python/caffe/io.py
python
blobprotovector_str_to_arraylist
(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
Converts a serialized blobprotovec to a list of arrays.
Converts a serialized blobprotovec to a list of arrays.
[ "Converts", "a", "serialized", "blobprotovec", "to", "a", "list", "of", "arrays", "." ]
def blobprotovector_str_to_arraylist(str):
    """Converts a serialized blobprotovec to a list of arrays.
    """
    vec = caffe_pb2.BlobProtoVector()
    vec.ParseFromString(str)
    return [blobproto_to_array(blob) for blob in vec.blobs]
[ "def", "blobprotovector_str_to_arraylist", "(", "str", ")", ":", "vec", "=", "caffe_pb2", ".", "BlobProtoVector", "(", ")", "vec", ".", "ParseFromString", "(", "str", ")", "return", "[", "blobproto_to_array", "(", "blob", ")", "for", "blob", "in", "vec", ".", "blobs", "]" ]
https://github.com/lmb-freiburg/flownet2/blob/b92e198b56b0e52e1ba0a5a98dc0e39fa5ae70cc/python/caffe/io.py#L58-L63
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
ppapi/generators/idl_gen_pnacl.py
python
PnaclGen.TypeNeedsWrapping
(self, type_node, array_dims)
return is_aggregate and not is_reference
Return true if a parameter type needs wrapping. Currently, this is true for byval aggregates.
Return true if a parameter type needs wrapping. Currently, this is true for byval aggregates.
[ "Return", "true", "if", "a", "parameter", "type", "needs", "wrapping", ".", "Currently", "this", "is", "true", "for", "byval", "aggregates", "." ]
def TypeNeedsWrapping(self, type_node, array_dims):
    """Return true if a parameter type needs wrapping.
    Currently, this is true for byval aggregates.
    """
    is_aggregate = type_node.startswith('struct') or \
        type_node.startswith('union')
    is_reference = (type_node.find('*') != -1 or array_dims != [])
    return is_aggregate and not is_reference
[ "def", "TypeNeedsWrapping", "(", "self", ",", "type_node", ",", "array_dims", ")", ":", "is_aggregate", "=", "type_node", ".", "startswith", "(", "'struct'", ")", "or", "type_node", ".", "startswith", "(", "'union'", ")", "is_reference", "=", "(", "type_node", ".", "find", "(", "'*'", ")", "!=", "-", "1", "or", "array_dims", "!=", "[", "]", ")", "return", "is_aggregate", "and", "not", "is_reference" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/ppapi/generators/idl_gen_pnacl.py#L103-L110
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/closure_linter/closure_linter/ecmametadatapass.py
python
EcmaMetaDataPass._GetOperatorType
(self, token)
return EcmaMetaData.BINARY_OPERATOR
Returns the operator type of the given operator token. Args: token: The token to get arity for. Returns: The type of the operator. One of the *_OPERATOR constants defined in EcmaMetaData.
Returns the operator type of the given operator token.
[ "Returns", "the", "operator", "type", "of", "the", "given", "operator", "token", "." ]
def _GetOperatorType(self, token):
  """Returns the operator type of the given operator token.

  Args:
    token: The token to get arity for.

  Returns:
    The type of the operator.  One of the *_OPERATOR constants defined in
    EcmaMetaData.
  """
  if token.string == '?':
    return EcmaMetaData.TERNARY_OPERATOR

  if token.string in TokenType.UNARY_OPERATORS:
    return EcmaMetaData.UNARY_OPERATOR

  last_code = token.metadata.last_code
  if not last_code or last_code.type == TokenType.END_BLOCK:
    return EcmaMetaData.UNARY_OPERATOR

  if (token.string in TokenType.UNARY_POST_OPERATORS and
      last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
    return EcmaMetaData.UNARY_POST_OPERATOR

  if (token.string in TokenType.UNARY_OK_OPERATORS and
      last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and
      last_code.string not in TokenType.UNARY_POST_OPERATORS):
    return EcmaMetaData.UNARY_OPERATOR

  return EcmaMetaData.BINARY_OPERATOR
[ "def", "_GetOperatorType", "(", "self", ",", "token", ")", ":", "if", "token", ".", "string", "==", "'?'", ":", "return", "EcmaMetaData", ".", "TERNARY_OPERATOR", "if", "token", ".", "string", "in", "TokenType", ".", "UNARY_OPERATORS", ":", "return", "EcmaMetaData", ".", "UNARY_OPERATOR", "last_code", "=", "token", ".", "metadata", ".", "last_code", "if", "not", "last_code", "or", "last_code", ".", "type", "==", "TokenType", ".", "END_BLOCK", ":", "return", "EcmaMetaData", ".", "UNARY_OPERATOR", "if", "(", "token", ".", "string", "in", "TokenType", ".", "UNARY_POST_OPERATORS", "and", "last_code", ".", "type", "in", "TokenType", ".", "EXPRESSION_ENDER_TYPES", ")", ":", "return", "EcmaMetaData", ".", "UNARY_POST_OPERATOR", "if", "(", "token", ".", "string", "in", "TokenType", ".", "UNARY_OK_OPERATORS", "and", "last_code", ".", "type", "not", "in", "TokenType", ".", "EXPRESSION_ENDER_TYPES", "and", "last_code", ".", "string", "not", "in", "TokenType", ".", "UNARY_POST_OPERATORS", ")", ":", "return", "EcmaMetaData", ".", "UNARY_OPERATOR", "return", "EcmaMetaData", ".", "BINARY_OPERATOR" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/closure_linter/closure_linter/ecmametadatapass.py#L545-L574
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/decimal.py
python
Context.__repr__
(self)
return ', '.join(s) + ')'
Show the current context.
Show the current context.
[ "Show", "the", "current", "context", "." ]
def __repr__(self):
    """Show the current context."""
    s = []
    s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
             'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d'
             % vars(self))
    names = [f.__name__ for f, v in self.flags.items() if v]
    s.append('flags=[' + ', '.join(names) + ']')
    names = [t.__name__ for t, v in self.traps.items() if v]
    s.append('traps=[' + ', '.join(names) + ']')
    return ', '.join(s) + ')'
[ "def", "__repr__", "(", "self", ")", ":", "s", "=", "[", "]", "s", ".", "append", "(", "'Context(prec=%(prec)d, rounding=%(rounding)s, '", "'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d'", "%", "vars", "(", "self", ")", ")", "names", "=", "[", "f", ".", "__name__", "for", "f", ",", "v", "in", "self", ".", "flags", ".", "items", "(", ")", "if", "v", "]", "s", ".", "append", "(", "'flags=['", "+", "', '", ".", "join", "(", "names", ")", "+", "']'", ")", "names", "=", "[", "t", ".", "__name__", "for", "t", ",", "v", "in", "self", ".", "traps", ".", "items", "(", ")", "if", "v", "]", "s", ".", "append", "(", "'traps=['", "+", "', '", ".", "join", "(", "names", ")", "+", "']'", ")", "return", "', '", ".", "join", "(", "s", ")", "+", "')'" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/decimal.py#L3820-L3830
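A minimal usage sketch for the record above, using only the stdlib decimal module: repr() of the active context surfaces exactly the fields the method assembles (prec, rounding, Emin/Emax, capitals, plus the set flags and traps).

import decimal

ctx = decimal.getcontext()    # the current thread's arithmetic context
print(repr(ctx))              # Context(prec=28, rounding=ROUND_HALF_EVEN, ..., flags=[], traps=[...])
ctx.prec = 10                 # any change is reflected the next time repr() runs
print(repr(ctx))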
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/tpu/feature_column.py
python
split_sequence_columns
(feature_columns)
return sequence_columns, non_sequence_columns
Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns. For use in a TPUEstimator model_fn function. E.g. def model_fn(features): sequence_columns, feature_columns = ( tf.tpu.feature_column.split_sequence_columns(feature_columns)) input = tf.feature_column.input_layer( features=features, feature_columns=feature_columns) sequence_features, sequence_lengths = ( tf.contrib.feature_column.sequence_input_layer( features=features, feature_columns=sequence_columns)) Args: feature_columns: A list of _TPUEmbeddingColumns to split. Returns: Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the second is the non-sequence columns.
Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns.
[ "Split", "a", "list", "of", "_TPUEmbeddingColumn", "into", "sequence", "and", "non", "-", "sequence", "columns", "." ]
def split_sequence_columns(feature_columns): """Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns. For use in a TPUEstimator model_fn function. E.g. def model_fn(features): sequence_columns, feature_columns = ( tf.tpu.feature_column.split_sequence_columns(feature_columns)) input = tf.feature_column.input_layer( features=features, feature_columns=feature_columns) sequence_features, sequence_lengths = ( tf.contrib.feature_column.sequence_input_layer( features=features, feature_columns=sequence_columns)) Args: feature_columns: A list of _TPUEmbeddingColumns to split. Returns: Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the second is the non-sequence columns. """ sequence_columns = [] non_sequence_columns = [] for column in feature_columns: if not isinstance(column, (_TPUEmbeddingColumn, _TPUSharedEmbeddingColumn)): raise TypeError( 'column must be a _TPUEmbeddingColumn or _TPUSharedEmbeddingColumn ' 'but got %s instead.' % (type(column))) if column.is_sequence_column(): sequence_columns.append(column) else: non_sequence_columns.append(column) return sequence_columns, non_sequence_columns
[ "def", "split_sequence_columns", "(", "feature_columns", ")", ":", "sequence_columns", "=", "[", "]", "non_sequence_columns", "=", "[", "]", "for", "column", "in", "feature_columns", ":", "if", "not", "isinstance", "(", "column", ",", "(", "_TPUEmbeddingColumn", ",", "_TPUSharedEmbeddingColumn", ")", ")", ":", "raise", "TypeError", "(", "'column must be a _TPUEmbeddingColumn or _TPUSharedEmbeddingColumn '", "'but got %s instead.'", "%", "(", "type", "(", "column", ")", ")", ")", "if", "column", ".", "is_sequence_column", "(", ")", ":", "sequence_columns", ".", "append", "(", "column", ")", "else", ":", "non_sequence_columns", ".", "append", "(", "column", ")", "return", "sequence_columns", ",", "non_sequence_columns" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/tpu/feature_column.py#L687-L719
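The split above is a plain partition-by-predicate with a type guard in front. A standalone sketch of the same shape, using hypothetical Column/SequenceColumn stand-ins rather than the real _TPUEmbeddingColumn types:

# Generic partition-by-predicate sketch; Column/SequenceColumn are hypothetical stand-ins.
class Column:
    def is_sequence_column(self):
        return False

class SequenceColumn(Column):
    def is_sequence_column(self):
        return True

def split_columns(columns):
    sequence, non_sequence = [], []
    for col in columns:
        if not isinstance(col, Column):
            raise TypeError('column must be a Column but got %s instead.' % type(col))
        (sequence if col.is_sequence_column() else non_sequence).append(col)
    return sequence, non_sequence

seq, rest = split_columns([Column(), SequenceColumn(), Column()])
assert len(seq) == 1 and len(rest) == 2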
tfwu/FaceDetection-ConvNet-3D
f9251c48eb40c5aec8fba7455115c355466555be
python/build/lib.linux-x86_64-2.7/mxnet/context.py
python
cpu
(device_id=0)
return Context('cpu', device_id)
Return a CPU context. This function is a short cut for Context('cpu', device_id) Parameters ---------- device_id : int, optional The device id of the device. device_id is not needed for CPU. This is included to make interface compatible with GPU. Returns ------- context : Context The corresponding CPU context.
Return a CPU context.
[ "Return", "a", "CPU", "context", "." ]
def cpu(device_id=0): """Return a CPU context. This function is a short cut for Context('cpu', device_id) Parameters ---------- device_id : int, optional The device id of the device. device_id is not needed for CPU. This is included to make interface compatible with GPU. Returns ------- context : Context The corresponding CPU context. """ return Context('cpu', device_id)
[ "def", "cpu", "(", "device_id", "=", "0", ")", ":", "return", "Context", "(", "'cpu'", ",", "device_id", ")" ]
https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/build/lib.linux-x86_64-2.7/mxnet/context.py#L71-L87
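A usage sketch, assuming the classic MXNet package is installed; cpu() simply saves typing Context('cpu', 0):

import mxnet as mx

ctx = mx.cpu()                     # same as mx.Context('cpu', 0)
a = mx.nd.ones((2, 3), ctx=ctx)    # allocate an NDArray on that device
print(a.context)                   # cpu(0)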
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py
python
GatherEncoder.__init__
(self, tensorspec, commuting_structure, state_update_aggregation_modes, initial_state_fn, get_params_fn, encode_fn, decode_before_sum_fn, decode_after_sum_fn, update_state_fn)
Creates a `GatherEncoder` for encoding `tensorspec`-like values. This class should not be instantiated directly. Instead, use the provided `@classmethod`. Args: tensorspec: A `tf.TensorSpec`. The created `GatherEncoder` will be constrained to only encode input values compatible with `tensorspec`. commuting_structure: The commuting structure of the `GatherEncoder`. state_update_aggregation_modes: The `StageAggregationMode` values to be used to aggregate `state_update_tensors` initial_state_fn: A `tf.function`. get_params_fn: A `tf.function`. encode_fn: A `tf.function`. decode_before_sum_fn: A `tf.function`. decode_after_sum_fn: A `tf.function`. update_state_fn: A `tf.function`. Returns: A `GatherEncoder`.
Creates a `GatherEncoder` for encoding `tensorspec`-like values.
[ "Creates", "a", "GatherEncoder", "for", "encoding", "tensorspec", "-", "like", "values", "." ]
def __init__(self, tensorspec, commuting_structure, state_update_aggregation_modes, initial_state_fn, get_params_fn, encode_fn, decode_before_sum_fn, decode_after_sum_fn, update_state_fn): """Creates a `GatherEncoder` for encoding `tensorspec`-like values. This class should not be instantiated directly. Instead, use the provided `@classmethod`. Args: tensorspec: A `tf.TensorSpec`. The created `GatherEncoder` will be constrained to only encode input values compatible with `tensorspec`. commuting_structure: The commuting structure of the `GatherEncoder`. state_update_aggregation_modes: The `StageAggregationMode` values to be used to aggregate `state_update_tensors` initial_state_fn: A `tf.function`. get_params_fn: A `tf.function`. encode_fn: A `tf.function`. decode_before_sum_fn: A `tf.function`. decode_after_sum_fn: A `tf.function`. update_state_fn: A `tf.function`. Returns: A `GatherEncoder`. """ self._tensorspec = tensorspec self._commuting_structure = commuting_structure self._state_update_aggregation_modes = state_update_aggregation_modes self._initial_state_fn = initial_state_fn self._get_params_fn = get_params_fn self._encode_fn = encode_fn self._decode_before_sum_fn = decode_before_sum_fn self._decode_after_sum_fn = decode_after_sum_fn self._update_state_fn = update_state_fn
[ "def", "__init__", "(", "self", ",", "tensorspec", ",", "commuting_structure", ",", "state_update_aggregation_modes", ",", "initial_state_fn", ",", "get_params_fn", ",", "encode_fn", ",", "decode_before_sum_fn", ",", "decode_after_sum_fn", ",", "update_state_fn", ")", ":", "self", ".", "_tensorspec", "=", "tensorspec", "self", ".", "_commuting_structure", "=", "commuting_structure", "self", ".", "_state_update_aggregation_modes", "=", "state_update_aggregation_modes", "self", ".", "_initial_state_fn", "=", "initial_state_fn", "self", ".", "_get_params_fn", "=", "get_params_fn", "self", ".", "_encode_fn", "=", "encode_fn", "self", ".", "_decode_before_sum_fn", "=", "decode_before_sum_fn", "self", ".", "_decode_after_sum_fn", "=", "decode_after_sum_fn", "self", ".", "_update_state_fn", "=", "update_state_fn" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow2.x/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py#L85-L119
rdkit/rdkit
ede860ae316d12d8568daf5ee800921c3389c84e
rdkit/ML/KNN/KNNRegressionModel.py
python
KNNRegressionModel.PredictExample
(self, example, appendExamples=0, weightedAverage=0, neighborList=None)
return accum
Generates a prediction for an example by looking at its closest neighbors **Arguments** - example: the example to be classified - appendExamples: if this is nonzero then the example will be stored on this model - weightedAverage: if provided, the neighbors' contributions to the value will be weighted by their reciprocal distance - neighborList: if provided, will be used to return the list of neighbors **Returns** - the predicted value for _example_
Generates a prediction for an example by looking at its closest neighbors
[ "Generates", "a", "prediction", "for", "an", "example", "by", "looking", "at", "its", "closest", "neighbors" ]
def PredictExample(self, example, appendExamples=0, weightedAverage=0, neighborList=None): """ Generates a prediction for an example by looking at its closest neighbors **Arguments** - examples: the example to be classified - appendExamples: if this is nonzero then the example will be stored on this model - weightedAverage: if provided, the neighbors' contributions to the value will be weighed by their reciprocal square distance - neighborList: if provided, will be used to return the list of neighbors **Returns** - the classification of _example_ """ if appendExamples: self._examples.append(example) # first find the k-closest examples in the training set knnLst = self.GetNeighbors(example) accum = 0.0 denom = 0.0 for knn in knnLst: if knn[1] is None: continue if weightedAverage: dist = knn[0] if dist == 0.0: w = 1. else: w = 1. / dist else: w = 1.0 accum += w * knn[1][-1] denom += w if denom: accum /= denom if neighborList is not None: neighborList.extend(knnLst) return accum
[ "def", "PredictExample", "(", "self", ",", "example", ",", "appendExamples", "=", "0", ",", "weightedAverage", "=", "0", ",", "neighborList", "=", "None", ")", ":", "if", "appendExamples", ":", "self", ".", "_examples", ".", "append", "(", "example", ")", "# first find the k-closest examples in the training set", "knnLst", "=", "self", ".", "GetNeighbors", "(", "example", ")", "accum", "=", "0.0", "denom", "=", "0.0", "for", "knn", "in", "knnLst", ":", "if", "knn", "[", "1", "]", "is", "None", ":", "continue", "if", "weightedAverage", ":", "dist", "=", "knn", "[", "0", "]", "if", "dist", "==", "0.0", ":", "w", "=", "1.", "else", ":", "w", "=", "1.", "/", "dist", "else", ":", "w", "=", "1.0", "accum", "+=", "w", "*", "knn", "[", "1", "]", "[", "-", "1", "]", "denom", "+=", "w", "if", "denom", ":", "accum", "/=", "denom", "if", "neighborList", "is", "not", "None", ":", "neighborList", ".", "extend", "(", "knnLst", ")", "return", "accum" ]
https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/ML/KNN/KNNRegressionModel.py#L37-L81
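The heart of the prediction above is an inverse-distance weighted average over the k nearest neighbors. A self-contained sketch of just that averaging step, simplified so that values are passed directly instead of being pulled out of neighbor examples:

def weighted_knn_average(neighbors, weighted=True):
    # neighbors: iterable of (distance, value) pairs from a k-NN search
    accum = denom = 0.0
    for dist, value in neighbors:
        w = (1.0 / dist if dist else 1.0) if weighted else 1.0  # zero distance -> weight 1, as above
        accum += w * value
        denom += w
    return accum / denom if denom else accum

print(weighted_knn_average([(1.0, 10.0), (2.0, 20.0)]))  # ~13.33: the closer neighbor dominates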
bareos/bareos
56a10bb368b0a81e977bb51304033fe49d59efb0
restapi/bareos_restapi/__init__.py
python
import_volume
( *, response: Response, current_user: User = Depends(get_current_user), importParams: volumeImport = Body(..., title="Volume import parameters"), )
return responseDict
Import volumes via the _import_ command.
Import volumes via the _import_ command.
[ "Import", "volumes", "via", "the", "_import_", "command" ]
def import_volume( *, response: Response, current_user: User = Depends(get_current_user), importParams: volumeImport = Body(..., title="Volume import parameters"), ): """ import volumes the _import_ command """ responseDict = {} updateCommand = "import" updateCommand += parseCommandOptions(importParams.dict()) # print(updateCommand) try: responseDict = current_user.jsonDirector.call(updateCommand) except Exception as e: response.status_code = 500 return { "message": "Could not import volumes on director %s. Message: '%s'" % (CONFIG_DIRECTOR_NAME, e) } # Director delivers empty response response.status_code = 200 return responseDict
[ "def", "import_volume", "(", "*", ",", "response", ":", "Response", ",", "current_user", ":", "User", "=", "Depends", "(", "get_current_user", ")", ",", "importParams", ":", "volumeImport", "=", "Body", "(", "...", ",", "title", "=", "\"Volume import parameters\"", ")", ",", ")", ":", "responseDict", "=", "{", "}", "updateCommand", "=", "\"import\"", "updateCommand", "+=", "parseCommandOptions", "(", "importParams", ".", "dict", "(", ")", ")", "# print(updateCommand)", "try", ":", "responseDict", "=", "current_user", ".", "jsonDirector", ".", "call", "(", "updateCommand", ")", "except", "Exception", "as", "e", ":", "response", ".", "status_code", "=", "500", "return", "{", "\"message\"", ":", "\"Could not import volumes on director %s. Message: '%s'\"", "%", "(", "CONFIG_DIRECTOR_NAME", ",", "e", ")", "}", "# Director delivers empty response", "response", ".", "status_code", "=", "200", "return", "responseDict" ]
https://github.com/bareos/bareos/blob/56a10bb368b0a81e977bb51304033fe49d59efb0/restapi/bareos_restapi/__init__.py#L1376-L1399
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/turtle.py
python
RawTurtle.clearstamp
(self, stampid)
Delete stamp with given stampid Argument: stampid - an integer, must be return value of previous stamp() call. Example (for a Turtle instance named turtle): >>> turtle.color("blue") >>> astamp = turtle.stamp() >>> turtle.fd(50) >>> turtle.clearstamp(astamp)
Delete stamp with given stampid
[ "Delete", "stamp", "with", "given", "stampid" ]
def clearstamp(self, stampid): """Delete stamp with given stampid Argument: stampid - an integer, must be return value of previous stamp() call. Example (for a Turtle instance named turtle): >>> turtle.color("blue") >>> astamp = turtle.stamp() >>> turtle.fd(50) >>> turtle.clearstamp(astamp) """ self._clearstamp(stampid) self._update()
[ "def", "clearstamp", "(", "self", ",", "stampid", ")", ":", "self", ".", "_clearstamp", "(", "stampid", ")", "self", ".", "_update", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/lib-tk/turtle.py#L2933-L2946
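The docstring's example runs as-is against the stdlib turtle module; a lightly expanded version (opens a Tk graphics window when executed):

import turtle

t = turtle.Turtle()
t.color("blue")
astamp = t.stamp()        # leave a copy of the turtle shape on the canvas
t.fd(50)
t.clearstamp(astamp)      # delete exactly that stamp by its id
turtle.done()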
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/tools/inspector_protocol/jinja2/filters.py
python
do_rejectattr
(*args, **kwargs)
return select_or_reject(args, kwargs, lambda x: not x, True)
Filters a sequence of objects by applying a test to the specified attribute of each object, and rejecting the objects with the test succeeding. If no test is specified, the attribute's value will be evaluated as a boolean. .. sourcecode:: jinja {{ users|rejectattr("is_active") }} {{ users|rejectattr("email", "none") }} .. versionadded:: 2.7
Filters a sequence of objects by applying a test to the specified attribute of each object, and rejecting the objects with the test succeeding.
[ "Filters", "a", "sequence", "of", "objects", "by", "applying", "a", "test", "to", "the", "specified", "attribute", "of", "each", "object", "and", "rejecting", "the", "objects", "with", "the", "test", "succeeding", "." ]
def do_rejectattr(*args, **kwargs): """Filters a sequence of objects by applying a test to the specified attribute of each object, and rejecting the objects with the test succeeding. If no test is specified, the attribute's value will be evaluated as a boolean. .. sourcecode:: jinja {{ users|rejectattr("is_active") }} {{ users|rejectattr("email", "none") }} .. versionadded:: 2.7 """ return select_or_reject(args, kwargs, lambda x: not x, True)
[ "def", "do_rejectattr", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "select_or_reject", "(", "args", ",", "kwargs", ",", "lambda", "x", ":", "not", "x", ",", "True", ")" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/inspector_protocol/jinja2/filters.py#L1028-L1043
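A runnable sketch of the rejectattr filter through the Jinja2 Python API; the template strings mirror the docstring's examples, and Jinja2's attribute lookup falls back to item lookup, so plain dicts work here:

from jinja2 import Environment

env = Environment()
users = [
    {"is_active": True, "email": "a@example.com"},
    {"is_active": False, "email": None},
]
out = env.from_string("{{ users|rejectattr('is_active')|list }}").render(users=users)
print(out)    # only the inactive user survives
out = env.from_string("{{ users|rejectattr('email', 'none')|list }}").render(users=users)
print(out)    # the user whose email is None is rejected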
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/plat-mac/macostools.py
python
copytree
(src, dst, copydates=1)
Copy a complete file tree to a new destination
Copy a complete file tree to a new destination
[ "Copy", "a", "complete", "file", "tree", "to", "a", "new", "destination" ]
def copytree(src, dst, copydates=1): """Copy a complete file tree to a new destination""" if os.path.isdir(src): mkdirs(dst) files = os.listdir(src) for f in files: copytree(os.path.join(src, f), os.path.join(dst, f), copydates) else: copy(src, dst, 1, copydates)
[ "def", "copytree", "(", "src", ",", "dst", ",", "copydates", "=", "1", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "src", ")", ":", "mkdirs", "(", "dst", ")", "files", "=", "os", ".", "listdir", "(", "src", ")", "for", "f", "in", "files", ":", "copytree", "(", "os", ".", "path", ".", "join", "(", "src", ",", "f", ")", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "f", ")", ",", "copydates", ")", "else", ":", "copy", "(", "src", ",", "dst", ",", "1", ",", "copydates", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/plat-mac/macostools.py#L130-L138
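macostools is a legacy Mac-only module, but the recursion pattern is generic. A portable sketch of the same walk with os plus shutil.copy2 standing in for the Mac-specific copy() (for real work, the stdlib's shutil.copytree already does this):

import os
import shutil

def copytree(src, dst):
    if os.path.isdir(src):
        os.makedirs(dst, exist_ok=True)    # mkdirs() equivalent
        for name in os.listdir(src):
            copytree(os.path.join(src, name), os.path.join(dst, name))
    else:
        shutil.copy2(src, dst)             # copies data and timestamps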
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/framework/op_callbacks.py
python
should_invoke_op_callbacks
()
return ctx.op_callbacks and not ctx.invoking_op_callbacks
Determine if op callbacks are present and should be invoked. Returns: A thread-local result (boolean) indicating whether any op callback(s) exist and should be invoked.
Determine if op callbacks are present and should be invoked.
[ "Determine", "if", "op", "callbacks", "are", "present", "and", "should", "be", "invoked", "." ]
def should_invoke_op_callbacks(): """Determine if op callbacks are present and should be invoked. Returns: A thread-local result (boolean) indicating whether any op callback(s) exist and should be invoked. """ ctx = context.context() return ctx.op_callbacks and not ctx.invoking_op_callbacks
[ "def", "should_invoke_op_callbacks", "(", ")", ":", "ctx", "=", "context", ".", "context", "(", ")", "return", "ctx", ".", "op_callbacks", "and", "not", "ctx", ".", "invoking_op_callbacks" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/framework/op_callbacks.py#L114-L122
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/numpy/files/numpy/ma/mrecords.py
python
MaskedRecords.__getitem__
(self, indx)
return obj
Returns all the fields sharing the same fieldname base. The fieldname base is either `_data` or `_mask`.
Returns all the fields sharing the same fieldname base. The fieldname base is either `_data` or `_mask`.
[ "Returns", "all", "the", "fields", "sharing", "the", "same", "fieldname", "base", ".", "The", "fieldname", "base", "is", "either", "_data", "or", "_mask", "." ]
def __getitem__(self, indx): """Returns all the fields sharing the same fieldname base. The fieldname base is either `_data` or `_mask`.""" _localdict = self.__dict__ _mask = ndarray.__getattribute__(self, '_mask') _data = ndarray.view(self, _localdict['_baseclass']) # We want a field ........ if isinstance(indx, basestring): #!!!: Make sure _sharedmask is True to propagate back to _fieldmask #!!!: Don't use _set_mask, there are some copies being made... #!!!: ...that break propagation #!!!: Don't force the mask to nomask, that wrecks easy masking obj = _data[indx].view(MaskedArray) obj._mask = _mask[indx] obj._sharedmask = True fval = _localdict['_fill_value'] if fval is not None: obj._fill_value = fval[indx] # Force to masked if the mask is True if not obj.ndim and obj._mask: return masked return obj # We want some elements .. # First, the data ........ obj = np.array(_data[indx], copy=False).view(mrecarray) obj._mask = np.array(_mask[indx], copy=False).view(recarray) return obj
[ "def", "__getitem__", "(", "self", ",", "indx", ")", ":", "_localdict", "=", "self", ".", "__dict__", "_mask", "=", "ndarray", ".", "__getattribute__", "(", "self", ",", "'_mask'", ")", "_data", "=", "ndarray", ".", "view", "(", "self", ",", "_localdict", "[", "'_baseclass'", "]", ")", "# We want a field ........", "if", "isinstance", "(", "indx", ",", "basestring", ")", ":", "#!!!: Make sure _sharedmask is True to propagate back to _fieldmask", "#!!!: Don't use _set_mask, there are some copies being made...", "#!!!: ...that break propagation", "#!!!: Don't force the mask to nomask, that wrecks easy masking", "obj", "=", "_data", "[", "indx", "]", ".", "view", "(", "MaskedArray", ")", "obj", ".", "_mask", "=", "_mask", "[", "indx", "]", "obj", ".", "_sharedmask", "=", "True", "fval", "=", "_localdict", "[", "'_fill_value'", "]", "if", "fval", "is", "not", "None", ":", "obj", ".", "_fill_value", "=", "fval", "[", "indx", "]", "# Force to masked if the mask is True", "if", "not", "obj", ".", "ndim", "and", "obj", ".", "_mask", ":", "return", "masked", "return", "obj", "# We want some elements ..", "# First, the data ........", "obj", "=", "np", ".", "array", "(", "_data", "[", "indx", "]", ",", "copy", "=", "False", ")", ".", "view", "(", "mrecarray", ")", "obj", ".", "_mask", "=", "np", ".", "array", "(", "_mask", "[", "indx", "]", ",", "copy", "=", "False", ")", ".", "view", "(", "recarray", ")", "return", "obj" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/ma/mrecords.py#L289-L315
facebookincubator/BOLT
88c70afe9d388ad430cc150cc158641701397f70
llvm/utils/lit/lit/discovery.py
python
find_tests_for_inputs
(lit_config, inputs, indirectlyRunCheck)
return tests
find_tests_for_inputs(lit_config, inputs) -> [Test] Given a configuration object and a list of input specifiers, find all the tests to execute.
find_tests_for_inputs(lit_config, inputs) -> [Test]
[ "find_tests_for_inputs", "(", "lit_config", "inputs", ")", "-", ">", "[", "Test", "]" ]
def find_tests_for_inputs(lit_config, inputs, indirectlyRunCheck): """ find_tests_for_inputs(lit_config, inputs) -> [Test] Given a configuration object and a list of input specifiers, find all the tests to execute. """ # Expand '@...' form in inputs. actual_inputs = [] for input in inputs: if input.startswith('@'): f = open(input[1:]) try: for ln in f: ln = ln.strip() if ln: actual_inputs.append(ln) finally: f.close() else: actual_inputs.append(input) # Load the tests from the inputs. tests = [] test_suite_cache = {} local_config_cache = {} for input in actual_inputs: prev = len(tests) tests.extend(getTests(input, lit_config, test_suite_cache, local_config_cache, indirectlyRunCheck)[1]) if prev == len(tests): lit_config.warning('input %r contained no tests' % input) # This data is no longer needed but keeping it around causes awful # performance problems while the test suites run. for k, suite in test_suite_cache.items(): if suite[0]: suite[0].test_times = None # If there were any errors during test discovery, exit now. if lit_config.numErrors: sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors) sys.exit(2) return tests
[ "def", "find_tests_for_inputs", "(", "lit_config", ",", "inputs", ",", "indirectlyRunCheck", ")", ":", "# Expand '@...' form in inputs.", "actual_inputs", "=", "[", "]", "for", "input", "in", "inputs", ":", "if", "input", ".", "startswith", "(", "'@'", ")", ":", "f", "=", "open", "(", "input", "[", "1", ":", "]", ")", "try", ":", "for", "ln", "in", "f", ":", "ln", "=", "ln", ".", "strip", "(", ")", "if", "ln", ":", "actual_inputs", ".", "append", "(", "ln", ")", "finally", ":", "f", ".", "close", "(", ")", "else", ":", "actual_inputs", ".", "append", "(", "input", ")", "# Load the tests from the inputs.", "tests", "=", "[", "]", "test_suite_cache", "=", "{", "}", "local_config_cache", "=", "{", "}", "for", "input", "in", "actual_inputs", ":", "prev", "=", "len", "(", "tests", ")", "tests", ".", "extend", "(", "getTests", "(", "input", ",", "lit_config", ",", "test_suite_cache", ",", "local_config_cache", ",", "indirectlyRunCheck", ")", "[", "1", "]", ")", "if", "prev", "==", "len", "(", "tests", ")", ":", "lit_config", ".", "warning", "(", "'input %r contained no tests'", "%", "input", ")", "# This data is no longer needed but keeping it around causes awful", "# performance problems while the test suites run.", "for", "k", ",", "suite", "in", "test_suite_cache", ".", "items", "(", ")", ":", "if", "suite", "[", "0", "]", ":", "suite", "[", "0", "]", ".", "test_times", "=", "None", "# If there were any errors during test discovery, exit now.", "if", "lit_config", ".", "numErrors", ":", "sys", ".", "stderr", ".", "write", "(", "'%d errors, exiting.\\n'", "%", "lit_config", ".", "numErrors", ")", "sys", ".", "exit", "(", "2", ")", "return", "tests" ]
https://github.com/facebookincubator/BOLT/blob/88c70afe9d388ad430cc150cc158641701397f70/llvm/utils/lit/lit/discovery.py#L249-L294
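The '@file' handling at the top of the function is a small reusable idiom: a response file listing one input per line. A standalone sketch of just that expansion:

def expand_inputs(inputs):
    actual = []
    for item in inputs:
        if item.startswith('@'):
            with open(item[1:]) as f:       # '@path' means: read inputs from path
                actual.extend(ln.strip() for ln in f if ln.strip())
        else:
            actual.append(item)
    return actual

# expand_inputs(['tests/a', '@more_tests.txt'])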
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/learn/python/learn/estimators/run_config.py
python
_count_worker
(cluster_spec)
return len(cluster_spec.as_dict().get('worker', [])) if cluster_spec else 0
Counts the number of workers in cluster_spec.
Counts the number of workers in cluster_spec.
[ "Counts", "the", "number", "of", "workers", "in", "cluster_spec", "." ]
def _count_worker(cluster_spec): """Counts the number of workers in cluster_spec.""" return len(cluster_spec.as_dict().get('worker', [])) if cluster_spec else 0
[ "def", "_count_worker", "(", "cluster_spec", ")", ":", "return", "len", "(", "cluster_spec", ".", "as_dict", "(", ")", ".", "get", "(", "'worker'", ",", "[", "]", ")", ")", "if", "cluster_spec", "else", "0" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/estimators/run_config.py#L385-L387
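ClusterSpec.as_dict() maps job names to task lists, so the count reduces to a dict lookup. The same logic against a plain dict standing in for the spec:

cluster = {'ps': ['ps0:2222'], 'worker': ['w0:2222', 'w1:2222']}
num_workers = len(cluster.get('worker', []))   # 0 when the job is absent
assert num_workers == 2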
albertz/openlierox
d316c14a8eb57848ef56e9bfa7b23a56f694a51b
tools/DedicatedServerVideo/gdata/Crypto/PublicKey/RSA.py
python
RSAobj.has_private
(self)
has_private() : bool Return a Boolean denoting whether the object contains private components.
has_private() : bool Return a Boolean denoting whether the object contains private components.
[ "has_private", "()", ":", "bool", "Return", "a", "Boolean", "denoting", "whether", "the", "object", "contains", "private", "components", "." ]
def has_private(self): """has_private() : bool Return a Boolean denoting whether the object contains private components. """ if hasattr(self, 'd'): return 1 else: return 0
[ "def", "has_private", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'d'", ")", ":", "return", "1", "else", ":", "return", "0" ]
https://github.com/albertz/openlierox/blob/d316c14a8eb57848ef56e9bfa7b23a56f694a51b/tools/DedicatedServerVideo/gdata/Crypto/PublicKey/RSA.py#L131-L138
microsoft/CNTK
e9396480025b9ca457d26b6f33dd07c474c6aa04
bindings/python/cntk/logging/graph.py
python
find_by_name
(node, node_name, depth=0)
return result[0]
Finds a function in the graph starting from ``node`` and doing a depth-first search. It assumes that the name occurs only once. Args: node (:class:`~cntk.ops.functions.Function` or :class:`~cntk.variables.Variable`): the node to start the journey from node_name (`str`): name of the node to search for depth (int, default 0): how deep into the block hierarchy the DFS algorithm should go. Set to -1 for infinite depth. Returns: Primitive (or block) function having the specified name See also: :func:`~cntk.ops.functions.Function.find_by_name` in class :class:`~cntk.ops.functions.Function`.
Finds a function in the graph starting from ``node`` and doing a depth-first search. It assumes that the name occurs only once.
[ "Finds", "a", "function", "in", "the", "graph", "starting", "from", "node", "and", "doing", "a", "depth", "-", "first", "search", ".", "It", "assumes", "that", "the", "name", "occurs", "only", "once", "." ]
def find_by_name(node, node_name, depth=0): ''' Finds a function in the graph starting from ``node`` and doing a depth-first search. It assumes that the name occurs only once. Args: node (:class:`~cntk.ops.functions.Function` or :class:`~cntk.variables.Variable`): the node to start the journey from node_name (`str`): name for which we are search nodes depth (int, default 0): how deep into the block hierarchy the DFS algorithm should go into. Set to -1 for infinite depth. Returns: Primitive (or block) function having the specified name See also: :func:`~cntk.ops.functions.Function.find_by_name` in class :class:`~cntk.ops.functions.Function`. ''' if not isinstance(node_name, str): raise ValueError('node name has to be a string. You gave ' 'a %s' % type(node_name)) result = depth_first_search(node, lambda x: x.name == node_name, depth) if len(result) > 1: raise ValueError('found multiple functions matching "%s". ' 'If that was expected call find_all_with_name' % node_name) if not result: return None return result[0]
[ "def", "find_by_name", "(", "node", ",", "node_name", ",", "depth", "=", "0", ")", ":", "if", "not", "isinstance", "(", "node_name", ",", "str", ")", ":", "raise", "ValueError", "(", "'node name has to be a string. You gave '", "'a %s'", "%", "type", "(", "node_name", ")", ")", "result", "=", "depth_first_search", "(", "node", ",", "lambda", "x", ":", "x", ".", "name", "==", "node_name", ",", "depth", ")", "if", "len", "(", "result", ")", ">", "1", ":", "raise", "ValueError", "(", "'found multiple functions matching \"%s\". '", "'If that was expected call find_all_with_name'", "%", "node_name", ")", "if", "not", "result", ":", "return", "None", "return", "result", "[", "0", "]" ]
https://github.com/microsoft/CNTK/blob/e9396480025b9ca457d26b6f33dd07c474c6aa04/bindings/python/cntk/logging/graph.py#L99-L132
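The helper is a uniqueness-enforcing wrapper around a generic depth-first search. A library-free sketch with the DFS injected as a callable; dfs(root, predicate) is assumed to return every matching node:

def find_by_name(root, name, dfs):
    if not isinstance(name, str):
        raise ValueError('node name has to be a string. You gave a %s' % type(name))
    matches = dfs(root, lambda node: node.name == name)
    if len(matches) > 1:
        raise ValueError('found multiple functions matching "%s".' % name)
    return matches[0] if matches else None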
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/core/fromnumeric.py
python
round_
(a, decimals=0, out=None)
return around(a, decimals=decimals, out=out)
Round an array to the given number of decimals. See Also -------- around : equivalent function; see for details.
Round an array to the given number of decimals.
[ "Round", "an", "array", "to", "the", "given", "number", "of", "decimals", "." ]
def round_(a, decimals=0, out=None): """ Round an array to the given number of decimals. See Also -------- around : equivalent function; see for details. """ return around(a, decimals=decimals, out=out)
[ "def", "round_", "(", "a", ",", "decimals", "=", "0", ",", "out", "=", "None", ")", ":", "return", "around", "(", "a", ",", "decimals", "=", "decimals", ",", "out", "=", "out", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/core/fromnumeric.py#L3731-L3739
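A quick NumPy check that the alias and np.around agree (np.round_ still exists in the 1.x line; newer releases prefer np.round):

import numpy as np

a = np.array([1.2345, 6.789])
print(np.round_(a, 2))                                    # [1.23 6.79]
assert np.array_equal(np.round_(a, 2), np.around(a, 2))   # same function under two names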
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py2/numpy/core/_internal.py
python
_ctypes._as_parameter_
(self)
return self.data_as(ctypes.c_void_p)
Overrides the ctypes semi-magic method. Enables `c_func(some_array.ctypes)`
Overrides the ctypes semi-magic method
[ "Overrides", "the", "ctypes", "semi", "-", "magic", "method" ]
def _as_parameter_(self): """ Overrides the ctypes semi-magic method Enables `c_func(some_array.ctypes)` """ return self.data_as(ctypes.c_void_p)
[ "def", "_as_parameter_", "(", "self", ")", ":", "return", "self", ".", "data_as", "(", "ctypes", ".", "c_void_p", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/core/_internal.py#L348-L354
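Because ndarray.ctypes defines _as_parameter_, ctypes' argument conversion accepts it anywhere a void pointer is expected, which is exactly the c_func(some_array.ctypes) usage the docstring mentions. A small demonstration with ctypes.memset:

import ctypes
import numpy as np

arr = np.ones(4, dtype=np.uint8)
ctypes.memset(arr.ctypes, 0, arr.nbytes)   # arr.ctypes converts to a void* via _as_parameter_
assert not arr.any()                       # the buffer really was zeroed in place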
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/python/m5/ext/pyfdt/pyfdt.py
python
FdtProperty.new_raw_property
(name, raw_value)
Instantiate property with raw value type
Instantiate property with raw value type
[ "Instantiate", "property", "with", "raw", "value", "type" ]
def new_raw_property(name, raw_value): """Instantiate property with raw value type""" if FdtProperty.__check_prop_strings(raw_value): return FdtPropertyStrings.init_raw(name, raw_value) elif len(raw_value) and len(raw_value) % 4 == 0: return FdtPropertyWords.init_raw(name, raw_value) elif len(raw_value) and len(raw_value): return FdtPropertyBytes.init_raw(name, raw_value) else: return FdtProperty(name)
[ "def", "new_raw_property", "(", "name", ",", "raw_value", ")", ":", "if", "FdtProperty", ".", "__check_prop_strings", "(", "raw_value", ")", ":", "return", "FdtPropertyStrings", ".", "init_raw", "(", "name", ",", "raw_value", ")", "elif", "len", "(", "raw_value", ")", "and", "len", "(", "raw_value", ")", "%", "4", "==", "0", ":", "return", "FdtPropertyWords", ".", "init_raw", "(", "name", ",", "raw_value", ")", "elif", "len", "(", "raw_value", ")", "and", "len", "(", "raw_value", ")", ":", "return", "FdtPropertyBytes", ".", "init_raw", "(", "name", ",", "raw_value", ")", "else", ":", "return", "FdtProperty", "(", "name", ")" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/m5/ext/pyfdt/pyfdt.py#L147-L156
musescore/MuseScore
a817fea23e3c2be30847b7fde5b01746222c252e
tools/crashdump/win/generate_breakpad_symbols.py
python
GenerateSymbols
(options, binaries)
Dumps the symbols of binary and places them in the given directory.
Dumps the symbols of binary and places them in the given directory.
[ "Dumps", "the", "symbols", "of", "binary", "and", "places", "them", "in", "the", "given", "directory", "." ]
def GenerateSymbols(options, binaries): """Dumps the symbols of binary and places them in the given directory.""" q = queue.Queue() print_lock = threading.Lock() def _Worker(): dump_syms = options.dumpsyms_bin while True: binary = q.get() if options.verbose: with print_lock: print("Generating symbols for %s" % binary) syms = GetCommandOutput([dump_syms, binary]) module_line = re.match("MODULE [^ ]+ [^ ]+ ([0-9A-Fa-f]+) (.*)\r\n", syms) if module_line == None: with print_lock: print("Failed to get symbols for %s" % binary) q.task_done() continue output_path = os.path.join(options.symbols_dir, module_line.group(2), module_line.group(1)) mkdir_p(output_path) symbol_file = "%s.sym" % module_line.group(2)[:-4] # strip .pdb f = open(os.path.join(output_path, symbol_file), 'w') f.write(syms) f.close() q.task_done() for binary in binaries: q.put(binary) for _ in range(options.jobs): t = threading.Thread(target=_Worker) t.daemon = True t.start() q.join()
[ "def", "GenerateSymbols", "(", "options", ",", "binaries", ")", ":", "q", "=", "queue", ".", "Queue", "(", ")", "print_lock", "=", "threading", ".", "Lock", "(", ")", "def", "_Worker", "(", ")", ":", "dump_syms", "=", "options", ".", "dumpsyms_bin", "while", "True", ":", "binary", "=", "q", ".", "get", "(", ")", "if", "options", ".", "verbose", ":", "with", "print_lock", ":", "print", "(", "\"Generating symbols for %s\"", "%", "binary", ")", "syms", "=", "GetCommandOutput", "(", "[", "dump_syms", ",", "binary", "]", ")", "module_line", "=", "re", ".", "match", "(", "\"MODULE [^ ]+ [^ ]+ ([0-9A-Fa-f]+) (.*)\\r\\n\"", ",", "syms", ")", "if", "module_line", "==", "None", ":", "with", "print_lock", ":", "print", "(", "\"Failed to get symbols for %s\"", "%", "binary", ")", "q", ".", "task_done", "(", ")", "continue", "output_path", "=", "os", ".", "path", ".", "join", "(", "options", ".", "symbols_dir", ",", "module_line", ".", "group", "(", "2", ")", ",", "module_line", ".", "group", "(", "1", ")", ")", "mkdir_p", "(", "output_path", ")", "symbol_file", "=", "\"%s.sym\"", "%", "module_line", ".", "group", "(", "2", ")", "[", ":", "-", "4", "]", "# strip .pdb", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "output_path", ",", "symbol_file", ")", ",", "'w'", ")", "f", ".", "write", "(", "syms", ")", "f", ".", "close", "(", ")", "q", ".", "task_done", "(", ")", "for", "binary", "in", "binaries", ":", "q", ".", "put", "(", "binary", ")", "for", "_", "in", "range", "(", "options", ".", "jobs", ")", ":", "t", "=", "threading", ".", "Thread", "(", "target", "=", "_Worker", ")", "t", ".", "daemon", "=", "True", "t", ".", "start", "(", ")", "q", ".", "join", "(", ")" ]
https://github.com/musescore/MuseScore/blob/a817fea23e3c2be30847b7fde5b01746222c252e/tools/crashdump/win/generate_breakpad_symbols.py#L56-L97
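The function uses the classic queue.Queue plus daemon-thread worker-pool idiom: enqueue all jobs, start N workers that loop on q.get()/q.task_done(), then block on q.join(). A stripped-down sketch of just that scaffolding (stdlib only):

import queue
import threading

def run_pool(jobs, handle, workers=4):
    q = queue.Queue()

    def worker():
        while True:
            job = q.get()
            try:
                handle(job)
            finally:
                q.task_done()        # always mark done, even on failure

    for job in jobs:
        q.put(job)
    for _ in range(workers):
        threading.Thread(target=worker, daemon=True).start()
    q.join()                         # block until every job is processed

run_pool(range(8), print, workers=2)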
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/third_party/web-page-replay/third_party/dns/rdtypes/ANY/LOC.py
python
LOC.__init__
(self, rdclass, rdtype, latitude, longitude, altitude, size=1.0, hprec=10000.0, vprec=10.0)
Initialize a LOC record instance. The parameters I{latitude} and I{longitude} may be either a 4-tuple of integers specifying (degrees, minutes, seconds, milliseconds), or they may be floating point values specifying the number of degrees. The other parameters are floats.
Initialize a LOC record instance.
[ "Initialize", "a", "LOC", "record", "instance", "." ]
def __init__(self, rdclass, rdtype, latitude, longitude, altitude, size=1.0, hprec=10000.0, vprec=10.0): """Initialize a LOC record instance. The parameters I{latitude} and I{longitude} may be either a 4-tuple of integers specifying (degrees, minutes, seconds, milliseconds), or they may be floating point values specifying the number of degrees. The other parameters are floats.""" super(LOC, self).__init__(rdclass, rdtype) if isinstance(latitude, int) or isinstance(latitude, long): latitude = float(latitude) if isinstance(latitude, float): latitude = _float_to_tuple(latitude) self.latitude = latitude if isinstance(longitude, int) or isinstance(longitude, long): longitude = float(longitude) if isinstance(longitude, float): longitude = _float_to_tuple(longitude) self.longitude = longitude self.altitude = float(altitude) self.size = float(size) self.horizontal_precision = float(hprec) self.vertical_precision = float(vprec)
[ "def", "__init__", "(", "self", ",", "rdclass", ",", "rdtype", ",", "latitude", ",", "longitude", ",", "altitude", ",", "size", "=", "1.0", ",", "hprec", "=", "10000.0", ",", "vprec", "=", "10.0", ")", ":", "super", "(", "LOC", ",", "self", ")", ".", "__init__", "(", "rdclass", ",", "rdtype", ")", "if", "isinstance", "(", "latitude", ",", "int", ")", "or", "isinstance", "(", "latitude", ",", "long", ")", ":", "latitude", "=", "float", "(", "latitude", ")", "if", "isinstance", "(", "latitude", ",", "float", ")", ":", "latitude", "=", "_float_to_tuple", "(", "latitude", ")", "self", ".", "latitude", "=", "latitude", "if", "isinstance", "(", "longitude", ",", "int", ")", "or", "isinstance", "(", "longitude", ",", "long", ")", ":", "longitude", "=", "float", "(", "longitude", ")", "if", "isinstance", "(", "longitude", ",", "float", ")", ":", "longitude", "=", "_float_to_tuple", "(", "longitude", ")", "self", ".", "longitude", "=", "longitude", "self", ".", "altitude", "=", "float", "(", "altitude", ")", "self", ".", "size", "=", "float", "(", "size", ")", "self", ".", "horizontal_precision", "=", "float", "(", "hprec", ")", "self", ".", "vertical_precision", "=", "float", "(", "vprec", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/third_party/web-page-replay/third_party/dns/rdtypes/ANY/LOC.py#L100-L123
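The constructor normalizes latitude/longitude given either as floats (degrees) or as (degrees, minutes, seconds, milliseconds) tuples. A standalone sketch of the float-to-tuple conversion that the _float_to_tuple helper presumably performs; this is my reconstruction, not necessarily dnspython's exact code:

def float_to_dms(value):
    # degrees -> (deg, min, sec, ms), with the sign carried on the degrees field
    sign = -1 if value < 0 else 1
    value = abs(value)
    degrees = int(value)
    value = (value - degrees) * 60
    minutes = int(value)
    value = (value - minutes) * 60
    seconds = int(value)
    milliseconds = int(round((value - seconds) * 1000))
    return sign * degrees, minutes, seconds, milliseconds

print(float_to_dms(40.446195))   # roughly (40, 26, 46, 302)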
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/more-itertools/py2/more_itertools/more.py
python
split_into
(iterable, sizes)
Yield a list of sequential items from *iterable* of length 'n' for each integer 'n' in *sizes*. >>> list(split_into([1,2,3,4,5,6], [1,2,3])) [[1], [2, 3], [4, 5, 6]] If the sum of *sizes* is smaller than the length of *iterable*, then the remaining items of *iterable* will not be returned. >>> list(split_into([1,2,3,4,5,6], [2,3])) [[1, 2], [3, 4, 5]] If the sum of *sizes* is larger than the length of *iterable*, fewer items will be returned in the iteration that overruns *iterable* and further lists will be empty: >>> list(split_into([1,2,3,4], [1,2,3,4])) [[1], [2, 3], [4], []] When a ``None`` object is encountered in *sizes*, the returned list will contain items up to the end of *iterable* the same way that itertools.islice does: >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] :func:`split_into` can be useful for grouping a series of items where the sizes of the groups are not uniform. An example would be where in a row from a table, multiple columns represent elements of the same feature (e.g. a point represented by x,y,z) but the format is not the same for all columns.
Yield a list of sequential items from *iterable* of length 'n' for each integer 'n' in *sizes*.
[ "Yield", "a", "list", "of", "sequential", "items", "from", "*", "iterable", "*", "of", "length", "n", "for", "each", "integer", "n", "in", "*", "sizes", "*", "." ]
def split_into(iterable, sizes): """Yield a list of sequential items from *iterable* of length 'n' for each integer 'n' in *sizes*. >>> list(split_into([1,2,3,4,5,6], [1,2,3])) [[1], [2, 3], [4, 5, 6]] If the sum of *sizes* is smaller than the length of *iterable*, then the remaining items of *iterable* will not be returned. >>> list(split_into([1,2,3,4,5,6], [2,3])) [[1, 2], [3, 4, 5]] If the sum of *sizes* is larger than the length of *iterable*, fewer items will be returned in the iteration that overruns *iterable* and further lists will be empty: >>> list(split_into([1,2,3,4], [1,2,3,4])) [[1], [2, 3], [4], []] When a ``None`` object is encountered in *sizes*, the returned list will contain items up to the end of *iterable* the same way that itertools.slice does: >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None])) [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]] :func:`split_into` can be useful for grouping a series of items where the sizes of the groups are not uniform. An example would be where in a row from a table, multiple columns represent elements of the same feature (e.g. a point represented by x,y,z) but, the format is not the same for all columns. """ # convert the iterable argument into an iterator so its contents can # be consumed by islice in case it is a generator it = iter(iterable) for size in sizes: if size is None: yield list(it) return else: yield list(islice(it, size))
[ "def", "split_into", "(", "iterable", ",", "sizes", ")", ":", "# convert the iterable argument into an iterator so its contents can", "# be consumed by islice in case it is a generator", "it", "=", "iter", "(", "iterable", ")", "for", "size", "in", "sizes", ":", "if", "size", "is", "None", ":", "yield", "list", "(", "it", ")", "return", "else", ":", "yield", "list", "(", "islice", "(", "it", ",", "size", ")", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/more-itertools/py2/more_itertools/more.py#L1074-L1116
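split_into is part of the published more_itertools API, so the docstring's doctests run as-is against an installed package:

from more_itertools import split_into

print(list(split_into([1, 2, 3, 4, 5, 6], [1, 2, 3])))   # [[1], [2, 3], [4, 5, 6]]
print(list(split_into([1, 2, 3, 4, 5, 6], [2, None])))   # [[1, 2], [3, 4, 5, 6]]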
vesoft-inc/nebula
25a06217ebaf169e1f0e5ff6a797ba6f0c41fc35
.linters/cpp/cpplint.py
python
ProcessGlobalSuppressions
(lines)
Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline.
Updates the list of global error suppressions.
[ "Updates", "the", "list", "of", "global", "error", "suppressions", "." ]
def ProcessGlobalSuppressions(lines): """Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. """ for line in lines: if _SEARCH_C_FILE.search(line): for category in _DEFAULT_C_SUPPRESSED_CATEGORIES: _global_error_suppressions[category] = True if _SEARCH_KERNEL_FILE.search(line): for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES: _global_error_suppressions[category] = True
[ "def", "ProcessGlobalSuppressions", "(", "lines", ")", ":", "for", "line", "in", "lines", ":", "if", "_SEARCH_C_FILE", ".", "search", "(", "line", ")", ":", "for", "category", "in", "_DEFAULT_C_SUPPRESSED_CATEGORIES", ":", "_global_error_suppressions", "[", "category", "]", "=", "True", "if", "_SEARCH_KERNEL_FILE", ".", "search", "(", "line", ")", ":", "for", "category", "in", "_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES", ":", "_global_error_suppressions", "[", "category", "]", "=", "True" ]
https://github.com/vesoft-inc/nebula/blob/25a06217ebaf169e1f0e5ff6a797ba6f0c41fc35/.linters/cpp/cpplint.py#L755-L770
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/distributed/ClockDelta.py
python
ClockDelta.__resetClock
(self, timeDelta)
This is called when the global clock gets adjusted. timeDelta is equal to the amount of time, in seconds, that has been added to the global clock.
This is called when the global clock gets adjusted. timeDelta is equal to the amount of time, in seconds, that has been added to the global clock.
[ "This", "is", "called", "when", "the", "global", "clock", "gets", "adjusted", "timeDelta", "is", "equal", "to", "the", "amount", "of", "time", "in", "seconds", "that", "has", "been", "added", "to", "the", "global", "clock" ]
def __resetClock(self, timeDelta): """ this is called when the global clock gets adjusted timeDelta is equal to the amount of time, in seconds, that has been added to the global clock """ assert self.notify.debug( "adjusting timebase by %f seconds" % timeDelta) # adjust our timebase by the same amount self.delta += timeDelta
[ "def", "__resetClock", "(", "self", ",", "timeDelta", ")", ":", "assert", "self", ".", "notify", ".", "debug", "(", "\"adjusting timebase by %f seconds\"", "%", "timeDelta", ")", "# adjust our timebase by the same amount", "self", ".", "delta", "+=", "timeDelta" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/distributed/ClockDelta.py#L93-L102
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozbuild/makeutil.py
python
Rule.targets
(self)
return iter(self._targets)
Return an iterator on the rule targets.
Return an iterator on the rule targets.
[ "Return", "an", "iterator", "on", "the", "rule", "targets", "." ]
def targets(self): '''Return an iterator on the rule targets.''' # Ensure the returned iterator is actually just that, an iterator. # Avoids caller fiddling with the set itself. return iter(self._targets)
[ "def", "targets", "(", "self", ")", ":", "# Ensure the returned iterator is actually just that, an iterator.", "# Avoids caller fiddling with the set itself.", "return", "iter", "(", "self", ".", "_targets", ")" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/python/mozbuild/mozbuild/makeutil.py#L120-L124
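Returning iter(self._targets) instead of the set itself is a cheap defensive-exposure trick: callers can iterate but cannot mutate the rule's internal state. A tiny illustration:

targets = {'all', 'install'}
view = iter(targets)
print(sorted(view))       # iterating is fine (and consumes the iterator)
# view.add('clean')       # would fail: AttributeError, iterators have no add()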
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/traceback.py
python
extract_stack
(f=None, limit = None)
return list
Extract the raw traceback from the current stack frame. The return value has the same format as for extract_tb(). The optional 'f' and 'limit' arguments have the same meaning as for print_stack(). Each item in the list is a quadruple (filename, line number, function name, text), and the entries are in order from oldest to newest stack frame.
Extract the raw traceback from the current stack frame.
[ "Extract", "the", "raw", "traceback", "from", "the", "current", "stack", "frame", "." ]
def extract_stack(f=None, limit = None): """Extract the raw traceback from the current stack frame. The return value has the same format as for extract_tb(). The optional 'f' and 'limit' arguments have the same meaning as for print_stack(). Each item in the list is a quadruple (filename, line number, function name, text), and the entries are in order from oldest to newest stack frame. """ if f is None: try: raise ZeroDivisionError except ZeroDivisionError: f = sys.exc_info()[2].tb_frame.f_back if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit list = [] n = 0 while f is not None and (limit is None or n < limit): lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: line = line.strip() else: line = None list.append((filename, lineno, name, line)) f = f.f_back n = n+1 list.reverse() return list
[ "def", "extract_stack", "(", "f", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "f", "is", "None", ":", "try", ":", "raise", "ZeroDivisionError", "except", "ZeroDivisionError", ":", "f", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ".", "tb_frame", ".", "f_back", "if", "limit", "is", "None", ":", "if", "hasattr", "(", "sys", ",", "'tracebacklimit'", ")", ":", "limit", "=", "sys", ".", "tracebacklimit", "list", "=", "[", "]", "n", "=", "0", "while", "f", "is", "not", "None", "and", "(", "limit", "is", "None", "or", "n", "<", "limit", ")", ":", "lineno", "=", "f", ".", "f_lineno", "co", "=", "f", ".", "f_code", "filename", "=", "co", ".", "co_filename", "name", "=", "co", ".", "co_name", "linecache", ".", "checkcache", "(", "filename", ")", "line", "=", "linecache", ".", "getline", "(", "filename", ",", "lineno", ",", "f", ".", "f_globals", ")", "if", "line", ":", "line", "=", "line", ".", "strip", "(", ")", "else", ":", "line", "=", "None", "list", ".", "append", "(", "(", "filename", ",", "lineno", ",", "name", ",", "line", ")", ")", "f", "=", "f", ".", "f_back", "n", "=", "n", "+", "1", "list", ".", "reverse", "(", ")", "return", "list" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/traceback.py#L280-L312
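The stdlib function is directly usable; each entry is a (filename, lineno, name, line) quadruple ordered oldest-to-newest (Python 3 returns FrameSummary objects that unpack the same way):

import traceback

def inner():
    # oldest frame first; each entry unpacks as (filename, lineno, name, line)
    for filename, lineno, name, line in traceback.extract_stack():
        print(name, lineno, line)

def outer():
    inner()

outer()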
HKUST-Aerial-Robotics/Fast-Planner
2ddd7793eecd573dbb5b47e2c985aa06606df3cf
uav_simulator/Utils/quadrotor_msgs/src/quadrotor_msgs/msg/_OutputData.py
python
OutputData.serialize_numpy
(self, buff, numpy)
serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module
serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module
[ "serialize", "message", "with", "numpy", "array", "types", "into", "buffer", ":", "param", "buff", ":", "buffer", "StringIO", ":", "param", "numpy", ":", "numpy", "python", "module" ]
def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs)) _x = self.header.frame_id length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) if python3: buff.write(struct.pack('<I%sB'%length, length, *_x)) else: buff.write(struct.pack('<I%ss'%length, length, _x)) _x = self buff.write(_struct_H16d.pack(_x.loop_rate, _x.voltage, _x.orientation.x, _x.orientation.y, _x.orientation.z, _x.orientation.w, _x.angular_velocity.x, _x.angular_velocity.y, _x.angular_velocity.z, _x.linear_acceleration.x, _x.linear_acceleration.y, _x.linear_acceleration.z, _x.pressure_dheight, _x.pressure_height, _x.magnetic_field.x, _x.magnetic_field.y, _x.magnetic_field.z)) _x = self.radio_channel # - if encoded as a list instead, serialize as bytes instead of string if type(_x) in [list, tuple]: buff.write(_struct_8B.pack(*_x)) else: buff.write(_struct_8s.pack(_x)) buff.write(_struct_B.pack(self.seq)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x)))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
[ "def", "serialize_numpy", "(", "self", ",", "buff", ",", "numpy", ")", ":", "try", ":", "_x", "=", "self", "buff", ".", "write", "(", "_struct_3I", ".", "pack", "(", "_x", ".", "header", ".", "seq", ",", "_x", ".", "header", ".", "stamp", ".", "secs", ",", "_x", ".", "header", ".", "stamp", ".", "nsecs", ")", ")", "_x", "=", "self", ".", "header", ".", "frame_id", "length", "=", "len", "(", "_x", ")", "if", "python3", "or", "type", "(", "_x", ")", "==", "unicode", ":", "_x", "=", "_x", ".", "encode", "(", "'utf-8'", ")", "length", "=", "len", "(", "_x", ")", "if", "python3", ":", "buff", ".", "write", "(", "struct", ".", "pack", "(", "'<I%sB'", "%", "length", ",", "length", ",", "*", "_x", ")", ")", "else", ":", "buff", ".", "write", "(", "struct", ".", "pack", "(", "'<I%ss'", "%", "length", ",", "length", ",", "_x", ")", ")", "_x", "=", "self", "buff", ".", "write", "(", "_struct_H16d", ".", "pack", "(", "_x", ".", "loop_rate", ",", "_x", ".", "voltage", ",", "_x", ".", "orientation", ".", "x", ",", "_x", ".", "orientation", ".", "y", ",", "_x", ".", "orientation", ".", "z", ",", "_x", ".", "orientation", ".", "w", ",", "_x", ".", "angular_velocity", ".", "x", ",", "_x", ".", "angular_velocity", ".", "y", ",", "_x", ".", "angular_velocity", ".", "z", ",", "_x", ".", "linear_acceleration", ".", "x", ",", "_x", ".", "linear_acceleration", ".", "y", ",", "_x", ".", "linear_acceleration", ".", "z", ",", "_x", ".", "pressure_dheight", ",", "_x", ".", "pressure_height", ",", "_x", ".", "magnetic_field", ".", "x", ",", "_x", ".", "magnetic_field", ".", "y", ",", "_x", ".", "magnetic_field", ".", "z", ")", ")", "_x", "=", "self", ".", "radio_channel", "# - if encoded as a list instead, serialize as bytes instead of string", "if", "type", "(", "_x", ")", "in", "[", "list", ",", "tuple", "]", ":", "buff", ".", "write", "(", "_struct_8B", ".", "pack", "(", "*", "_x", ")", ")", "else", ":", "buff", ".", "write", "(", "_struct_8s", ".", "pack", "(", "_x", ")", ")", "buff", ".", "write", "(", "_struct_B", ".", "pack", "(", "self", ".", "seq", ")", ")", "except", "struct", ".", "error", "as", "se", ":", "self", ".", "_check_types", "(", "struct", ".", "error", "(", "\"%s: '%s' when writing '%s'\"", "%", "(", "type", "(", "se", ")", ",", "str", "(", "se", ")", ",", "str", "(", "_x", ")", ")", ")", ")", "except", "TypeError", "as", "te", ":", "self", ".", "_check_types", "(", "ValueError", "(", "\"%s: '%s' when writing '%s'\"", "%", "(", "type", "(", "te", ")", ",", "str", "(", "te", ")", ",", "str", "(", "_x", ")", ")", ")", ")" ]
https://github.com/HKUST-Aerial-Robotics/Fast-Planner/blob/2ddd7793eecd573dbb5b47e2c985aa06606df3cf/uav_simulator/Utils/quadrotor_msgs/src/quadrotor_msgs/msg/_OutputData.py#L197-L225
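The generated serializer leans on precompiled struct.Struct packers (_struct_3I, _struct_H16d, ...), where the name encodes the format string. A minimal sketch of that convention with the stdlib struct module:

import struct

_struct_3I = struct.Struct('<3I')   # three little-endian uint32 (header seq/secs/nsecs)
_struct_B = struct.Struct('<B')     # one uint8

buf = _struct_3I.pack(1, 1700000000, 42) + _struct_B.pack(7)
print(struct.unpack('<3IB', buf))   # (1, 1700000000, 42, 7)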
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/op_info_register.py
python
TBERegOp.compute_cost
(self, compute_cost=10)
return self
Define the calculation efficiency of the operator, which refers to the value of the cost model in the tiling module. Args: compute_cost (int): Value of compute cost. Default: 10.
Define the calculation efficiency of the operator, which refers to the value of the cost model in the tiling module.
[ "Define", "the", "calculation", "efficiency", "of", "the", "operator", "which", "refers", "to", "the", "value", "of", "the", "cost", "model", "in", "the", "tiling", "module", "." ]
def compute_cost(self, compute_cost=10): """ Define the calculation efficiency of operator, which refers to the value of the cost model in the tiling module. Args: compute_cost (int): Value of compute cost. Default: 10. """ self._is_int(compute_cost) self.compute_cost_ = compute_cost return self
[ "def", "compute_cost", "(", "self", ",", "compute_cost", "=", "10", ")", ":", "self", ".", "_is_int", "(", "compute_cost", ")", "self", ".", "compute_cost_", "=", "compute_cost", "return", "self" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/op_info_register.py#L511-L521
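Returning self makes the registration object a fluent builder, so setters chain. A generic sketch of that pattern; OpReg is a hypothetical stand-in, not the real TBERegOp:

class OpReg:
    def __init__(self, name):
        self.name = name
        self.compute_cost_ = 10

    def compute_cost(self, compute_cost=10):
        if not isinstance(compute_cost, int):
            raise TypeError('compute_cost must be an int')
        self.compute_cost_ = compute_cost
        return self            # enables chaining

reg = OpReg('my_op').compute_cost(20)
assert reg.compute_cost_ == 20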
etternagame/etterna
8775f74ac9c353320128609d4b4150672e9a6d04
extern/crashpad/crashpad/third_party/mini_chromium/mini_chromium/build/win_helper.py
python
_GenerateEnvironmentFiles
(install_dir, out_dir, script_path)
return result
It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers. Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) to set up the environment, and then do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path.
It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers. Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) to set up the environment, and then do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path.
[ "It", "s", "not", "sufficient", "to", "have", "the", "absolute", "path", "to", "the", "compiler", "linker", "etc", ".", "on", "Windows", "as", "those", "tools", "rely", "on", ".", "dlls", "being", "in", "the", "PATH", ".", "We", "also", "need", "to", "support", "both", "x86", "and", "x64", "compilers", ".", "Different", "architectures", "require", "a", "different", "compiler", "binary", "and", "different", "supporting", "environment", "variables", "(", "INCLUDE", "LIB", "LIBPATH", ")", ".", "So", "we", "extract", "the", "environment", "here", "wrap", "all", "invocations", "of", "compiler", "tools", "(", "cl", "link", "lib", "rc", "midl", "etc", ".", ")", "to", "set", "up", "the", "environment", "and", "then", "do", "not", "prefix", "the", "compiler", "with", "an", "absolute", "path", "instead", "preferring", "something", "like", "cl", ".", "exe", "in", "the", "rule", "which", "will", "then", "run", "whichever", "the", "environment", "setup", "has", "put", "in", "the", "path", "." ]
def _GenerateEnvironmentFiles(install_dir, out_dir, script_path): """It's not sufficient to have the absolute path to the compiler, linker, etc. on Windows, as those tools rely on .dlls being in the PATH. We also need to support both x86 and x64 compilers. Different architectures require a different compiler binary, and different supporting environment variables (INCLUDE, LIB, LIBPATH). So, we extract the environment here, wrap all invocations of compiler tools (cl, link, lib, rc, midl, etc.) to set up the environment, and then do not prefix the compiler with an absolute path, instead preferring something like "cl.exe" in the rule which will then run whichever the environment setup has put in the path.""" archs = ('x86', 'amd64', 'arm64') result = [] for arch in archs: # Extract environment variables for subprocesses. args = [os.path.join(install_dir, script_path)] script_arch_name = arch if script_path.endswith('SetEnv.cmd') and arch == 'amd64': script_arch_name = '/x64' if arch == 'arm64': script_arch_name = 'x86_arm64' args.extend((script_arch_name, '&&', 'set')) popen = subprocess.Popen( args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) variables, _ = popen.communicate() if popen.returncode != 0: raise Exception('"%s" failed with error %d' % (args, popen.returncode)) env = _ExtractImportantEnvironment(variables) env_block = _FormatAsEnvironmentBlock(env) basename = 'environment.' + arch with open(os.path.join(out_dir, basename), 'wb') as f: f.write(env_block) result.append(basename) return result
[ "def", "_GenerateEnvironmentFiles", "(", "install_dir", ",", "out_dir", ",", "script_path", ")", ":", "archs", "=", "(", "'x86'", ",", "'amd64'", ",", "'arm64'", ")", "result", "=", "[", "]", "for", "arch", "in", "archs", ":", "# Extract environment variables for subprocesses.", "args", "=", "[", "os", ".", "path", ".", "join", "(", "install_dir", ",", "script_path", ")", "]", "script_arch_name", "=", "arch", "if", "script_path", ".", "endswith", "(", "'SetEnv.cmd'", ")", "and", "arch", "==", "'amd64'", ":", "script_arch_name", "=", "'/x64'", "if", "arch", "==", "'arm64'", ":", "script_arch_name", "=", "'x86_arm64'", "args", ".", "extend", "(", "(", "script_arch_name", ",", "'&&'", ",", "'set'", ")", ")", "popen", "=", "subprocess", ".", "Popen", "(", "args", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "variables", ",", "_", "=", "popen", ".", "communicate", "(", ")", "if", "popen", ".", "returncode", "!=", "0", ":", "raise", "Exception", "(", "'\"%s\" failed with error %d'", "%", "(", "args", ",", "popen", ".", "returncode", ")", ")", "env", "=", "_ExtractImportantEnvironment", "(", "variables", ")", "env_block", "=", "_FormatAsEnvironmentBlock", "(", "env", ")", "basename", "=", "'environment.'", "+", "arch", "with", "open", "(", "os", ".", "path", ".", "join", "(", "out_dir", ",", "basename", ")", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "env_block", ")", "result", ".", "append", "(", "basename", ")", "return", "result" ]
https://github.com/etternagame/etterna/blob/8775f74ac9c353320128609d4b4150672e9a6d04/extern/crashpad/crashpad/third_party/mini_chromium/mini_chromium/build/win_helper.py#L71-L104
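The key trick in _GenerateEnvironmentFiles is chaining the vendor setup script with a dump of the resulting environment ("script && set") and parsing that output. A simplified sketch of the capture step; POSIX `env` stands in for Windows `set` so the snippet runs anywhere, and `setup_cmd` is a placeholder:

    import subprocess

    def capture_environment(setup_cmd="true"):
        # Run the setup command, then dump the environment it produced.
        out = subprocess.check_output(
            "%s && env" % setup_cmd, shell=True, text=True)
        env = {}
        for line in out.splitlines():
            if "=" in line:
                key, _, value = line.partition("=")
                env[key] = value
        return env

    print(sorted(capture_environment())[:3])  # a few captured variable names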
freeorion/freeorion
c266a40eccd3a99a17de8fe57c36ef6ba3771665
default/python/AI/ShipDesignAI.py
python
ShipDesignCache._check_cache_for_consistency
(self)
Check if the persistent cache is consistent with the gamestate and fix it if not. This function should be called once at the beginning of the turn (before update_shipdesign_cache()). Especially (only?) in multiplayer games, the shipDesignIDs may sometimes change across turns.
Check if the persistent cache is consistent with the gamestate and fix it if not.
[ "Check", "if", "the", "persistent", "cache", "is", "consistent", "with", "the", "gamestate", "and", "fix", "it", "if", "not", "." ]
def _check_cache_for_consistency(self): """Check if the persistent cache is consistent with the gamestate and fix it if not. This function should be called once at the beginning of the turn (before update_shipdesign_cache()). Especially (only?) in multiplayer games, the shipDesignIDs may sometimes change across turns. """ debug("Checking persistent cache for consistency...") try: for partname in self.part_by_partname: cached_name = self.part_by_partname[partname].name if cached_name != partname: self.part_by_partname[partname] = fo.getShipPart(partname) error("Part cache corrupted. Expected: %s, got: %s. Cache was repaired." % (partname, cached_name)) except Exception as e: self.part_by_partname.clear() error(e, exc_info=True) corrupted = [] # create a copy of the dict-keys so we can alter the dict for designname in list(self.design_id_by_name): # dropping invalid designs from cache if self.design_id_by_name[designname] == INVALID_ID: del self.design_id_by_name[designname] continue try: cached_name = fo.getShipDesign(self.design_id_by_name[designname]).name if cached_name != designname: warning("ShipID cache corrupted. Expected: %s, got: %s." % (designname, cached_name)) design_id = next( iter( [ shipDesignID for shipDesignID in fo.getEmpire().allShipDesigns if designname == fo.getShipDesign(shipDesignID).name ] ), None, ) if design_id is not None: self.design_id_by_name[designname] = design_id else: corrupted.append(designname) except AttributeError: warning("ShipID cache corrupted. Could not get cached shipdesign. Repairing Cache.", exc_info=True) design_id = next( iter( [ shipDesignID for shipDesignID in fo.getEmpire().allShipDesigns if designname == fo.getShipDesign(shipDesignID).name ] ), None, ) if design_id is not None: self.design_id_by_name[designname] = design_id else: corrupted.append(designname) for corrupted_entry in corrupted: del self.design_id_by_name[corrupted_entry] bad_ref = next( iter([_key for _key, _val in self.map_reference_design_name.items() if _val == corrupted_entry]), None ) if bad_ref is not None: del self.map_reference_design_name[bad_ref]
[ "def", "_check_cache_for_consistency", "(", "self", ")", ":", "debug", "(", "\"Checking persistent cache for consistency...\"", ")", "try", ":", "for", "partname", "in", "self", ".", "part_by_partname", ":", "cached_name", "=", "self", ".", "part_by_partname", "[", "partname", "]", ".", "name", "if", "cached_name", "!=", "partname", ":", "self", ".", "part_by_partname", "[", "partname", "]", "=", "fo", ".", "getShipPart", "(", "partname", ")", "error", "(", "\"Part cache corrupted. Expected: %s, got: %s. Cache was repaired.\"", "%", "(", "partname", ",", "cached_name", ")", ")", "except", "Exception", "as", "e", ":", "self", ".", "part_by_partname", ".", "clear", "(", ")", "error", "(", "e", ",", "exc_info", "=", "True", ")", "corrupted", "=", "[", "]", "# create a copy of the dict-keys so we can alter the dict", "for", "designname", "in", "list", "(", "self", ".", "design_id_by_name", ")", ":", "# dropping invalid designs from cache", "if", "self", ".", "design_id_by_name", "[", "designname", "]", "==", "INVALID_ID", ":", "del", "self", ".", "design_id_by_name", "[", "designname", "]", "continue", "try", ":", "cached_name", "=", "fo", ".", "getShipDesign", "(", "self", ".", "design_id_by_name", "[", "designname", "]", ")", ".", "name", "if", "cached_name", "!=", "designname", ":", "warning", "(", "\"ShipID cache corrupted. Expected: %s, got: %s.\"", "%", "(", "designname", ",", "cached_name", ")", ")", "design_id", "=", "next", "(", "iter", "(", "[", "shipDesignID", "for", "shipDesignID", "in", "fo", ".", "getEmpire", "(", ")", ".", "allShipDesigns", "if", "designname", "==", "fo", ".", "getShipDesign", "(", "shipDesignID", ")", ".", "name", "]", ")", ",", "None", ",", ")", "if", "design_id", "is", "not", "None", ":", "self", ".", "design_id_by_name", "[", "designname", "]", "=", "design_id", "else", ":", "corrupted", ".", "append", "(", "designname", ")", "except", "AttributeError", ":", "warning", "(", "\"ShipID cache corrupted. Could not get cached shipdesign. Repairing Cache.\"", ",", "exc_info", "=", "True", ")", "design_id", "=", "next", "(", "iter", "(", "[", "shipDesignID", "for", "shipDesignID", "in", "fo", ".", "getEmpire", "(", ")", ".", "allShipDesigns", "if", "designname", "==", "fo", ".", "getShipDesign", "(", "shipDesignID", ")", ".", "name", "]", ")", ",", "None", ",", ")", "if", "design_id", "is", "not", "None", ":", "self", ".", "design_id_by_name", "[", "designname", "]", "=", "design_id", "else", ":", "corrupted", ".", "append", "(", "designname", ")", "for", "corrupted_entry", "in", "corrupted", ":", "del", "self", ".", "design_id_by_name", "[", "corrupted_entry", "]", "bad_ref", "=", "next", "(", "iter", "(", "[", "_key", "for", "_key", ",", "_val", "in", "self", ".", "map_reference_design_name", ".", "items", "(", ")", "if", "_val", "==", "corrupted_entry", "]", ")", ",", "None", ")", "if", "bad_ref", "is", "not", "None", ":", "del", "self", ".", "map_reference_design_name", "[", "bad_ref", "]" ]
https://github.com/freeorion/freeorion/blob/c266a40eccd3a99a17de8fe57c36ef6ba3771665/default/python/AI/ShipDesignAI.py#L329-L393
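The repair loop above repeatedly uses next(iter([...]), None) to mean "first matching element, or None". The same idiom written directly over a generator expression, which avoids materializing the intermediate list:

    designs = {1: "scout", 2: "colony", 3: "scout"}  # toy stand-in data

    def find_design_id(name):
        # First key whose value matches, else the default None.
        return next((d_id for d_id, d_name in designs.items() if d_name == name), None)

    print(find_design_id("colony"))   # -> 2
    print(find_design_id("missing"))  # -> None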
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/logging/__init__.py
python
LoggerAdapter.info
(self, msg, *args, **kwargs)
Delegate an info call to the underlying logger, after adding contextual information from this adapter instance.
Delegate an info call to the underlying logger, after adding contextual information from this adapter instance.
[ "Delegate", "an", "info", "call", "to", "the", "underlying", "logger", "after", "adding", "contextual", "information", "from", "this", "adapter", "instance", "." ]
def info(self, msg, *args, **kwargs): """ Delegate an info call to the underlying logger, after adding contextual information from this adapter instance. """ msg, kwargs = self.process(msg, kwargs) self.logger.info(msg, *args, **kwargs)
[ "def", "info", "(", "self", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "msg", ",", "kwargs", "=", "self", ".", "process", "(", "msg", ",", "kwargs", ")", "self", ".", "logger", ".", "info", "(", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi-v7a/toolchain/lib/python2.7/logging/__init__.py#L1424-L1430
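Usage sketch for the stdlib LoggerAdapter shown above: the default process() attaches the adapter's extra dict to every delegated call, so a formatter can reference those fields.

    import logging

    logging.basicConfig(format="%(connid)s %(message)s", level=logging.INFO)
    adapter = logging.LoggerAdapter(logging.getLogger("demo"), {"connid": "abc123"})
    adapter.info("fetching rows from %s", "db1")  # -> abc123 fetching rows from db1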
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/SANS/isis_reduction_steps.py
python
DarkRunSubtraction._get_dark_run_name_and_path
(self, setting)
return dark_run_ws_name, dark_run_file_path
@param setting: a dark run settings tuple @returns a dark run workspace name and the dark run path @raises RuntimeError: if there is an issue with loading the workspace
[]
def _get_dark_run_name_and_path(self, setting): ''' @param settings: a dark run settings tuple @returns a dark run workspace name and the dark run path @raises RuntimeError: if there is an issue with loading the workspace ''' dark_run_ws_name = None dark_run_file_path = None try: dark_run_file_path, dark_run_ws_name = getFileAndName(setting.run_number) dark_run_file_path = dark_run_file_path.replace("\\", "/") except: raise RuntimeError("DarkRunSubtration: The specified dark run file cannot be found or loaded. " "Please make sure that that it exists in your search directory.") return dark_run_ws_name, dark_run_file_path
[ "def", "_get_dark_run_name_and_path", "(", "self", ",", "setting", ")", ":", "dark_run_ws_name", "=", "None", "dark_run_file_path", "=", "None", "try", ":", "dark_run_file_path", ",", "dark_run_ws_name", "=", "getFileAndName", "(", "setting", ".", "run_number", ")", "dark_run_file_path", "=", "dark_run_file_path", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "except", ":", "raise", "RuntimeError", "(", "\"DarkRunSubtration: The specified dark run file cannot be found or loaded. \"", "\"Please make sure that that it exists in your search directory.\"", ")", "return", "dark_run_ws_name", ",", "dark_run_file_path" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/isis_reduction_steps.py#L1533-L1547
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/lite/tools/dataset/cropper/cropper_configure.py
python
get_all_dependencies_of_file
(headers_flag, filename)
return list(processed_cc), "".join(errors)
Create dependency list for a file (incl. all source files needed). :param headers_flag: string containing headers include paths with -I prepended to them. :param filename: a string containing path of a file. :return: all dependencies of that file and the error string
Create dependency list for a file (incl. all source files needed).
[ "Create", "dependency", "list", "for", "a", "file", "(", "incl", ".", "all", "source", "files", "needed", ")", "." ]
def get_all_dependencies_of_file(headers_flag, filename): """ Create dependency list for a file (incl. all source files needed). :param headers_flag: string containing headers include paths with -I prepended to them. :param filename: a string containing path of a file. :return: all dependencies of that file and the error string """ errors = [] # a queue to process files queue_cc = queue.SimpleQueue() # a set of items that have ever been in queue_cc (faster access time) queue_cc_set = set() # store processed files processed_cc = set() # add the source file to the queue queue_cc.put(filename) queue_cc_set.add(filename) while not queue_cc.empty(): # process the first item in the queue curr_cc = queue_cc.get() deps, error = get_dependencies_of_file(headers_flag, curr_cc) errors.append(error) processed_cc.add(curr_cc) # prepare its dependencies for processing for dep_h in deps: dep_cc = build_source_file_path(dep_h) # ignore if marked as an external dependency if dep_cc == "EXTERNAL": processed_cc.add(dep_h) continue # add to queue if needs processing if needs_processing(dep_cc, processed_cc, queue_cc_set): queue_cc.put(dep_cc) queue_cc_set.add(dep_cc) logger.debug('file: {} | deps: {}'.format(os.path.basename(filename), len(processed_cc))) return list(processed_cc), "".join(errors)
[ "def", "get_all_dependencies_of_file", "(", "headers_flag", ",", "filename", ")", ":", "errors", "=", "[", "]", "# a queue to process files", "queue_cc", "=", "queue", ".", "SimpleQueue", "(", ")", "# a set of items that have ever been in queue_cc (faster access time)", "queue_cc_set", "=", "set", "(", ")", "# store processed files", "processed_cc", "=", "set", "(", ")", "# add the source file to the queue", "queue_cc", ".", "put", "(", "filename", ")", "queue_cc_set", ".", "add", "(", "filename", ")", "while", "not", "queue_cc", ".", "empty", "(", ")", ":", "# process the first item in the queue", "curr_cc", "=", "queue_cc", ".", "get", "(", ")", "deps", ",", "error", "=", "get_dependencies_of_file", "(", "headers_flag", ",", "curr_cc", ")", "errors", ".", "append", "(", "error", ")", "processed_cc", ".", "add", "(", "curr_cc", ")", "# prepare its dependencies for processing", "for", "dep_h", "in", "deps", ":", "dep_cc", "=", "build_source_file_path", "(", "dep_h", ")", "# ignore if marked as an external dependency", "if", "dep_cc", "==", "\"EXTERNAL\"", ":", "processed_cc", ".", "add", "(", "dep_h", ")", "continue", "# add to queue if needs processing", "if", "needs_processing", "(", "dep_cc", ",", "processed_cc", ",", "queue_cc_set", ")", ":", "queue_cc", ".", "put", "(", "dep_cc", ")", "queue_cc_set", ".", "add", "(", "dep_cc", ")", "logger", ".", "debug", "(", "'file: {} | deps: {}'", ".", "format", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ",", "len", "(", "processed_cc", ")", ")", ")", "return", "list", "(", "processed_cc", ")", ",", "\"\"", ".", "join", "(", "errors", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/lite/tools/dataset/cropper/cropper_configure.py#L241-L280
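Stripped of the project specifics, get_all_dependencies_of_file is a breadth-first traversal with a "seen" set guarding the queue. A self-contained version of that core loop over a toy graph:

    import queue

    def all_dependencies(start, deps_of):
        # deps_of(node) -> iterable of direct dependencies.
        q = queue.SimpleQueue()
        seen = {start}      # everything ever enqueued (cheap membership test)
        processed = set()   # everything fully expanded
        q.put(start)
        while not q.empty():
            node = q.get()
            processed.add(node)
            for dep in deps_of(node):
                if dep not in seen:
                    seen.add(dep)
                    q.put(dep)
        return processed

    graph = {"a": ["b", "c"], "b": ["c"], "c": []}
    print(sorted(all_dependencies("a", graph.__getitem__)))  # ['a', 'b', 'c']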
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/six.py
python
ensure_binary
(s, encoding="utf-8", errors="strict")
Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes`
Coerce **s** to six.binary_type.
[ "Coerce", "**", "s", "**", "to", "six", ".", "binary_type", "." ]
def ensure_binary(s, encoding="utf-8", errors="strict"): """Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ if isinstance(s, text_type): return s.encode(encoding, errors) elif isinstance(s, binary_type): return s else: raise TypeError("not expecting type '%s'" % type(s))
[ "def", "ensure_binary", "(", "s", ",", "encoding", "=", "\"utf-8\"", ",", "errors", "=", "\"strict\"", ")", ":", "if", "isinstance", "(", "s", ",", "text_type", ")", ":", "return", "s", ".", "encode", "(", "encoding", ",", "errors", ")", "elif", "isinstance", "(", "s", ",", "binary_type", ")", ":", "return", "s", "else", ":", "raise", "TypeError", "(", "\"not expecting type '%s'\"", "%", "type", "(", "s", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/six.py#L1839-L1871
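Quick behavior check for ensure_binary, with six's Python 3 aliases (text_type = str, binary_type = bytes) filled in so the snippet is self-contained:

    text_type, binary_type = str, bytes

    def ensure_binary(s, encoding="utf-8", errors="strict"):
        if isinstance(s, text_type):
            return s.encode(encoding, errors)  # str -> bytes
        elif isinstance(s, binary_type):
            return s                           # bytes pass through
        raise TypeError("not expecting type '%s'" % type(s))

    print(ensure_binary("héllo"))  # b'h\xc3\xa9llo'
    print(ensure_binary(b"raw"))   # b'raw'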
root-project/root
fcd3583bb14852bf2e8cd2415717cbaac0e75896
bindings/experimental/distrdf/python/DistRDF/Operation.py
python
Operation._classify_operation
(self, name)
return op_type
Classifies the given operation as action or transformation and returns the type.
Classifies the given operation as action or transformation and returns the type.
[ "Classifies", "the", "given", "operation", "as", "action", "or", "transformation", "and", "returns", "the", "type", "." ]
def _classify_operation(self, name): """ Classifies the given operation as action or transformation and returns the type. """ operations_dict = { "Define": Operation.TRANSFORMATION, "DefinePerSample": Operation.TRANSFORMATION, "Filter": Operation.TRANSFORMATION, "Range": Operation.TRANSFORMATION, "Aggregate": Operation.ACTION, "Histo1D": Operation.ACTION, "Histo2D": Operation.ACTION, "Histo3D": Operation.ACTION, "HistoND": Operation.ACTION, "Profile1D": Operation.ACTION, "Profile2D": Operation.ACTION, "Profile3D": Operation.ACTION, "Count": Operation.ACTION, "Min": Operation.ACTION, "Max": Operation.ACTION, "Mean": Operation.ACTION, "Sum": Operation.ACTION, "Fill": Operation.ACTION, "Redefine": Operation.TRANSFORMATION, "Reduce": Operation.ACTION, "Report": Operation.ACTION, "Take": Operation.ACTION, "Graph": Operation.ACTION, "Snapshot": Operation.INSTANT_ACTION, "Foreach": Operation.INSTANT_ACTION, "AsNumpy": Operation.INSTANT_ACTION } op_type = operations_dict.get(name) if not op_type: raise Exception("Invalid operation \"{}\"".format(name)) return op_type
[ "def", "_classify_operation", "(", "self", ",", "name", ")", ":", "operations_dict", "=", "{", "\"Define\"", ":", "Operation", ".", "TRANSFORMATION", ",", "\"DefinePerSample\"", ":", "Operation", ".", "TRANSFORMATION", ",", "\"Filter\"", ":", "Operation", ".", "TRANSFORMATION", ",", "\"Range\"", ":", "Operation", ".", "TRANSFORMATION", ",", "\"Aggregate\"", ":", "Operation", ".", "ACTION", ",", "\"Histo1D\"", ":", "Operation", ".", "ACTION", ",", "\"Histo2D\"", ":", "Operation", ".", "ACTION", ",", "\"Histo3D\"", ":", "Operation", ".", "ACTION", ",", "\"HistoND\"", ":", "Operation", ".", "ACTION", ",", "\"Profile1D\"", ":", "Operation", ".", "ACTION", ",", "\"Profile2D\"", ":", "Operation", ".", "ACTION", ",", "\"Profile3D\"", ":", "Operation", ".", "ACTION", ",", "\"Count\"", ":", "Operation", ".", "ACTION", ",", "\"Min\"", ":", "Operation", ".", "ACTION", ",", "\"Max\"", ":", "Operation", ".", "ACTION", ",", "\"Mean\"", ":", "Operation", ".", "ACTION", ",", "\"Sum\"", ":", "Operation", ".", "ACTION", ",", "\"Fill\"", ":", "Operation", ".", "ACTION", ",", "\"Redefine\"", ":", "Operation", ".", "TRANSFORMATION", ",", "\"Reduce\"", ":", "Operation", ".", "ACTION", ",", "\"Report\"", ":", "Operation", ".", "ACTION", ",", "\"Take\"", ":", "Operation", ".", "ACTION", ",", "\"Graph\"", ":", "Operation", ".", "ACTION", ",", "\"Snapshot\"", ":", "Operation", ".", "INSTANT_ACTION", ",", "\"Foreach\"", ":", "Operation", ".", "INSTANT_ACTION", ",", "\"AsNumpy\"", ":", "Operation", ".", "INSTANT_ACTION", "}", "op_type", "=", "operations_dict", ".", "get", "(", "name", ")", "if", "not", "op_type", ":", "raise", "Exception", "(", "\"Invalid operation \\\"{}\\\"\"", ".", "format", "(", "name", ")", ")", "return", "op_type" ]
https://github.com/root-project/root/blob/fcd3583bb14852bf2e8cd2415717cbaac0e75896/bindings/experimental/distrdf/python/DistRDF/Operation.py#L57-L96
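The classifier above is a plain dict lookup with an explicit failure path. One detail worth noting: `if not op_type` also rejects any falsy-but-valid value, so where that is a risk the `is None` comparison is the safer spelling:

    OPERATION_TYPES = {"Define": "transformation", "Count": "action"}  # toy table

    def classify(name):
        op_type = OPERATION_TYPES.get(name)
        if op_type is None:
            raise ValueError('Invalid operation "%s"' % name)
        return op_type

    print(classify("Count"))  # -> action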
musescore/MuseScore
a817fea23e3c2be30847b7fde5b01746222c252e
tools/crashdump/posix/generate_breakpad_symbols.py
python
GetSharedLibraryDependenciesChromeOS
(binary)
return _GetSharedLibraryDependenciesAndroidOrChromeOS(binary)
Return absolute paths to all shared library dependencies of the binary. This implementation assumes that we're running on a Linux system, but compiled for ChromeOS.
Return absolute paths to all shared library dependencies of the binary.
[ "Return", "absolute", "paths", "to", "all", "shared", "library", "dependencies", "of", "the", "binary", "." ]
def GetSharedLibraryDependenciesChromeOS(binary): """Return absolute paths to all shared library dependencies of the binary. This implementation assumes that we're running on a Linux system, but compiled for ChromeOS.""" return _GetSharedLibraryDependenciesAndroidOrChromeOS(binary)
[ "def", "GetSharedLibraryDependenciesChromeOS", "(", "binary", ")", ":", "return", "_GetSharedLibraryDependenciesAndroidOrChromeOS", "(", "binary", ")" ]
https://github.com/musescore/MuseScore/blob/a817fea23e3c2be30847b7fde5b01746222c252e/tools/crashdump/posix/generate_breakpad_symbols.py#L209-L214
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
qa/tasks/keycloak.py
python
download_conf
(ctx, config)
Downloads confi.py used in run_admin_cmds
Downloads confi.py used in run_admin_cmds
[ "Downloads", "confi", ".", "py", "used", "in", "run_admin_cmds" ]
def download_conf(ctx, config): """ Downloads confi.py used in run_admin_cmds """ assert isinstance(config, dict) log.info('Downloading conf...') testdir = teuthology.get_testdir(ctx) conf_branch = 'main' conf_repo = 'https://github.com/TRYTOBE8TME/scripts.git' for (client, _) in config.items(): ctx.cluster.only(client).run( args=[ 'git', 'clone', '-b', conf_branch, conf_repo, '{tdir}/scripts'.format(tdir=testdir), ], ) try: yield finally: log.info('Removing conf...') testdir = teuthology.get_testdir(ctx) for client in config: ctx.cluster.only(client).run( args=[ 'rm', '-rf', '{tdir}/scripts'.format(tdir=testdir), ], )
[ "def", "download_conf", "(", "ctx", ",", "config", ")", ":", "assert", "isinstance", "(", "config", ",", "dict", ")", "log", ".", "info", "(", "'Downloading conf...'", ")", "testdir", "=", "teuthology", ".", "get_testdir", "(", "ctx", ")", "conf_branch", "=", "'main'", "conf_repo", "=", "'https://github.com/TRYTOBE8TME/scripts.git'", "for", "(", "client", ",", "_", ")", "in", "config", ".", "items", "(", ")", ":", "ctx", ".", "cluster", ".", "only", "(", "client", ")", ".", "run", "(", "args", "=", "[", "'git'", ",", "'clone'", ",", "'-b'", ",", "conf_branch", ",", "conf_repo", ",", "'{tdir}/scripts'", ".", "format", "(", "tdir", "=", "testdir", ")", ",", "]", ",", ")", "try", ":", "yield", "finally", ":", "log", ".", "info", "(", "'Removing conf...'", ")", "testdir", "=", "teuthology", ".", "get_testdir", "(", "ctx", ")", "for", "client", "in", "config", ":", "ctx", ".", "cluster", ".", "only", "(", "client", ")", ".", "run", "(", "args", "=", "[", "'rm'", ",", "'-rf'", ",", "'{tdir}/scripts'", ".", "format", "(", "tdir", "=", "testdir", ")", ",", "]", ",", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/keycloak.py#L80-L110
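download_conf has the setup / yield / teardown shape that contextlib.contextmanager turns into a context manager (the decorator itself sits outside the extracted snippet). A generic sketch of the same structure, with the shell commands stubbed out as prints:

    import contextlib

    @contextlib.contextmanager
    def checkout(repo_url, dest):
        print("git clone %s %s" % (repo_url, dest))  # setup (stubbed)
        try:
            yield dest
        finally:
            print("rm -rf %s" % dest)  # teardown runs even if the body raises

    with checkout("https://example.invalid/scripts.git", "/tmp/scripts") as path:
        print("using", path)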
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/bdb.py
python
Bdb.runctx
(self, cmd, globals, locals)
For backwards-compatibility. Defers to run().
For backwards-compatibility. Defers to run().
[ "For", "backwards", "-", "compatibility", ".", "Defers", "to", "run", "()", "." ]
def runctx(self, cmd, globals, locals): """For backwards-compatibility. Defers to run().""" # B/W compatibility self.run(cmd, globals, locals)
[ "def", "runctx", "(", "self", ",", "cmd", ",", "globals", ",", "locals", ")", ":", "# B/W compatibility", "self", ".", "run", "(", "cmd", ",", "globals", ",", "locals", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/bdb.py#L605-L608
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
caffe2/python/task.py
python
Cluster.nodes
(self)
return self._nodes
Returns the list of unique node names used within this context.
Returns the list of unique node names used within this context.
[ "Returns", "the", "list", "of", "unique", "node", "names", "used", "within", "this", "context", "." ]
def nodes(self): """ Returns the list of unique node names used within this context. """ return self._nodes
[ "def", "nodes", "(", "self", ")", ":", "return", "self", ".", "_nodes" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/caffe2/python/task.py#L41-L45
google/earthenterprise
0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9
earth_enterprise/src/server/wsgi/serve/publish/publish_manager_helper.py
python
PublishManagerHelper._WritePublishContentToHtaccessFile
(self, htaccess_file, target_paths_list)
Writes publish content into htaccess-file. Args: htaccess_file: file descriptor for writing to. target_paths_list: target paths list. Raises: psycopg2.Error/Warning, PublishServeException.
Writes publish content into htaccess-file.
[ "Writes", "publish", "content", "into", "htaccess", "-", "file", "." ]
def _WritePublishContentToHtaccessFile(self, htaccess_file, target_paths_list): """Writes publish content into htaccess-file. Args: htaccess_file: file descriptor for writing to. target_paths_list: target paths list. Raises: psycopg2.Error/Warning, PublishServeException. """ default_target_path = self._GetEcDefaultDbTargetPath() # Write publish header to file. htaccess_file.write("%s" % PublishManagerHelper.HTACCESS_GE_PUBLISH_BEGIN) # Write RewriteBase to file. htaccess_file.write("%s" % HTACCESS_REWRITE_BASE) logger.info( "Default target path is currently set to: %s " % default_target_path) if default_target_path: # Database is set to default for Earth Client: relative_target_path = default_target_path[1:] htaccess_file.write( EC_DEFAULT_MAP_LINE0_LOCAL_REWRITECOND ) htaccess_file.write( EC_DEFAULT_MAP_LINE1_GOOGLE_REDIRECT % relative_target_path ) htaccess_file.write( EC_DEFAULT_MAP_LINE2_GOOGLE_REDIRECT % relative_target_path ) # Collects all the needed information for all the target paths based on # target ID and adds corresponding rewrite rules into htacces-file. for (target_path, target_id, serve_wms) in target_paths_list: (virtual_host_url, db_name, host_name, db_flags) = self._QueryTargetDetailsById(target_id) if (not virtual_host_url) or (not db_name): continue # no DB published on this target path. # Identify type of published DB. (unused_norm_db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name) if serve_utils.IsFusionDb(db_type): if not host_name: raise exceptions.PublishServeException( "Internal Error - undefined host name for Fusion database %s." % db_name) else: assert serve_utils.IsPortable(db_type) if host_name: raise exceptions.PublishServeException( "Internal Error - host name is not empty for portable %s." % db_name) # Put the rules into htacces file for current target url_parse_res = urlparse.urlparse(virtual_host_url) virtual_host_path = url_parse_res.path relative_target_path = target_path[1:] # Common lines for all the databases, globes. htaccess_file.write(LINE0_TARGETDESCR % target_path) htaccess_file.write(LINE1_TRAILING_SLASH_REWRITERULE % ( relative_target_path, relative_target_path)) htaccess_file.write(LINE2_POISEARCH_REWRITERULE % ( relative_target_path, constants.POI_SEARCH_SERVICE_NAME, constants.POI_SEARCH_SERVICE_NAME)) if serve_wms: htaccess_file.write(WMS_LINE0_REWRITECOND) htaccess_file.write(WMS_LINE1_REWRITERULE % ( relative_target_path, target_path)) else: htaccess_file.write(WMS_LINE0_REWRITERULE_R404 % ( relative_target_path)) # Content for Fusion earth (GE database). if db_type == basic_types.DbType.TYPE_GE: htaccess_file.write(GE_LINE0_REWRITERULE % relative_target_path) htaccess_file.write(GE_LINE1_REWRITECOND) htaccess_file.write(GE_LINE2_REWRITERULE % ( relative_target_path, virtual_host_path, target_path, db_type)) # Content for Fusion map (map database). elif db_type == basic_types.DbType.TYPE_MAP: assert isinstance(db_flags, int) if db_flags & basic_types.DbFlags.USE_GOOGLE_BASEMAP == 0: htaccess_file.write(MAP_LINE0_LOCAL_REWRITERULE % relative_target_path) else: htaccess_file.write(MAP_LINE0_GOOGLE_REWRITERULE % relative_target_path) htaccess_file.write(MAP_LINE1_REWRITERULE % relative_target_path) htaccess_file.write(MAP_LINE2_REWRITECOND) htaccess_file.write(MAP_LINE3_REWRITERULE % ( relative_target_path, virtual_host_path, target_path, db_type)) # Content for portable globes. elif serve_utils.IsPortable(db_type): htaccess_file.write(GLX_LINE0_REWRITERULE % ( relative_target_path, target_path)) htaccess_file.write(GLX_LINE1_REWRITECOND) htaccess_file.write(GLX_LINE2_REWRITERULE % ( relative_target_path, virtual_host_path, target_path, db_type)) else: raise exceptions.PublishServeException( "Unsupported DB type %s.", db_type) # write publish footer to file. htaccess_file.write("\n%s" %PublishManagerHelper.HTACCESS_GE_PUBLISH_END)
[ "def", "_WritePublishContentToHtaccessFile", "(", "self", ",", "htaccess_file", ",", "target_paths_list", ")", ":", "default_target_path", "=", "self", ".", "_GetEcDefaultDbTargetPath", "(", ")", "# Write publish header to file.", "htaccess_file", ".", "write", "(", "\"%s\"", "%", "PublishManagerHelper", ".", "HTACCESS_GE_PUBLISH_BEGIN", ")", "# Write RewriteBase to file.", "htaccess_file", ".", "write", "(", "\"%s\"", "%", "HTACCESS_REWRITE_BASE", ")", "logger", ".", "info", "(", "\"Default target path is currently set to: %s \"", "%", "default_target_path", ")", "if", "default_target_path", ":", "# Database is set to default for Earth Client:", "relative_target_path", "=", "default_target_path", "[", "1", ":", "]", "htaccess_file", ".", "write", "(", "EC_DEFAULT_MAP_LINE0_LOCAL_REWRITECOND", ")", "htaccess_file", ".", "write", "(", "EC_DEFAULT_MAP_LINE1_GOOGLE_REDIRECT", "%", "relative_target_path", ")", "htaccess_file", ".", "write", "(", "EC_DEFAULT_MAP_LINE2_GOOGLE_REDIRECT", "%", "relative_target_path", ")", "# Collects all the needed information for all the target paths based on", "# target ID and adds corresponding rewrite rules into htacces-file.", "for", "(", "target_path", ",", "target_id", ",", "serve_wms", ")", "in", "target_paths_list", ":", "(", "virtual_host_url", ",", "db_name", ",", "host_name", ",", "db_flags", ")", "=", "self", ".", "_QueryTargetDetailsById", "(", "target_id", ")", "if", "(", "not", "virtual_host_url", ")", "or", "(", "not", "db_name", ")", ":", "continue", "# no DB published on this target path.", "# Identify type of published DB.", "(", "unused_norm_db_path", ",", "db_type", ")", "=", "serve_utils", ".", "IdentifyPublishedDb", "(", "db_name", ")", "if", "serve_utils", ".", "IsFusionDb", "(", "db_type", ")", ":", "if", "not", "host_name", ":", "raise", "exceptions", ".", "PublishServeException", "(", "\"Internal Error - undefined host name for Fusion database %s.\"", "%", "db_name", ")", "else", ":", "assert", "serve_utils", ".", "IsPortable", "(", "db_type", ")", "if", "host_name", ":", "raise", "exceptions", ".", "PublishServeException", "(", "\"Internal Error - host name is not empty for portable %s.\"", "%", "db_name", ")", "# Put the rules into htacces file for current target", "url_parse_res", "=", "urlparse", ".", "urlparse", "(", "virtual_host_url", ")", "virtual_host_path", "=", "url_parse_res", ".", "path", "relative_target_path", "=", "target_path", "[", "1", ":", "]", "# Common lines for all the databases, globes.", "htaccess_file", ".", "write", "(", "LINE0_TARGETDESCR", "%", "target_path", ")", "htaccess_file", ".", "write", "(", "LINE1_TRAILING_SLASH_REWRITERULE", "%", "(", "relative_target_path", ",", "relative_target_path", ")", ")", "htaccess_file", ".", "write", "(", "LINE2_POISEARCH_REWRITERULE", "%", "(", "relative_target_path", ",", "constants", ".", "POI_SEARCH_SERVICE_NAME", ",", "constants", ".", "POI_SEARCH_SERVICE_NAME", ")", ")", "if", "serve_wms", ":", "htaccess_file", ".", "write", "(", "WMS_LINE0_REWRITECOND", ")", "htaccess_file", ".", "write", "(", "WMS_LINE1_REWRITERULE", "%", "(", "relative_target_path", ",", "target_path", ")", ")", "else", ":", "htaccess_file", ".", "write", "(", "WMS_LINE0_REWRITERULE_R404", "%", "(", "relative_target_path", ")", ")", "# Content for Fusion earth (GE database).", "if", "db_type", "==", "basic_types", ".", "DbType", ".", "TYPE_GE", ":", "htaccess_file", ".", "write", "(", "GE_LINE0_REWRITERULE", "%", "relative_target_path", ")", "htaccess_file", ".", "write", "(", "GE_LINE1_REWRITECOND", ")", "htaccess_file", ".", "write", "(", "GE_LINE2_REWRITERULE", "%", "(", "relative_target_path", ",", "virtual_host_path", ",", "target_path", ",", "db_type", ")", ")", "# Content for Fusion map (map database).", "elif", "db_type", "==", "basic_types", ".", "DbType", ".", "TYPE_MAP", ":", "assert", "isinstance", "(", "db_flags", ",", "int", ")", "if", "db_flags", "&", "basic_types", ".", "DbFlags", ".", "USE_GOOGLE_BASEMAP", "==", "0", ":", "htaccess_file", ".", "write", "(", "MAP_LINE0_LOCAL_REWRITERULE", "%", "relative_target_path", ")", "else", ":", "htaccess_file", ".", "write", "(", "MAP_LINE0_GOOGLE_REWRITERULE", "%", "relative_target_path", ")", "htaccess_file", ".", "write", "(", "MAP_LINE1_REWRITERULE", "%", "relative_target_path", ")", "htaccess_file", ".", "write", "(", "MAP_LINE2_REWRITECOND", ")", "htaccess_file", ".", "write", "(", "MAP_LINE3_REWRITERULE", "%", "(", "relative_target_path", ",", "virtual_host_path", ",", "target_path", ",", "db_type", ")", ")", "# Content for portable globes.", "elif", "serve_utils", ".", "IsPortable", "(", "db_type", ")", ":", "htaccess_file", ".", "write", "(", "GLX_LINE0_REWRITERULE", "%", "(", "relative_target_path", ",", "target_path", ")", ")", "htaccess_file", ".", "write", "(", "GLX_LINE1_REWRITECOND", ")", "htaccess_file", ".", "write", "(", "GLX_LINE2_REWRITERULE", "%", "(", "relative_target_path", ",", "virtual_host_path", ",", "target_path", ",", "db_type", ")", ")", "else", ":", "raise", "exceptions", ".", "PublishServeException", "(", "\"Unsupported DB type %s.\"", ",", "db_type", ")", "# write publish footer to file.", "htaccess_file", ".", "write", "(", "\"\\n%s\"", "%", "PublishManagerHelper", ".", "HTACCESS_GE_PUBLISH_END", ")" ]
https://github.com/google/earthenterprise/blob/0fe84e29be470cd857e3a0e52e5d0afd5bb8cee9/earth_enterprise/src/server/wsgi/serve/publish/publish_manager_helper.py#L1369-L1471
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/operations/_quant_ops.py
python
FakeQuantPerChannelGrad.__init__
(self, num_bits=8, quant_delay=0, symmetric=False, narrow_range=False, channel_axis=1)
Initialize FakeQuantPerChannelGrad
Initialize FakeQuantPerChannelGrad
[ "Initialize", "FakeQuantPerChannelGrad" ]
def __init__(self, num_bits=8, quant_delay=0, symmetric=False, narrow_range=False, channel_axis=1): """Initialize FakeQuantPerChannelGrad Fill""" if context.get_context('device_target') == "Ascend": from mindspore.ops._op_impl._custom_op import fake_quant_perchannel_grad if num_bits not in self.support_quant_bit: raise ValueError( f"For '{self.name}' attr \'num_bits\' is not support.") self.num_bits = validator.check_positive_int(num_bits, 'num_bits', self.name) self.quant_delay = validator.check_value_type( 'quant_delay', quant_delay, (int,), self.name) self.symmetric = validator.check_value_type( 'symmetric', symmetric, (bool,), self.name) self.narrow_range = validator.check_value_type( 'narrow_range', narrow_range, (bool,), self.name) self.channel_axis = validator.check_non_negative_int(channel_axis, 'channel axis', self.name) self.init_prim_io_names( inputs=['dout', 'x', 'min', 'max'], outputs=['dx'])
[ "def", "__init__", "(", "self", ",", "num_bits", "=", "8", ",", "quant_delay", "=", "0", ",", "symmetric", "=", "False", ",", "narrow_range", "=", "False", ",", "channel_axis", "=", "1", ")", ":", "if", "context", ".", "get_context", "(", "'device_target'", ")", "==", "\"Ascend\"", ":", "from", "mindspore", ".", "ops", ".", "_op_impl", ".", "_custom_op", "import", "fake_quant_perchannel_grad", "if", "num_bits", "not", "in", "self", ".", "support_quant_bit", ":", "raise", "ValueError", "(", "f\"For '{self.name}' attr \\'num_bits\\' is not support.\"", ")", "self", ".", "num_bits", "=", "validator", ".", "check_positive_int", "(", "num_bits", ",", "'num_bits'", ",", "self", ".", "name", ")", "self", ".", "quant_delay", "=", "validator", ".", "check_value_type", "(", "'quant_delay'", ",", "quant_delay", ",", "(", "int", ",", ")", ",", "self", ".", "name", ")", "self", ".", "symmetric", "=", "validator", ".", "check_value_type", "(", "'symmetric'", ",", "symmetric", ",", "(", "bool", ",", ")", ",", "self", ".", "name", ")", "self", ".", "narrow_range", "=", "validator", ".", "check_value_type", "(", "'narrow_range'", ",", "narrow_range", ",", "(", "bool", ",", ")", ",", "self", ".", "name", ")", "self", ".", "channel_axis", "=", "validator", ".", "check_non_negative_int", "(", "channel_axis", ",", "'channel axis'", ",", "self", ".", "name", ")", "self", ".", "init_prim_io_names", "(", "inputs", "=", "[", "'dout'", ",", "'x'", ",", "'min'", ",", "'max'", "]", ",", "outputs", "=", "[", "'dx'", "]", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/operations/_quant_ops.py#L975-L997
eventql/eventql
7ca0dbb2e683b525620ea30dc40540a22d5eb227
deps/3rdparty/spidermonkey/mozjs/build/pymake/pymake/data.py
python
Target.searchinlocs
(self, makefile, locs)
return None
Look in the given locations relative to the makefile working directory for a file. Return a pair of the target and the mtime if found, None if not.
Look in the given locations relative to the makefile working directory for a file. Return a pair of the target and the mtime if found, None if not.
[ "Look", "in", "the", "given", "locations", "relative", "to", "the", "makefile", "working", "directory", "for", "a", "file", ".", "Return", "a", "pair", "of", "the", "target", "and", "the", "mtime", "if", "found", "None", "if", "not", "." ]
def searchinlocs(self, makefile, locs): """ Look in the given locations relative to the makefile working directory for a file. Return a pair of the target and the mtime if found, None if not. """ for t in locs: fspath = util.normaljoin(makefile.workdir, t).replace('\\', '/') mtime = getmtime(fspath) # _log.info("Searching %s ... checking %s ... mtime %r" % (t, fspath, mtime)) if mtime is not None: return (t, mtime) return None
[ "def", "searchinlocs", "(", "self", ",", "makefile", ",", "locs", ")", ":", "for", "t", "in", "locs", ":", "fspath", "=", "util", ".", "normaljoin", "(", "makefile", ".", "workdir", ",", "t", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "mtime", "=", "getmtime", "(", "fspath", ")", "# _log.info(\"Searching %s ... checking %s ... mtime %r\" % (t, fspath, mtime))", "if", "mtime", "is", "not", "None", ":", "return", "(", "t", ",", "mtime", ")", "return", "None" ]
https://github.com/eventql/eventql/blob/7ca0dbb2e683b525620ea30dc40540a22d5eb227/deps/3rdparty/spidermonkey/mozjs/build/pymake/pymake/data.py#L1207-L1220
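Same search idea as searchinlocs, written against the standard library directly: probe each candidate relative to a working directory and return the first (path, mtime) hit, else None.

    import os

    def search_in_locs(workdir, candidates):
        for rel in candidates:
            path = os.path.normpath(os.path.join(workdir, rel)).replace("\\", "/")
            try:
                return rel, os.path.getmtime(path)  # first existing file wins
            except OSError:
                continue  # missing file: keep looking
        return None

    print(search_in_locs(".", ["does-not-exist.mk", "setup.py"]))  # None if neither exists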
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/frame.py
python
DataFrame.axes
(self)
return [self.index, self.columns]
Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')]
Return a list representing the axes of the DataFrame.
[ "Return", "a", "list", "representing", "the", "axes", "of", "the", "DataFrame", "." ]
def axes(self) -> List[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns]
[ "def", "axes", "(", "self", ")", "->", "List", "[", "Index", "]", ":", "return", "[", "self", ".", "index", ",", "self", ".", "columns", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/frame.py#L516-L530
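Iterating the pair returned by DataFrame.axes is handy when code must treat rows and columns uniformly:

    import pandas as pd

    df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
    for i, axis in enumerate(df.axes):
        print(i, list(axis))
    # 0 [0, 1]
    # 1 ['col1', 'col2']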
raspberrypi/tools
13474ee775d0c5ec8a7da4fb0a9fa84187abfc87
arm-bcm2708/arm-bcm2708hardfp-linux-gnueabi/share/gdb/python/gdb/printing.py
python
register_pretty_printer
(obj, printer)
Register pretty-printer PRINTER with OBJ. The printer is added to the front of the search list, thus one can override an existing printer if one needs to. Arguments: obj: Either an objfile, progspace, or None (in which case the printer is registered globally). printer: Either a function of one argument (old way) or any object which has attributes: name, enabled, __call__. Returns: Nothing. Raises: TypeError: A problem with the type of the printer. ValueError: The printer's name contains a semicolon ";". If the caller wants the printer to be listable and disableable, it must follow the PrettyPrinter API. This applies to the old way (functions) too. If printer is an object, __call__ is a method of two arguments: self, and the value to be pretty-printed. See PrettyPrinter.
Register pretty-printer PRINTER with OBJ.
[ "Register", "pretty", "-", "printer", "PRINTER", "with", "OBJ", "." ]
def register_pretty_printer(obj, printer): """Register pretty-printer PRINTER with OBJ. The printer is added to the front of the search list, thus one can override an existing printer if one needs to. Arguments: obj: Either an objfile, progspace, or None (in which case the printer is registered globally). printer: Either a function of one argument (old way) or any object which has attributes: name, enabled, __call__. Returns: Nothing. Raises: TypeError: A problem with the type of the printer. ValueError: The printer's name contains a semicolon ";". If the caller wants the printer to be listable and disableable, it must follow the PrettyPrinter API. This applies to the old way (functions) too. If printer is an object, __call__ is a method of two arguments: self, and the value to be pretty-printed. See PrettyPrinter. """ # Watch for both __name__ and name. # Functions get the former for free, but we don't want to use an # attribute named __foo__ for pretty-printers-as-objects. # If printer has both, we use `name'. if not hasattr(printer, "__name__") and not hasattr(printer, "name"): raise TypeError("printer missing attribute: name") if hasattr(printer, "name") and not hasattr(printer, "enabled"): raise TypeError("printer missing attribute: enabled") if not hasattr(printer, "__call__"): raise TypeError("printer missing attribute: __call__") if obj is None: if gdb.parameter("verbose"): gdb.write("Registering global %s pretty-printer ...\n" % name) obj = gdb else: if gdb.parameter("verbose"): gdb.write("Registering %s pretty-printer for %s ...\n" % (printer.name, obj.filename)) if hasattr(printer, "name"): if not isinstance(printer.name, basestring): raise TypeError("printer name is not a string") # If printer provides a name, make sure it doesn't contain ";". # Semicolon is used by the info/enable/disable pretty-printer commands # to delimit subprinters. if printer.name.find(";") >= 0: raise ValueError("semicolon ';' in printer name") # Also make sure the name is unique. # Alas, we can't do the same for functions and __name__, they could # all have a canonical name like "lookup_function". # PERF: gdb records printers in a list, making this inefficient. if (printer.name in [p.name for p in obj.pretty_printers if hasattr(p, "name")]): raise RuntimeError("pretty-printer already registered: %s" % printer.name) obj.pretty_printers.insert(0, printer)
[ "def", "register_pretty_printer", "(", "obj", ",", "printer", ")", ":", "# Watch for both __name__ and name.", "# Functions get the former for free, but we don't want to use an", "# attribute named __foo__ for pretty-printers-as-objects.", "# If printer has both, we use `name'.", "if", "not", "hasattr", "(", "printer", ",", "\"__name__\"", ")", "and", "not", "hasattr", "(", "printer", ",", "\"name\"", ")", ":", "raise", "TypeError", "(", "\"printer missing attribute: name\"", ")", "if", "hasattr", "(", "printer", ",", "\"name\"", ")", "and", "not", "hasattr", "(", "printer", ",", "\"enabled\"", ")", ":", "raise", "TypeError", "(", "\"printer missing attribute: enabled\"", ")", "if", "not", "hasattr", "(", "printer", ",", "\"__call__\"", ")", ":", "raise", "TypeError", "(", "\"printer missing attribute: __call__\"", ")", "if", "obj", "is", "None", ":", "if", "gdb", ".", "parameter", "(", "\"verbose\"", ")", ":", "gdb", ".", "write", "(", "\"Registering global %s pretty-printer ...\\n\"", "%", "name", ")", "obj", "=", "gdb", "else", ":", "if", "gdb", ".", "parameter", "(", "\"verbose\"", ")", ":", "gdb", ".", "write", "(", "\"Registering %s pretty-printer for %s ...\\n\"", "%", "(", "printer", ".", "name", ",", "obj", ".", "filename", ")", ")", "if", "hasattr", "(", "printer", ",", "\"name\"", ")", ":", "if", "not", "isinstance", "(", "printer", ".", "name", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"printer name is not a string\"", ")", "# If printer provides a name, make sure it doesn't contain \";\".", "# Semicolon is used by the info/enable/disable pretty-printer commands", "# to delimit subprinters.", "if", "printer", ".", "name", ".", "find", "(", "\";\"", ")", ">=", "0", ":", "raise", "ValueError", "(", "\"semicolon ';' in printer name\"", ")", "# Also make sure the name is unique.", "# Alas, we can't do the same for functions and __name__, they could", "# all have a canonical name like \"lookup_function\".", "# PERF: gdb records printers in a list, making this inefficient.", "if", "(", "printer", ".", "name", "in", "[", "p", ".", "name", "for", "p", "in", "obj", ".", "pretty_printers", "if", "hasattr", "(", "p", ",", "\"name\"", ")", "]", ")", ":", "raise", "RuntimeError", "(", "\"pretty-printer already registered: %s\"", "%", "printer", ".", "name", ")", "obj", ".", "pretty_printers", ".", "insert", "(", "0", ",", "printer", ")" ]
https://github.com/raspberrypi/tools/blob/13474ee775d0c5ec8a7da4fb0a9fa84187abfc87/arm-bcm2708/arm-bcm2708hardfp-linux-gnueabi/share/gdb/python/gdb/printing.py#L71-L133
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/dtypes/common.py
python
is_extension_type
(arr)
return False
Check whether an array-like is of a pandas extension class instance. .. deprecated:: 1.0.0 Use ``is_extension_array_dtype`` instead. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.arrays.SparseArray([1, 2, 3])) True >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True
Check whether an array-like is of a pandas extension class instance.
[ "Check", "whether", "an", "array", "-", "like", "is", "of", "a", "pandas", "extension", "class", "instance", "." ]
def is_extension_type(arr) -> bool: """ Check whether an array-like is of a pandas extension class instance. .. deprecated:: 1.0.0 Use ``is_extension_array_dtype`` instead. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.arrays.SparseArray([1, 2, 3])) True >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """ warnings.warn( "'is_extension_type' is deprecated and will be removed in a future " "version. Use 'is_extension_array_dtype' instead.", FutureWarning, stacklevel=2, ) if is_categorical(arr): return True elif is_sparse(arr): return True elif is_datetime64tz_dtype(arr): return True return False
[ "def", "is_extension_type", "(", "arr", ")", "->", "bool", ":", "warnings", ".", "warn", "(", "\"'is_extension_type' is deprecated and will be removed in a future \"", "\"version. Use 'is_extension_array_dtype' instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ",", ")", "if", "is_categorical", "(", "arr", ")", ":", "return", "True", "elif", "is_sparse", "(", "arr", ")", ":", "return", "True", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "return", "True", "return", "False" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/dtypes/common.py#L1500-L1562
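The shim above illustrates the usual deprecation recipe: warn with FutureWarning and stacklevel=2 so the message points at the caller, then fall through to the real check. A generic version of that recipe wrapping an old name around a new implementation:

    import warnings

    def new_api(x):
        return x * 2

    def old_api(x):
        warnings.warn(
            "'old_api' is deprecated; use 'new_api' instead.",
            FutureWarning,
            stacklevel=2,  # attribute the warning to old_api's caller
        )
        return new_api(x)

    print(old_api(3))  # emits FutureWarning at the call site, prints 6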
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/richtext.py
python
RichTextParagraph.Copy
(*args, **kwargs)
return _richtext.RichTextParagraph_Copy(*args, **kwargs)
Copy(self, RichTextParagraph obj)
Copy(self, RichTextParagraph obj)
[ "Copy", "(", "self", "RichTextParagraph", "obj", ")" ]
def Copy(*args, **kwargs): """Copy(self, RichTextParagraph obj)""" return _richtext.RichTextParagraph_Copy(*args, **kwargs)
[ "def", "Copy", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextParagraph_Copy", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L1987-L1989
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pickletools.py
python
read_uint2
(f)
r""" >>> import io >>> read_uint2(io.BytesIO(b'\xff\x00')) 255 >>> read_uint2(io.BytesIO(b'\xff\xff')) 65535
r""" >>> import io >>> read_uint2(io.BytesIO(b'\xff\x00')) 255 >>> read_uint2(io.BytesIO(b'\xff\xff')) 65535
[ "r", ">>>", "import", "io", ">>>", "read_uint2", "(", "io", ".", "BytesIO", "(", "b", "\\", "xff", "\\", "x00", "))", "255", ">>>", "read_uint2", "(", "io", ".", "BytesIO", "(", "b", "\\", "xff", "\\", "xff", "))", "65535" ]
def read_uint2(f): r""" >>> import io >>> read_uint2(io.BytesIO(b'\xff\x00')) 255 >>> read_uint2(io.BytesIO(b'\xff\xff')) 65535 """ data = f.read(2) if len(data) == 2: return _unpack("<H", data)[0] raise ValueError("not enough data in stream to read uint2")
[ "def", "read_uint2", "(", "f", ")", ":", "data", "=", "f", ".", "read", "(", "2", ")", "if", "len", "(", "data", ")", "==", "2", ":", "return", "_unpack", "(", "\"<H\"", ",", "data", ")", "[", "0", "]", "raise", "ValueError", "(", "\"not enough data in stream to read uint2\"", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pickletools.py#L231-L243
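Standalone version of read_uint2 with the module-level _unpack alias replaced by struct.unpack; "<H" is the little-endian unsigned 16-bit format doing the work:

    import io
    import struct

    def read_uint2(f):
        data = f.read(2)
        if len(data) == 2:
            return struct.unpack("<H", data)[0]  # low byte first
        raise ValueError("not enough data in stream to read uint2")

    print(read_uint2(io.BytesIO(b"\xff\x00")))  # 255
    print(read_uint2(io.BytesIO(b"\xff\xff")))  # 65535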
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/ir_utils.py
python
remove_args
(blocks)
return
remove ir.Arg nodes
remove ir.Arg nodes
[ "remove", "ir", ".", "Arg", "nodes" ]
def remove_args(blocks): """remove ir.Arg nodes""" for block in blocks.values(): new_body = [] for stmt in block.body: if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Arg): continue new_body.append(stmt) block.body = new_body return
[ "def", "remove_args", "(", "blocks", ")", ":", "for", "block", "in", "blocks", ".", "values", "(", ")", ":", "new_body", "=", "[", "]", "for", "stmt", "in", "block", ".", "body", ":", "if", "isinstance", "(", "stmt", ",", "ir", ".", "Assign", ")", "and", "isinstance", "(", "stmt", ".", "value", ",", "ir", ".", "Arg", ")", ":", "continue", "new_body", ".", "append", "(", "stmt", ")", "block", ".", "body", "=", "new_body", "return" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/ir_utils.py#L499-L508
abforce/xposed_art_n
ec3fbe417d74d4664cec053d91dd4e3881176374
tools/cpplint.py
python
_IncludeState.CanonicalizeAlphabeticalOrder
(self, header_path)
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path.
Returns a path canonicalized for alphabetical comparison.
[ "Returns", "a", "path", "canonicalized", "for", "alphabetical", "comparison", "." ]
def CanonicalizeAlphabeticalOrder(self, header_path): """Returns a path canonicalized for alphabetical comparison. - replaces "-" with "_" so they both cmp the same. - removes '-inl' since we don't require them to be after the main header. - lowercase everything, just in case. Args: header_path: Path to be canonicalized. Returns: Canonicalized path. """ return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
[ "def", "CanonicalizeAlphabeticalOrder", "(", "self", ",", "header_path", ")", ":", "return", "header_path", ".", "replace", "(", "'-inl.h'", ",", "'.h'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ".", "lower", "(", ")" ]
https://github.com/abforce/xposed_art_n/blob/ec3fbe417d74d4664cec053d91dd4e3881176374/tools/cpplint.py#L471-L484
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/interpolate/fitpack2.py
python
UnivariateSpline.roots
(self)
Return the zeros of the spline. Restriction: only cubic splines are supported by fitpack.
Return the zeros of the spline.
[ "Return", "the", "zeros", "of", "the", "spline", "." ]
def roots(self): """ Return the zeros of the spline. Restriction: only cubic splines are supported by fitpack. """ k = self._data[5] if k == 3: z, m, ier = dfitpack.sproot(*self._eval_args[:2]) if not ier == 0: raise ValueError("Error code returned by sproot: %s" % ier) return z[:m] raise NotImplementedError('finding roots unsupported for ' 'non-cubic splines')
[ "def", "roots", "(", "self", ")", ":", "k", "=", "self", ".", "_data", "[", "5", "]", "if", "k", "==", "3", ":", "z", ",", "m", ",", "ier", "=", "dfitpack", ".", "sproot", "(", "*", "self", ".", "_eval_args", "[", ":", "2", "]", ")", "if", "not", "ier", "==", "0", ":", "raise", "ValueError", "(", "\"Error code returned by sproot: %s\"", "%", "ier", ")", "return", "z", "[", ":", "m", "]", "raise", "NotImplementedError", "(", "'finding roots unsupported for '", "'non-cubic splines'", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/interpolate/fitpack2.py#L405-L417
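A usage sketch through scipy's public API: `roots` is available only for cubic (k=3) splines, as the record notes. The sample data below is illustrative:

import numpy as np
from scipy.interpolate import UnivariateSpline

x = np.linspace(-3, 3, 50)
y = x**2 - 1                       # true zeros at x = -1 and x = 1
spl = UnivariateSpline(x, y, k=3, s=0)
print(spl.roots())                 # approximately [-1.  1.]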
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/pyparsing.py
python
pyparsing_common.convertToDate
(fmt="%Y-%m-%d")
return cvt_fn
Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)]
Helper to create a parse action for converting parsed date string to Python datetime.date
[ "Helper", "to", "create", "a", "parse", "action", "for", "converting", "parsed", "date", "string", "to", "Python", "datetime", ".", "date" ]
def convertToDate(fmt="%Y-%m-%d"): """ Helper to create a parse action for converting parsed date string to Python datetime.date Params - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) Example:: date_expr = pyparsing_common.iso8601_date.copy() date_expr.setParseAction(pyparsing_common.convertToDate()) print(date_expr.parseString("1999-12-31")) prints:: [datetime.date(1999, 12, 31)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt).date() except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn
[ "def", "convertToDate", "(", "fmt", "=", "\"%Y-%m-%d\"", ")", ":", "def", "cvt_fn", "(", "s", ",", "l", ",", "t", ")", ":", "try", ":", "return", "datetime", ".", "strptime", "(", "t", "[", "0", "]", ",", "fmt", ")", ".", "date", "(", ")", "except", "ValueError", "as", "ve", ":", "raise", "ParseException", "(", "s", ",", "l", ",", "str", "(", "ve", ")", ")", "return", "cvt_fn" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/_vendor/pyparsing.py#L5593-L5612
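The helper returns a parse action closed over a strptime format string. A standalone sketch of the same closure pattern without pyparsing (a plain ValueError stands in for ParseException, an assumption made for the sketch):

from datetime import datetime

def convert_to_date(fmt="%Y-%m-%d"):
    # Build a converter closed over the strptime format string.
    def cvt_fn(token):
        try:
            return datetime.strptime(token, fmt).date()
        except ValueError as ve:
            raise ValueError("bad date %r: %s" % (token, ve))
    return cvt_fn

assert str(convert_to_date()("1999-12-31")) == "1999-12-31"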
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/ultimatelistctrl.py
python
UltimateListMainWindow.EnableSelectionGradient
(self, enable=True)
Globally enables/disables drawing of gradient selections. :param `enable`: ``True`` to enable gradient-style selections, ``False`` to disable it. :note: Calling this method disables any Vista-style selection previously enabled.
Globally enables/disables drawing of gradient selections.
[ "Globally", "enables", "/", "disables", "drawing", "of", "gradient", "selections", "." ]
def EnableSelectionGradient(self, enable=True): """ Globally enables/disables drawing of gradient selections. :param `enable`: ``True`` to enable gradient-style selections, ``False`` to disable it. :note: Calling this method disables any Vista-style selection previously enabled. """ self._usegradients = enable self._vistaselection = False self.RefreshSelected()
[ "def", "EnableSelectionGradient", "(", "self", ",", "enable", "=", "True", ")", ":", "self", ".", "_usegradients", "=", "enable", "self", ".", "_vistaselection", "=", "False", "self", ".", "RefreshSelected", "(", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/ultimatelistctrl.py#L10643-L10656
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
src/pybind/mgr/dashboard/services/access_control.py
python
ac_user_create_cmd
(_, username: str, inbuf: str, rolename: Optional[str] = None, name: Optional[str] = None, email: Optional[str] = None, enabled: bool = True, force_password: bool = False, pwd_expiration_date: Optional[int] = None, pwd_update_required: bool = False)
return 0, json.dumps(user.to_dict()), ''
Create a user. Password read from -i <file>
Create a user. Password read from -i <file>
[ "Create", "a", "user", ".", "Password", "read", "from", "-", "i", "<file", ">" ]
def ac_user_create_cmd(_, username: str, inbuf: str, rolename: Optional[str] = None, name: Optional[str] = None, email: Optional[str] = None, enabled: bool = True, force_password: bool = False, pwd_expiration_date: Optional[int] = None, pwd_update_required: bool = False): ''' Create a user. Password read from -i <file> ''' password = inbuf try: role = mgr.ACCESS_CTRL_DB.get_role(rolename) if rolename else None except RoleDoesNotExist as ex: if rolename not in SYSTEM_ROLES: return -errno.ENOENT, '', str(ex) role = SYSTEM_ROLES[rolename] try: if not force_password: pw_check = PasswordPolicy(password, username) pw_check.check_all() user = mgr.ACCESS_CTRL_DB.create_user(username, password, name, email, enabled, pwd_expiration_date, pwd_update_required) except PasswordPolicyException as ex: return -errno.EINVAL, '', str(ex) except UserAlreadyExists as ex: return 0, str(ex), '' if role: user.set_roles([role]) mgr.ACCESS_CTRL_DB.save() return 0, json.dumps(user.to_dict()), ''
[ "def", "ac_user_create_cmd", "(", "_", ",", "username", ":", "str", ",", "inbuf", ":", "str", ",", "rolename", ":", "Optional", "[", "str", "]", "=", "None", ",", "name", ":", "Optional", "[", "str", "]", "=", "None", ",", "email", ":", "Optional", "[", "str", "]", "=", "None", ",", "enabled", ":", "bool", "=", "True", ",", "force_password", ":", "bool", "=", "False", ",", "pwd_expiration_date", ":", "Optional", "[", "int", "]", "=", "None", ",", "pwd_update_required", ":", "bool", "=", "False", ")", ":", "password", "=", "inbuf", "try", ":", "role", "=", "mgr", ".", "ACCESS_CTRL_DB", ".", "get_role", "(", "rolename", ")", "if", "rolename", "else", "None", "except", "RoleDoesNotExist", "as", "ex", ":", "if", "rolename", "not", "in", "SYSTEM_ROLES", ":", "return", "-", "errno", ".", "ENOENT", ",", "''", ",", "str", "(", "ex", ")", "role", "=", "SYSTEM_ROLES", "[", "rolename", "]", "try", ":", "if", "not", "force_password", ":", "pw_check", "=", "PasswordPolicy", "(", "password", ",", "username", ")", "pw_check", ".", "check_all", "(", ")", "user", "=", "mgr", ".", "ACCESS_CTRL_DB", ".", "create_user", "(", "username", ",", "password", ",", "name", ",", "email", ",", "enabled", ",", "pwd_expiration_date", ",", "pwd_update_required", ")", "except", "PasswordPolicyException", "as", "ex", ":", "return", "-", "errno", ".", "EINVAL", ",", "''", ",", "str", "(", "ex", ")", "except", "UserAlreadyExists", "as", "ex", ":", "return", "0", ",", "str", "(", "ex", ")", ",", "''", "if", "role", ":", "user", ".", "set_roles", "(", "[", "role", "]", ")", "mgr", ".", "ACCESS_CTRL_DB", ".", "save", "(", ")", "return", "0", ",", "json", ".", "dumps", "(", "user", ".", "to_dict", "(", ")", ")", ",", "''" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/dashboard/services/access_control.py#L718-L752
tensor-compiler/taco
d0654a84137169883973c40a951dfdb89883fd9c
python_bindings/pytaco/pytensor/taco_tensor.py
python
tensor_log10
(t1, out_format, dtype=None)
return _compute_unary_elt_eise_op(f, t1, out_format, dtype)
Takes the log base 10 of each input in the tensor. Note that this is applied to all elements in the tensor, not just non-zeros. Warnings --------- The log10 of 0 is undefined and is performed on every element in the tensor regardless of sparsity. Parameters ------------ t1: tensor, array_like input tensor or array_like object out_format: format, mode_format, optional * If a :class:`format` is specified, the result tensor is stored in the format out_format. * If a :class:`mode_format` is specified, the result tensor has a format with all of the dimensions stored in the :class:`mode_format` passed in. dtype: Datatype The datatype of the output tensor. Examples ---------- >>> import pytaco as pt >>> pt.tensor_log10([10, 100], out_format=pt.compressed, dtype=pt.float32).to_array() array([1., 2.], dtype=float32) Returns -------- log10: tensor The element-wise log10 of the input tensor.
Takes the log base 10 of each input in the tensor.
[ "Takes", "the", "log", "base", "10", "of", "each", "input", "in", "the", "tensor", "." ]
def tensor_log10(t1, out_format, dtype=None): """ Takes the log base 10 of each input in the tensor. Note that this is applied to all elements in the tensor, not just non-zeros. Warnings --------- The log10 of 0 is undefined and is performed on every element in the tensor regardless of sparsity. Parameters ------------ t1: tensor, array_like input tensor or array_like object out_format: format, mode_format, optional * If a :class:`format` is specified, the result tensor is stored in the format out_format. * If a :class:`mode_format` is specified, the result tensor has a format with all of the dimensions stored in the :class:`mode_format` passed in. dtype: Datatype The datatype of the output tensor. Examples ---------- >>> import pytaco as pt >>> pt.tensor_log10([10, 100], out_format=pt.compressed, dtype=pt.float32).to_array() array([1., 2.], dtype=float32) Returns -------- log10: tensor The element-wise log10 of the input tensor. """ t1 = as_tensor(t1, copy=False) cast_val = _cm.max_type(_cm.float32, t1.dtype) f = lambda x: _cm.log10(_cm.cast(x, cast_val)) return _compute_unary_elt_eise_op(f, t1, out_format, dtype)
[ "def", "tensor_log10", "(", "t1", ",", "out_format", ",", "dtype", "=", "None", ")", ":", "t1", "=", "as_tensor", "(", "t1", ",", "copy", "=", "False", ")", "cast_val", "=", "_cm", ".", "max_type", "(", "_cm", ".", "float32", ",", "t1", ".", "dtype", ")", "f", "=", "lambda", "x", ":", "_cm", ".", "log10", "(", "_cm", ".", "cast", "(", "x", ",", "cast_val", ")", ")", "return", "_compute_unary_elt_eise_op", "(", "f", ",", "t1", ",", "out_format", ",", "dtype", ")" ]
https://github.com/tensor-compiler/taco/blob/d0654a84137169883973c40a951dfdb89883fd9c/python_bindings/pytaco/pytensor/taco_tensor.py#L1793-L1832
jackaudio/jack2
21b293dbc37d42446141a08922cdec0d2550c6a0
waflib/ConfigSet.py
python
ConfigSet.prepend_value
(self, var, val)
Prepends a value to the specified item:: def configure(conf): conf.env.prepend_value('CFLAGS', ['-O2']) The value must be a list or a tuple
Prepends a value to the specified item::
[ "Prepends", "a", "value", "to", "the", "specified", "item", "::" ]
def prepend_value(self, var, val): """ Prepends a value to the specified item:: def configure(conf): conf.env.prepend_value('CFLAGS', ['-O2']) The value must be a list or a tuple """ if isinstance(val, str): val = [val] self.table[var] = val + self._get_list_value_for_modification(var)
[ "def", "prepend_value", "(", "self", ",", "var", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "str", ")", ":", "val", "=", "[", "val", "]", "self", ".", "table", "[", "var", "]", "=", "val", "+", "self", ".", "_get_list_value_for_modification", "(", "var", ")" ]
https://github.com/jackaudio/jack2/blob/21b293dbc37d42446141a08922cdec0d2550c6a0/waflib/ConfigSet.py#L231-L242
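ConfigSet keeps variables in a `table` dict; a dict-backed sketch of the prepend semantics, with `_get_list_value_for_modification` simplified to a plain lookup (an assumption made for the sketch):

class ConfigSetSketch:
    def __init__(self):
        self.table = {}

    def prepend_value(self, var, val):
        # A bare string is wrapped so a lone flag prepends as one element.
        if isinstance(val, str):
            val = [val]
        self.table[var] = val + list(self.table.get(var, []))

env = ConfigSetSketch()
env.prepend_value('CFLAGS', ['-g'])
env.prepend_value('CFLAGS', '-O2')
assert env.table['CFLAGS'] == ['-O2', '-g']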
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/distributions/python/ops/beta.py
python
Beta.variance
(self, name="variance")
Variance of the distribution.
Variance of the distribution.
[ "Variance", "of", "the", "distribution", "." ]
def variance(self, name="variance"): """Variance of the distribution.""" with ops.name_scope(self.name): with ops.op_scope([self._a, self._b, self._a_b_sum], name): return (self._a * self._b) / ( self._a_b_sum **2 * (self._a_b_sum + 1))
[ "def", "variance", "(", "self", ",", "name", "=", "\"variance\"", ")", ":", "with", "ops", ".", "name_scope", "(", "self", ".", "name", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "self", ".", "_a", ",", "self", ".", "_b", ",", "self", ".", "_a_b_sum", "]", ",", "name", ")", ":", "return", "(", "self", ".", "_a", "*", "self", ".", "_b", ")", "/", "(", "self", ".", "_a_b_sum", "**", "2", "*", "(", "self", ".", "_a_b_sum", "+", "1", ")", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/distributions/python/ops/beta.py#L238-L243
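The expression implements the Beta-distribution variance a*b / ((a+b)**2 * (a+b+1)); a plain-Python check against the hand-computed value for Beta(2, 3), which is 6/150 = 0.04:

def beta_variance(a, b):
    a_b_sum = a + b
    # Matches the TF expression: (a * b) / (sum**2 * (sum + 1))
    return (a * b) / (a_b_sum ** 2 * (a_b_sum + 1))

assert abs(beta_variance(2.0, 3.0) - 0.04) < 1e-12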
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
contrib/gizmos/gtk/gizmos.py
python
PreEditableListBox
(*args, **kwargs)
return val
PreEditableListBox() -> EditableListBox
PreEditableListBox() -> EditableListBox
[ "PreEditableListBox", "()", "-", ">", "EditableListBox" ]
def PreEditableListBox(*args, **kwargs): """PreEditableListBox() -> EditableListBox""" val = _gizmos.new_PreEditableListBox(*args, **kwargs) return val
[ "def", "PreEditableListBox", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "val", "=", "_gizmos", ".", "new_PreEditableListBox", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "val" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/gtk/gizmos.py#L196-L199
intel-iot-devkit/how-to-code-samples
b4ea616f36bbfa2e042beb1698f968cfd651d79f
robot-arm/python/iot_robot_arm/hardware/grove.py
python
GroveBoard.update_hardware_state
(self)
Update hardware state.
Update hardware state.
[ "Update", "hardware", "state", "." ]
def update_hardware_state(self): """ Update hardware state. """ joystick_reading = self.read_joystick() self.trigger_hardware_event(JOYSTICK_READING, joystick_reading)
[ "def", "update_hardware_state", "(", "self", ")", ":", "joystick_reading", "=", "self", ".", "read_joystick", "(", ")", "self", ".", "trigger_hardware_event", "(", "JOYSTICK_READING", ",", "joystick_reading", ")" ]
https://github.com/intel-iot-devkit/how-to-code-samples/blob/b4ea616f36bbfa2e042beb1698f968cfd651d79f/robot-arm/python/iot_robot_arm/hardware/grove.py#L92-L99
crosswalk-project/crosswalk
1b9b80835e83e77390bd6cdbc03beb63f2a6f550
build/android/merge_jars.py
python
IsMergeableJar
(jar_path)
return True
Returns True if a certain JAR does not have any classes outside the allowed namespaces.
Returns True if a certain JAR does not have any classes outside the allowed namespaces.
[ "Returns", "True", "if", "a", "certain", "JAR", "does", "not", "have", "any", "classes", "outside", "the", "allowed", "namespaces", "." ]
def IsMergeableJar(jar_path): """ Returns True if a certain JAR does not have any classes outside the allowed namespaces. """ with zipfile.ZipFile(jar_path) as zip_file: for entry_name in zip_file.namelist(): if entry_name.endswith('/'): # Directories are irrelevant. continue if any(fnmatch.fnmatchcase(entry_name, f) for f in JAR_ENTRY_WHITELIST): continue return False return True
[ "def", "IsMergeableJar", "(", "jar_path", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "jar_path", ")", "as", "zip_file", ":", "for", "entry_name", "in", "zip_file", ".", "namelist", "(", ")", ":", "if", "entry_name", ".", "endswith", "(", "'/'", ")", ":", "# Directories are irrelevant.", "continue", "if", "any", "(", "fnmatch", ".", "fnmatchcase", "(", "entry_name", ",", "f", ")", "for", "f", "in", "JAR_ENTRY_WHITELIST", ")", ":", "continue", "return", "False", "return", "True" ]
https://github.com/crosswalk-project/crosswalk/blob/1b9b80835e83e77390bd6cdbc03beb63f2a6f550/build/android/merge_jars.py#L70-L82
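The check scans a JAR's entry list and rejects the archive on the first file outside the whitelist. A sketch over a plain list of entry names so it runs without a JAR on disk (the whitelist patterns here are illustrative, not crosswalk's real list):

import fnmatch

JAR_ENTRY_WHITELIST = ['org/xwalk/*', 'META-INF/*']  # illustrative patterns

def is_mergeable(entry_names):
    for entry_name in entry_names:
        if entry_name.endswith('/'):  # directories are irrelevant
            continue
        if any(fnmatch.fnmatchcase(entry_name, f) for f in JAR_ENTRY_WHITELIST):
            continue
        return False
    return True

assert is_mergeable(['org/xwalk/Foo.class', 'META-INF/MANIFEST.MF'])
assert not is_mergeable(['com/other/Bar.class'])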
gromacs/gromacs
7dec3a3f99993cf5687a122de3e12de31c21c399
admin/copyright.py
python
select_comment_handler
(override, filename)
Select comment handler for a file based on file name and input options.
Select comment handler for a file based on file name and input options.
[ "Select", "comment", "handler", "for", "a", "file", "based", "on", "file", "name", "and", "input", "options", "." ]
def select_comment_handler(override, filename): """Select comment handler for a file based on file name and input options.""" filetype = override if not filetype and filename != '-': basename = os.path.basename(filename) root, ext = os.path.splitext(basename) if ext == '.cmakein': dummy, ext2 = os.path.splitext(root) if ext2: ext = ext2 if ext in ('.c', '.cu', '.cpp', '.cl', '.h', '.cuh', '.clh', '.y', '.l', '.pre', '.bm'): filetype = 'c' elif ext in ('.tex',): filetype = 'tex' elif basename in ('CMakeLists.txt', 'GMXRC', 'git-pre-commit') or \ ext in ('.cmake', '.cmakein', '.py', '.sh', '.bash', '.csh', '.zsh'): filetype = 'sh' if filetype in comment_handlers: return comment_handlers[filetype] if filetype: sys.stderr.write("Unsupported input format: {0}\n".format(filetype)) elif filename != '-': sys.stderr.write("Unsupported input format: {0}\n".format(filename)) else: sys.stderr.write("No file name or file type provided.\n") sys.exit(1)
[ "def", "select_comment_handler", "(", "override", ",", "filename", ")", ":", "filetype", "=", "override", "if", "not", "filetype", "and", "filename", "!=", "'-'", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "if", "ext", "==", "'.cmakein'", ":", "dummy", ",", "ext2", "=", "os", ".", "path", ".", "splitext", "(", "root", ")", "if", "ext2", ":", "ext", "=", "ext2", "if", "ext", "in", "(", "'.c'", ",", "'.cu'", ",", "'.cpp'", ",", "'.cl'", ",", "'.h'", ",", "'.cuh'", ",", "'.clh'", ",", "'.y'", ",", "'.l'", ",", "'.pre'", ",", "'.bm'", ")", ":", "filetype", "=", "'c'", "elif", "ext", "in", "(", "'.tex'", ",", ")", ":", "filetype", "=", "'tex'", "elif", "basename", "in", "(", "'CMakeLists.txt'", ",", "'GMXRC'", ",", "'git-pre-commit'", ")", "or", "ext", "in", "(", "'.cmake'", ",", "'.cmakein'", ",", "'.py'", ",", "'.sh'", ",", "'.bash'", ",", "'.csh'", ",", "'.zsh'", ")", ":", "filetype", "=", "'sh'", "if", "filetype", "in", "comment_handlers", ":", "return", "comment_handlers", "[", "filetype", "]", "if", "filetype", ":", "sys", ".", "stderr", ".", "write", "(", "\"Unsupported input format: {0}\\n\"", ".", "format", "(", "filetype", ")", ")", "elif", "filename", "!=", "'-'", ":", "sys", ".", "stderr", ".", "write", "(", "\"Unsupported input format: {0}\\n\"", ".", "format", "(", "filename", ")", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\"No file name or file type provided.\\n\"", ")", "sys", ".", "exit", "(", "1", ")" ]
https://github.com/gromacs/gromacs/blob/7dec3a3f99993cf5687a122de3e12de31c21c399/admin/copyright.py#L297-L322
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/types/npytypes.py
python
Record.members
(self)
return [(k, v.type) for k, v in ordered]
An ordered list of (name, type) for the fields.
An ordered list of (name, type) for the fields.
[ "An", "ordered", "list", "of", "(", "name", "type", ")", "for", "the", "fields", "." ]
def members(self): """An ordered list of (name, type) for the fields. """ ordered = sorted(self.fields.items(), key=lambda x: x[1].offset) return [(k, v.type) for k, v in ordered]
[ "def", "members", "(", "self", ")", ":", "ordered", "=", "sorted", "(", "self", ".", "fields", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ".", "offset", ")", "return", "[", "(", "k", ",", "v", ".", "type", ")", "for", "k", ",", "v", "in", "ordered", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numba/types/npytypes.py#L196-L200
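Fields are ordered by their stored byte offset before the (name, type) pairs are emitted; a sketch with a namedtuple standing in for numba's field descriptor (the descriptor shape is assumed from the record above):

from collections import namedtuple

FieldInfo = namedtuple('FieldInfo', ['type', 'offset'])  # assumed descriptor shape

def members(fields):
    # Order fields by byte offset, then expose (name, type) pairs.
    ordered = sorted(fields.items(), key=lambda x: x[1].offset)
    return [(k, v.type) for k, v in ordered]

fields = {'b': FieldInfo('int32', 4), 'a': FieldInfo('float64', 0)}
assert members(fields) == [('a', 'float64'), ('b', 'int32')]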
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_misc.py
python
TipProvider.GetTip
(*args, **kwargs)
return _misc_.TipProvider_GetTip(*args, **kwargs)
GetTip(self) -> String
GetTip(self) -> String
[ "GetTip", "(", "self", ")", "-", ">", "String" ]
def GetTip(*args, **kwargs): """GetTip(self) -> String""" return _misc_.TipProvider_GetTip(*args, **kwargs)
[ "def", "GetTip", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "TipProvider_GetTip", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_misc.py#L1255-L1257
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/stringold.py
python
strip
(s)
return s.strip()
strip(s) -> string Return a copy of the string s with leading and trailing whitespace removed.
strip(s) -> string
[ "strip", "(", "s", ")", "-", ">", "string" ]
def strip(s): """strip(s) -> string Return a copy of the string s with leading and trailing whitespace removed. """ return s.strip()
[ "def", "strip", "(", "s", ")", ":", "return", "s", ".", "strip", "(", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/stringold.py#L74-L81
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py2/scipy/signal/filter_design.py
python
ellipap
(N, rp, rs)
return z, p, k
Return (z,p,k) of Nth-order elliptic analog lowpass filter. The filter is a normalized prototype that has `rp` decibels of ripple in the passband and a stopband `rs` decibels down. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- ellip : Filter design function using this prototype References ---------- .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5 and 12.
Return (z,p,k) of Nth-order elliptic analog lowpass filter.
[ "Return", "(", "z", "p", "k", ")", "of", "Nth", "-", "order", "elliptic", "analog", "lowpass", "filter", "." ]
def ellipap(N, rp, rs): """Return (z,p,k) of Nth-order elliptic analog lowpass filter. The filter is a normalized prototype that has `rp` decibels of ripple in the passband and a stopband `rs` decibels down. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- ellip : Filter design function using this prototype References ---------- .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5 and 12. """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero warning # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) elif N == 1: p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0)) k = -p z = [] return asarray(z), asarray(p), k eps = numpy.sqrt(10 ** (0.1 * rp) - 1) ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1) ck1p = numpy.sqrt(1 - ck1 * ck1) if ck1p == 1: raise ValueError("Cannot design a filter with given rp and rs" " specifications.") val = special.ellipk([ck1 * ck1, ck1p * ck1p]) if abs(1 - ck1p * ck1p) < EPSILON: krat = 0 else: krat = N * val[0] / val[1] m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250, disp=0) if m < 0 or m > 1: m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250, disp=0) capk = special.ellipk(m) j = numpy.arange(1 - N % 2, N, 2) jj = len(j) [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) z = 1.0 / (sqrt(m) * snew) z = 1j * z z = numpy.concatenate((z, conjugate(z))) r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p), maxfun=250, maxiter=250, disp=0) v0 = capk * r / (N * val[0]) [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) if N % 2: newp = numpy.compress(abs(p.imag) > EPSILON * numpy.sqrt(numpy.sum(p * numpy.conjugate(p), axis=0).real), p, axis=-1) p = numpy.concatenate((p, conjugate(newp))) else: p = numpy.concatenate((p, conjugate(p))) k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real if N % 2 == 0: k = k / numpy.sqrt((1 + eps * eps)) return z, p, k
[ "def", "ellipap", "(", "N", ",", "rp", ",", "rs", ")", ":", "if", "abs", "(", "int", "(", "N", ")", ")", "!=", "N", ":", "raise", "ValueError", "(", "\"Filter order must be a nonnegative integer\"", ")", "elif", "N", "==", "0", ":", "# Avoid divide-by-zero warning", "# Even order filters have DC gain of -rp dB", "return", "numpy", ".", "array", "(", "[", "]", ")", ",", "numpy", ".", "array", "(", "[", "]", ")", ",", "10", "**", "(", "-", "rp", "/", "20", ")", "elif", "N", "==", "1", ":", "p", "=", "-", "sqrt", "(", "1.0", "/", "(", "10", "**", "(", "0.1", "*", "rp", ")", "-", "1.0", ")", ")", "k", "=", "-", "p", "z", "=", "[", "]", "return", "asarray", "(", "z", ")", ",", "asarray", "(", "p", ")", ",", "k", "eps", "=", "numpy", ".", "sqrt", "(", "10", "**", "(", "0.1", "*", "rp", ")", "-", "1", ")", "ck1", "=", "eps", "/", "numpy", ".", "sqrt", "(", "10", "**", "(", "0.1", "*", "rs", ")", "-", "1", ")", "ck1p", "=", "numpy", ".", "sqrt", "(", "1", "-", "ck1", "*", "ck1", ")", "if", "ck1p", "==", "1", ":", "raise", "ValueError", "(", "\"Cannot design a filter with given rp and rs\"", "\" specifications.\"", ")", "val", "=", "special", ".", "ellipk", "(", "[", "ck1", "*", "ck1", ",", "ck1p", "*", "ck1p", "]", ")", "if", "abs", "(", "1", "-", "ck1p", "*", "ck1p", ")", "<", "EPSILON", ":", "krat", "=", "0", "else", ":", "krat", "=", "N", "*", "val", "[", "0", "]", "/", "val", "[", "1", "]", "m", "=", "optimize", ".", "fmin", "(", "_kratio", ",", "[", "0.5", "]", ",", "args", "=", "(", "krat", ",", ")", ",", "maxfun", "=", "250", ",", "maxiter", "=", "250", ",", "disp", "=", "0", ")", "if", "m", "<", "0", "or", "m", ">", "1", ":", "m", "=", "optimize", ".", "fminbound", "(", "_kratio", ",", "0", ",", "1", ",", "args", "=", "(", "krat", ",", ")", ",", "maxfun", "=", "250", ",", "disp", "=", "0", ")", "capk", "=", "special", ".", "ellipk", "(", "m", ")", "j", "=", "numpy", ".", "arange", "(", "1", "-", "N", "%", "2", ",", "N", ",", "2", ")", "jj", "=", "len", "(", "j", ")", "[", "s", ",", "c", ",", "d", ",", "phi", "]", "=", "special", ".", "ellipj", "(", "j", "*", "capk", "/", "N", ",", "m", "*", "numpy", ".", "ones", "(", "jj", ")", ")", "snew", "=", "numpy", ".", "compress", "(", "abs", "(", "s", ")", ">", "EPSILON", ",", "s", ",", "axis", "=", "-", "1", ")", "z", "=", "1.0", "/", "(", "sqrt", "(", "m", ")", "*", "snew", ")", "z", "=", "1j", "*", "z", "z", "=", "numpy", ".", "concatenate", "(", "(", "z", ",", "conjugate", "(", "z", ")", ")", ")", "r", "=", "optimize", ".", "fmin", "(", "_vratio", ",", "special", ".", "ellipk", "(", "m", ")", ",", "args", "=", "(", "1.", "/", "eps", ",", "ck1p", "*", "ck1p", ")", ",", "maxfun", "=", "250", ",", "maxiter", "=", "250", ",", "disp", "=", "0", ")", "v0", "=", "capk", "*", "r", "/", "(", "N", "*", "val", "[", "0", "]", ")", "[", "sv", ",", "cv", ",", "dv", ",", "phi", "]", "=", "special", ".", "ellipj", "(", "v0", ",", "1", "-", "m", ")", "p", "=", "-", "(", "c", "*", "d", "*", "sv", "*", "cv", "+", "1j", "*", "s", "*", "dv", ")", "/", "(", "1", "-", "(", "d", "*", "sv", ")", "**", "2.0", ")", "if", "N", "%", "2", ":", "newp", "=", "numpy", ".", "compress", "(", "abs", "(", "p", ".", "imag", ")", ">", "EPSILON", "*", "numpy", ".", "sqrt", "(", "numpy", ".", "sum", "(", "p", "*", "numpy", ".", "conjugate", "(", "p", ")", ",", "axis", "=", "0", ")", ".", "real", ")", ",", "p", ",", "axis", "=", "-", "1", ")", "p", "=", "numpy", ".", "concatenate", "(", "(", "p", ",", "conjugate", "(", "newp", ")", ")", ")", "else", ":", "p", "=", "numpy", ".", 
"concatenate", "(", "(", "p", ",", "conjugate", "(", "p", ")", ")", ")", "k", "=", "(", "numpy", ".", "prod", "(", "-", "p", ",", "axis", "=", "0", ")", "/", "numpy", ".", "prod", "(", "-", "z", ",", "axis", "=", "0", ")", ")", ".", "real", "if", "N", "%", "2", "==", "0", ":", "k", "=", "k", "/", "numpy", ".", "sqrt", "(", "(", "1", "+", "eps", "*", "eps", ")", ")", "return", "z", ",", "p", ",", "k" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py2/scipy/signal/filter_design.py#L3903-L3984
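A usage sketch through scipy's public wrapper: `ellipap` returns the normalized analog prototype's zeros, poles, and gain, which `signal.ellip` then composes into concrete filters:

from scipy import signal

# 4th-order prototype: 1 dB passband ripple, 40 dB stopband attenuation.
z, p, k = signal.ellipap(4, rp=1, rs=40)
print(len(z), len(p))   # 4 zeros and 4 poles for an even order
print(k)                # real overall gain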
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/lib/agw/aui/auibar.py
python
AuiToolBar.DoSetSize
(self, x, y, width, height, sizeFlags=wx.SIZE_AUTO)
Sets the position and size of the window in pixels. The `sizeFlags` parameter indicates the interpretation of the other params if they are equal to -1. :param integer `x`: the window `x` position; :param integer `y`: the window `y` position; :param integer `width`: the window width; :param integer `height`: the window height; :param integer `sizeFlags`: may have one of these bits set: =================================== ====================================== Size Flags Description =================================== ====================================== ``wx.SIZE_AUTO`` A -1 indicates that a class-specific default should be used. ``wx.SIZE_AUTO_WIDTH`` A -1 indicates that a class-specific default should be used for the width. ``wx.SIZE_AUTO_HEIGHT`` A -1 indicates that a class-specific default should be used for the height. ``wx.SIZE_USE_EXISTING`` Existing dimensions should be used if -1 values are supplied. ``wx.SIZE_ALLOW_MINUS_ONE`` Allow dimensions of -1 and less to be interpreted as real dimensions, not default values. ``wx.SIZE_FORCE`` Normally, if the position and the size of the window are already the same as the parameters of this function, nothing is done, but with this flag a window resize may be forced even in this case (supported in wx 2.6.2 and later and only implemented for MSW and ignored elsewhere currently) =================================== ====================================== :note: Overridden from :class:`PyControl`.
Sets the position and size of the window in pixels. The `sizeFlags` parameter indicates the interpretation of the other params if they are equal to -1.
[ "Sets", "the", "position", "and", "size", "of", "the", "window", "in", "pixels", ".", "The", "sizeFlags", "parameter", "indicates", "the", "interpretation", "of", "the", "other", "params", "if", "they", "are", "equal", "to", "-", "1", "." ]
def DoSetSize(self, x, y, width, height, sizeFlags=wx.SIZE_AUTO): """ Sets the position and size of the window in pixels. The `sizeFlags` parameter indicates the interpretation of the other params if they are equal to -1. :param integer `x`: the window `x` position; :param integer `y`: the window `y` position; :param integer `width`: the window width; :param integer `height`: the window height; :param integer `sizeFlags`: may have one of these bits set: =================================== ====================================== Size Flags Description =================================== ====================================== ``wx.SIZE_AUTO`` A -1 indicates that a class-specific default should be used. ``wx.SIZE_AUTO_WIDTH`` A -1 indicates that a class-specific default should be used for the width. ``wx.SIZE_AUTO_HEIGHT`` A -1 indicates that a class-specific default should be used for the height. ``wx.SIZE_USE_EXISTING`` Existing dimensions should be used if -1 values are supplied. ``wx.SIZE_ALLOW_MINUS_ONE`` Allow dimensions of -1 and less to be interpreted as real dimensions, not default values. ``wx.SIZE_FORCE`` Normally, if the position and the size of the window are already the same as the parameters of this function, nothing is done, but with this flag a window resize may be forced even in this case (supported in wx 2.6.2 and later and only implemented for MSW and ignored elsewhere currently) =================================== ====================================== :note: Overridden from :class:`PyControl`. """ parent_size = self.GetParent().GetClientSize() if x + width > parent_size.x: width = max(0, parent_size.x - x) if y + height > parent_size.y: height = max(0, parent_size.y - y) wx.PyControl.DoSetSize(self, x, y, width, height, sizeFlags)
[ "def", "DoSetSize", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "sizeFlags", "=", "wx", ".", "SIZE_AUTO", ")", ":", "parent_size", "=", "self", ".", "GetParent", "(", ")", ".", "GetClientSize", "(", ")", "if", "x", "+", "width", ">", "parent_size", ".", "x", ":", "width", "=", "max", "(", "0", ",", "parent_size", ".", "x", "-", "x", ")", "if", "y", "+", "height", ">", "parent_size", ".", "y", ":", "height", "=", "max", "(", "0", ",", "parent_size", ".", "y", "-", "y", ")", "wx", ".", "PyControl", ".", "DoSetSize", "(", "self", ",", "x", ",", "y", ",", "width", ",", "height", ",", "sizeFlags", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/aui/auibar.py#L3352-L3387
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/x_any.py
python
XAny.to_str
(self)
return pprint.pformat(self.to_dict())
Returns the string representation of the model
Returns the string representation of the model
[ "Returns", "the", "string", "representation", "of", "the", "model" ]
def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict())
[ "def", "to_str", "(", "self", ")", ":", "return", "pprint", ".", "pformat", "(", "self", ".", "to_dict", "(", ")", ")" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/x_any.py#L70-L72
NREL/EnergyPlus
fadc5973b85c70e8cc923efb69c144e808a26078
cmake/ReverseDDPostProcess.py
python
main
()
return 1
Handles the main entry point into this script
Handles the main entry point into this script
[ "Handles", "the", "main", "entry", "point", "into", "this", "script" ]
def main(): """Handles the main entry point into this script""" # validate and get command line arguments reverse_dd_test_dir = process_command_arguments() # configure up all the paths base_dir, reversed_dir = configure_root_dirs(reverse_dd_test_dir) # configure paths for both csv and mtr outputs base_csv, reversed_csv = configure_paths(base_dir, reversed_dir, 'eplusout') base_mtr, rev_mtr = configure_paths(base_dir, reversed_dir, 'eplusmtr') if both_csv_files_missing(base_csv, reversed_csv): return 0 # assume everything is fine if the CSV is missing in *both* builds # do comparison of the outputs csv_match = files_match(base_csv, reversed_csv) mtr_match = files_match(base_mtr, rev_mtr) if os.path.exists(rev_mtr) else True # report the results of the comparisons if csv_match and mtr_match: return 0 return 1
[ "def", "main", "(", ")", ":", "# validate and get command line arguments", "reverse_dd_test_dir", "=", "process_command_arguments", "(", ")", "# configure up all the paths", "base_dir", ",", "reversed_dir", "=", "configure_root_dirs", "(", "reverse_dd_test_dir", ")", "# configure paths for both csv and mtr outputs", "base_csv", ",", "reversed_csv", "=", "configure_paths", "(", "base_dir", ",", "reversed_dir", ",", "'eplusout'", ")", "base_mtr", ",", "rev_mtr", "=", "configure_paths", "(", "base_dir", ",", "reversed_dir", ",", "'eplusmtr'", ")", "if", "both_csv_files_missing", "(", "base_csv", ",", "reversed_csv", ")", ":", "return", "0", "# assume everything is fine if the CSV is missing in *both* builds", "# do comparison of the outputs", "csv_match", "=", "files_match", "(", "base_csv", ",", "reversed_csv", ")", "mtr_match", "=", "files_match", "(", "base_mtr", ",", "rev_mtr", ")", "if", "os", ".", "path", ".", "exists", "(", "rev_mtr", ")", "else", "True", "# report the results of the comparisons", "if", "csv_match", "and", "mtr_match", ":", "return", "0", "return", "1" ]
https://github.com/NREL/EnergyPlus/blob/fadc5973b85c70e8cc923efb69c144e808a26078/cmake/ReverseDDPostProcess.py#L146-L169
choasup/caffe-yolo9000
e8a476c4c23d756632f7a26c681a96e3ab672544
scripts/cpp_lint.py
python
CheckAccess
(filename, clean_lines, linenum, nesting_state, error)
Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found.
Checks for improper use of DISALLOW* macros.
[ "Checks", "for", "improper", "use", "of", "DISALLOW", "*", "macros", "." ]
def CheckAccess(filename, clean_lines, linenum, nesting_state, error): """Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A _NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' r'DISALLOW_EVIL_CONSTRUCTORS|' r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: # Found DISALLOW* macro outside a class declaration, or perhaps it # was used inside a function when it should have been part of the # class declaration. We could issue a warning here, but it # probably resulted in a compiler error already. pass
[ "def", "CheckAccess", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "nesting_state", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# get rid of comments and strings", "matched", "=", "Match", "(", "(", "r'\\s*(DISALLOW_COPY_AND_ASSIGN|'", "r'DISALLOW_EVIL_CONSTRUCTORS|'", "r'DISALLOW_IMPLICIT_CONSTRUCTORS)'", ")", ",", "line", ")", "if", "not", "matched", ":", "return", "if", "nesting_state", ".", "stack", "and", "isinstance", "(", "nesting_state", ".", "stack", "[", "-", "1", "]", ",", "_ClassInfo", ")", ":", "if", "nesting_state", ".", "stack", "[", "-", "1", "]", ".", "access", "!=", "'private'", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/constructors'", ",", "3", ",", "'%s must be in the private: section'", "%", "matched", ".", "group", "(", "1", ")", ")", "else", ":", "# Found DISALLOW* macro outside a class declaration, or perhaps it", "# was used inside a function when it should have been part of the", "# class declaration. We could issue a warning here, but it", "# probably resulted in a compiler error already.", "pass" ]
https://github.com/choasup/caffe-yolo9000/blob/e8a476c4c23d756632f7a26c681a96e3ab672544/scripts/cpp_lint.py#L2490-L2518
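The lint check hinges on one anchored regular expression over the comment-stripped line (cpplint's `Match` wraps `re.match`); a quick demonstration of the same pattern:

import re

DISALLOW_RE = (r'\s*(DISALLOW_COPY_AND_ASSIGN|'
               r'DISALLOW_EVIL_CONSTRUCTORS|'
               r'DISALLOW_IMPLICIT_CONSTRUCTORS)')

m = re.match(DISALLOW_RE, '  DISALLOW_COPY_AND_ASSIGN(Foo);')
assert m and m.group(1) == 'DISALLOW_COPY_AND_ASSIGN'
assert re.match(DISALLOW_RE, 'void Foo();') is None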
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/external/coremltools_wrap/coremltools/coremltools/models/model.py
python
MLModel.__init__
(self, model, useCPUOnly=False)
Construct an MLModel from a .mlmodel Parameters ---------- model: str or Model_pb2 If a string is given it should be the location of the .mlmodel to load. useCPUOnly: bool Set to true to restrict loading of model on CPU Only. Defaults to False. Examples -------- >>> loaded_model = MLModel('my_model_file.mlmodel')
Construct an MLModel from a .mlmodel
[ "Construct", "an", "MLModel", "from", "a", ".", "mlmodel" ]
def __init__(self, model, useCPUOnly=False): """ Construct an MLModel from a .mlmodel Parameters ---------- model: str or Model_pb2 If a string is given it should be the location of the .mlmodel to load. useCPUOnly: bool Set to true to restrict loading of model on CPU Only. Defaults to False. Examples -------- >>> loaded_model = MLModel('my_model_file.mlmodel') """ if isinstance(model, _string_types): self.__proxy__, self._spec, self._framework_error = _get_proxy_and_spec( model, useCPUOnly ) elif isinstance(model, _Model_pb2.Model): filename = _tempfile.mktemp(suffix=".mlmodel") _save_spec(model, filename) self.__proxy__, self._spec, self._framework_error = _get_proxy_and_spec( filename, useCPUOnly ) try: _os.remove(filename) except OSError: pass else: raise TypeError( "Expected model to be a .mlmodel file or a Model_pb2 object" ) self._input_description = _FeatureDescription(self._spec.description.input) self._output_description = _FeatureDescription(self._spec.description.output)
[ "def", "__init__", "(", "self", ",", "model", ",", "useCPUOnly", "=", "False", ")", ":", "if", "isinstance", "(", "model", ",", "_string_types", ")", ":", "self", ".", "__proxy__", ",", "self", ".", "_spec", ",", "self", ".", "_framework_error", "=", "_get_proxy_and_spec", "(", "model", ",", "useCPUOnly", ")", "elif", "isinstance", "(", "model", ",", "_Model_pb2", ".", "Model", ")", ":", "filename", "=", "_tempfile", ".", "mktemp", "(", "suffix", "=", "\".mlmodel\"", ")", "_save_spec", "(", "model", ",", "filename", ")", "self", ".", "__proxy__", ",", "self", ".", "_spec", ",", "self", ".", "_framework_error", "=", "_get_proxy_and_spec", "(", "filename", ",", "useCPUOnly", ")", "try", ":", "_os", ".", "remove", "(", "filename", ")", "except", "OSError", ":", "pass", "else", ":", "raise", "TypeError", "(", "\"Expected model to be a .mlmodel file or a Model_pb2 object\"", ")", "self", ".", "_input_description", "=", "_FeatureDescription", "(", "self", ".", "_spec", ".", "description", ".", "input", ")", "self", ".", "_output_description", "=", "_FeatureDescription", "(", "self", ".", "_spec", ".", "description", ".", "output", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/models/model.py#L177-L214
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/tracking/base.py
python
Trackable._gather_saveables_for_checkpoint
(self)
return {}
Returns a dictionary of values to checkpoint with this object. Keys in the returned dictionary are local to this object and in a separate namespace from dependencies. Values may either be `SaveableObject` factories or variables easily converted to `SaveableObject`s (as in `tf.compat.v1.train.Saver`'s `var_list` constructor argument). `SaveableObjects` have a name set, which Trackable needs to generate itself. So rather than returning `SaveableObjects` directly, this method should return a dictionary of callables which take `name` arguments and return `SaveableObjects` with that name. If this object may also be passed to the global-name-based `tf.compat.v1.train.Saver`, the returned callables should have a default value for their name argument (i.e. be callable with no arguments). Returned values must be saved only by this object; if any value may be shared, it should instead be a dependency. For example, variable objects save their own values with the key `VARIABLE_VALUE_KEY`, but objects which reference variables simply add a dependency. Returns: The dictionary mapping attribute names to `SaveableObject` factories described above. For example: {VARIABLE_VALUE_KEY: lambda name="global_name_for_this_object": SaveableObject(name=name, ...)}
Returns a dictionary of values to checkpoint with this object.
[ "Returns", "a", "dictionary", "of", "values", "to", "checkpoint", "with", "this", "object", "." ]
def _gather_saveables_for_checkpoint(self): """Returns a dictionary of values to checkpoint with this object. Keys in the returned dictionary are local to this object and in a separate namespace from dependencies. Values may either be `SaveableObject` factories or variables easily converted to `SaveableObject`s (as in `tf.compat.v1.train.Saver`'s `var_list` constructor argument). `SaveableObjects` have a name set, which Trackable needs to generate itself. So rather than returning `SaveableObjects` directly, this method should return a dictionary of callables which take `name` arguments and return `SaveableObjects` with that name. If this object may also be passed to the global-name-based `tf.compat.v1.train.Saver`, the returned callables should have a default value for their name argument (i.e. be callable with no arguments). Returned values must be saved only by this object; if any value may be shared, it should instead be a dependency. For example, variable objects save their own values with the key `VARIABLE_VALUE_KEY`, but objects which reference variables simply add a dependency. Returns: The dictionary mapping attribute names to `SaveableObject` factories described above. For example: {VARIABLE_VALUE_KEY: lambda name="global_name_for_this_object": SaveableObject(name=name, ...)} """ return {}
[ "def", "_gather_saveables_for_checkpoint", "(", "self", ")", ":", "return", "{", "}" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/training/tracking/base.py#L911-L942
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/python_gflags/gflags.py
python
FlagValues.MainModuleHelp
(self)
return self.ModuleHelp(_GetMainModule())
Describe the key flags of the main module. Returns: string describing the key flags of a module.
Describe the key flags of the main module.
[ "Describe", "the", "key", "flags", "of", "the", "main", "module", "." ]
def MainModuleHelp(self): """Describe the key flags of the main module. Returns: string describing the key flags of a module. """ return self.ModuleHelp(_GetMainModule())
[ "def", "MainModuleHelp", "(", "self", ")", ":", "return", "self", ".", "ModuleHelp", "(", "_GetMainModule", "(", ")", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/python_gflags/gflags.py#L1428-L1434
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/adodbapi/adodbapi.py
python
TimeConverter.COMDate
(self,obj)
Returns a ComDate from a datetime in inputformat
Returns a ComDate from a datetime in inputformat
[ "Returns", "a", "ComDate", "from", "a", "datetime", "in", "inputformat" ]
def COMDate(self,obj): 'Returns a ComDate from a datetime in inputformat' raise NotImplementedError #"Abstract class"
[ "def", "COMDate", "(", "self", ",", "obj", ")", ":", "raise", "NotImplementedError", "#\"Abstract class\"" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/site-packages/adodbapi/adodbapi.py#L126-L128
LiquidPlayer/LiquidCore
9405979363f2353ac9a71ad8ab59685dd7f919c9
deps/node-10.15.3/deps/v8/tools/run_perf.py
python
RunnableConfig.Run
(self, runner, trybot)
return ( AccumulateResults( self.graphs, self._children, iter_output=self.PostProcess(stdout), perform_measurement=True, calc_total=self.total, ), AccumulateResults( self.graphs, self._children, iter_output=self.PostProcess(stdout_secondary), perform_measurement=trybot, # only run second time on trybots calc_total=self.total, ), )
Iterates over several runs and handles the output for all traces.
Iterates over several runs and handles the output for all traces.
[ "Iterates", "over", "several", "runs", "and", "handles", "the", "output", "for", "all", "traces", "." ]
def Run(self, runner, trybot): """Iterates over several runs and handles the output for all traces.""" stdout, stdout_secondary = Unzip(runner()) return ( AccumulateResults( self.graphs, self._children, iter_output=self.PostProcess(stdout), perform_measurement=True, calc_total=self.total, ), AccumulateResults( self.graphs, self._children, iter_output=self.PostProcess(stdout_secondary), perform_measurement=trybot, # only run second time on trybots calc_total=self.total, ), )
[ "def", "Run", "(", "self", ",", "runner", ",", "trybot", ")", ":", "stdout", ",", "stdout_secondary", "=", "Unzip", "(", "runner", "(", ")", ")", "return", "(", "AccumulateResults", "(", "self", ".", "graphs", ",", "self", ".", "_children", ",", "iter_output", "=", "self", ".", "PostProcess", "(", "stdout", ")", ",", "perform_measurement", "=", "True", ",", "calc_total", "=", "self", ".", "total", ",", ")", ",", "AccumulateResults", "(", "self", ".", "graphs", ",", "self", ".", "_children", ",", "iter_output", "=", "self", ".", "PostProcess", "(", "stdout_secondary", ")", ",", "perform_measurement", "=", "trybot", ",", "# only run second time on trybots", "calc_total", "=", "self", ".", "total", ",", ")", ",", ")" ]
https://github.com/LiquidPlayer/LiquidCore/blob/9405979363f2353ac9a71ad8ab59685dd7f919c9/deps/node-10.15.3/deps/v8/tools/run_perf.py#L521-L539
swift/swift
12d031cf8177fdec0137f9aa7e2912fa23c4416b
3rdParty/SCons/scons-3.0.1/engine/SCons/Tool/GettextCommon.py
python
RPaths.__init__
(self, env)
Initialize `RPaths` callable object. **Arguments**: - *env* - a `SCons.Environment.Environment` object, defines *current working dir*.
Initialize `RPaths` callable object. **Arguments**: - *env* - a `SCons.Environment.Environment` object, defines *current working dir*.
[ "Initialize", "RPaths", "callable", "object", ".", "**", "Arguments", "**", ":", "-", "*", "env", "*", "-", "a", "SCons", ".", "Environment", ".", "Environment", "object", "defines", "*", "current", "working", "dir", "*", "." ]
def __init__(self, env): """ Initialize `RPaths` callable object. **Arguments**: - *env* - a `SCons.Environment.Environment` object, defines *current working dir*. """ self.env = env
[ "def", "__init__", "(", "self", ",", "env", ")", ":", "self", ".", "env", "=", "env" ]
https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Tool/GettextCommon.py#L319-L327
SpaceNetChallenge/BuildingDetectors
3def3c44b5847c744cd2f3356182892d92496579
qinhaifang/src/caffe-mnc/scripts/cpp_lint.py
python
_IncludeState.CheckNextIncludeOrder
(self, header_type)
return ''
Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong.
Returns a non-empty error message if the next header is out of order.
[ "Returns", "a", "non", "-", "empty", "error", "message", "if", "the", "next", "header", "is", "out", "of", "order", "." ]
def CheckNextIncludeOrder(self, header_type): """Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong. """ error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return ''
[ "def", "CheckNextIncludeOrder", "(", "self", ",", "header_type", ")", ":", "error_message", "=", "(", "'Found %s after %s'", "%", "(", "self", ".", "_TYPE_NAMES", "[", "header_type", "]", ",", "self", ".", "_SECTION_NAMES", "[", "self", ".", "_section", "]", ")", ")", "last_section", "=", "self", ".", "_section", "if", "header_type", "==", "_C_SYS_HEADER", ":", "if", "self", ".", "_section", "<=", "self", ".", "_C_SECTION", ":", "self", ".", "_section", "=", "self", ".", "_C_SECTION", "else", ":", "self", ".", "_last_header", "=", "''", "return", "error_message", "elif", "header_type", "==", "_CPP_SYS_HEADER", ":", "if", "self", ".", "_section", "<=", "self", ".", "_CPP_SECTION", ":", "self", ".", "_section", "=", "self", ".", "_CPP_SECTION", "else", ":", "self", ".", "_last_header", "=", "''", "return", "error_message", "elif", "header_type", "==", "_LIKELY_MY_HEADER", ":", "if", "self", ".", "_section", "<=", "self", ".", "_MY_H_SECTION", ":", "self", ".", "_section", "=", "self", ".", "_MY_H_SECTION", "else", ":", "self", ".", "_section", "=", "self", ".", "_OTHER_H_SECTION", "elif", "header_type", "==", "_POSSIBLE_MY_HEADER", ":", "if", "self", ".", "_section", "<=", "self", ".", "_MY_H_SECTION", ":", "self", ".", "_section", "=", "self", ".", "_MY_H_SECTION", "else", ":", "# This will always be the fallback because we're not sure", "# enough that the header is associated with this file.", "self", ".", "_section", "=", "self", ".", "_OTHER_H_SECTION", "else", ":", "assert", "header_type", "==", "_OTHER_HEADER", "self", ".", "_section", "=", "self", ".", "_OTHER_H_SECTION", "if", "last_section", "!=", "self", ".", "_section", ":", "self", ".", "_last_header", "=", "''", "return", "''" ]
https://github.com/SpaceNetChallenge/BuildingDetectors/blob/3def3c44b5847c744cd2f3356182892d92496579/qinhaifang/src/caffe-mnc/scripts/cpp_lint.py#L633-L684
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/zipfile.py
python
PyZipFile.writepy
(self, pathname, basename = "")
Add all files from "pathname" to the ZIP archive. If pathname is a package directory, search the directory and all package subdirectories recursively for all *.py and enter the modules into the archive. If pathname is a plain directory, listdir *.py and enter all modules. Else, pathname must be a Python *.py file and the module will be put into the archive. Added modules are always module.pyo or module.pyc. This method will compile the module.py into module.pyc if necessary.
Add all files from "pathname" to the ZIP archive.
[ "Add", "all", "files", "from", "pathname", "to", "the", "ZIP", "archive", "." ]
def writepy(self, pathname, basename = ""): """Add all files from "pathname" to the ZIP archive. If pathname is a package directory, search the directory and all package subdirectories recursively for all *.py and enter the modules into the archive. If pathname is a plain directory, listdir *.py and enter all modules. Else, pathname must be a Python *.py file and the module will be put into the archive. Added modules are always module.pyo or module.pyc. This method will compile the module.py into module.pyc if necessary. """ dir, name = os.path.split(pathname) if os.path.isdir(pathname): initname = os.path.join(pathname, "__init__.py") if os.path.isfile(initname): # This is a package directory, add it if basename: basename = "%s/%s" % (basename, name) else: basename = name if self.debug: print "Adding package in", pathname, "as", basename fname, arcname = self._get_codename(initname[0:-3], basename) if self.debug: print "Adding", arcname self.write(fname, arcname) dirlist = os.listdir(pathname) dirlist.remove("__init__.py") # Add all *.py files and package subdirectories for filename in dirlist: path = os.path.join(pathname, filename) root, ext = os.path.splitext(filename) if os.path.isdir(path): if os.path.isfile(os.path.join(path, "__init__.py")): # This is a package directory, add it self.writepy(path, basename) # Recursive call elif ext == ".py": fname, arcname = self._get_codename(path[0:-3], basename) if self.debug: print "Adding", arcname self.write(fname, arcname) else: # This is NOT a package directory, add its files at top level if self.debug: print "Adding files from directory", pathname for filename in os.listdir(pathname): path = os.path.join(pathname, filename) root, ext = os.path.splitext(filename) if ext == ".py": fname, arcname = self._get_codename(path[0:-3], basename) if self.debug: print "Adding", arcname self.write(fname, arcname) else: if pathname[-3:] != ".py": raise RuntimeError, \ 'Files added with writepy() must end with ".py"' fname, arcname = self._get_codename(pathname[0:-3], basename) if self.debug: print "Adding file", arcname self.write(fname, arcname)
[ "def", "writepy", "(", "self", ",", "pathname", ",", "basename", "=", "\"\"", ")", ":", "dir", ",", "name", "=", "os", ".", "path", ".", "split", "(", "pathname", ")", "if", "os", ".", "path", ".", "isdir", "(", "pathname", ")", ":", "initname", "=", "os", ".", "path", ".", "join", "(", "pathname", ",", "\"__init__.py\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "initname", ")", ":", "# This is a package directory, add it", "if", "basename", ":", "basename", "=", "\"%s/%s\"", "%", "(", "basename", ",", "name", ")", "else", ":", "basename", "=", "name", "if", "self", ".", "debug", ":", "print", "\"Adding package in\"", ",", "pathname", ",", "\"as\"", ",", "basename", "fname", ",", "arcname", "=", "self", ".", "_get_codename", "(", "initname", "[", "0", ":", "-", "3", "]", ",", "basename", ")", "if", "self", ".", "debug", ":", "print", "\"Adding\"", ",", "arcname", "self", ".", "write", "(", "fname", ",", "arcname", ")", "dirlist", "=", "os", ".", "listdir", "(", "pathname", ")", "dirlist", ".", "remove", "(", "\"__init__.py\"", ")", "# Add all *.py files and package subdirectories", "for", "filename", "in", "dirlist", ":", "path", "=", "os", ".", "path", ".", "join", "(", "pathname", ",", "filename", ")", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "path", ",", "\"__init__.py\"", ")", ")", ":", "# This is a package directory, add it", "self", ".", "writepy", "(", "path", ",", "basename", ")", "# Recursive call", "elif", "ext", "==", "\".py\"", ":", "fname", ",", "arcname", "=", "self", ".", "_get_codename", "(", "path", "[", "0", ":", "-", "3", "]", ",", "basename", ")", "if", "self", ".", "debug", ":", "print", "\"Adding\"", ",", "arcname", "self", ".", "write", "(", "fname", ",", "arcname", ")", "else", ":", "# This is NOT a package directory, add its files at top level", "if", "self", ".", "debug", ":", "print", "\"Adding files from directory\"", ",", "pathname", "for", "filename", "in", "os", ".", "listdir", "(", "pathname", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "pathname", ",", "filename", ")", "root", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "==", "\".py\"", ":", "fname", ",", "arcname", "=", "self", ".", "_get_codename", "(", "path", "[", "0", ":", "-", "3", "]", ",", "basename", ")", "if", "self", ".", "debug", ":", "print", "\"Adding\"", ",", "arcname", "self", ".", "write", "(", "fname", ",", "arcname", ")", "else", ":", "if", "pathname", "[", "-", "3", ":", "]", "!=", "\".py\"", ":", "raise", "RuntimeError", ",", "'Files added with writepy() must end with \".py\"'", "fname", ",", "arcname", "=", "self", ".", "_get_codename", "(", "pathname", "[", "0", ":", "-", "3", "]", ",", "basename", ")", "if", "self", ".", "debug", ":", "print", "\"Adding file\"", ",", "arcname", "self", ".", "write", "(", "fname", ",", "arcname", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/zipfile.py#L1356-L1419
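For context, a minimal usage sketch of the method documented above; the archive name and module paths are illustrative assumptions, not taken from the record (PyZipFile and its debug attribute are the standard-library API):

# Minimal usage sketch (file and directory names are illustrative; "mypkg/"
# is assumed to be a package directory containing an __init__.py).
from zipfile import PyZipFile

zf = PyZipFile("modules.zip", "w")
zf.debug = 1              # echo the "Adding ..." messages seen in the source
zf.writepy("mypkg")       # package dir: compiled modules added under "mypkg/"
zf.writepy("single.py")   # bare module: its .pyc/.pyo lands at archive root
zf.close()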
MythTV/mythtv
d282a209cb8be85d036f85a62a8ec971b67d45f4
mythtv/bindings/python/MythTV/utility/dicttoxml.py
python
make_attrstring
(attr)
return '%s%s' % (' ' if attrstring != '' else '', attrstring)
Returns an attribute string in the form key="val"
Returns an attribute string in the form key="val"
[ "Returns", "an", "attribute", "string", "in", "the", "form", "key", "=", "val" ]
def make_attrstring(attr):
    """Returns an attribute string in the form key="val" """
    attrstring = ' '.join(['%s="%s"' % (k, v) for k, v in attr.items()])
    return '%s%s' % (' ' if attrstring != '' else '', attrstring)
[ "def", "make_attrstring", "(", "attr", ")", ":", "attrstring", "=", "' '", ".", "join", "(", "[", "'%s=\"%s\"'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "attr", ".", "items", "(", ")", "]", ")", "return", "'%s%s'", "%", "(", "' '", "if", "attrstring", "!=", "''", "else", "''", ",", "attrstring", ")" ]
https://github.com/MythTV/mythtv/blob/d282a209cb8be85d036f85a62a8ec971b67d45f4/mythtv/bindings/python/MythTV/utility/dicttoxml.py#L117-L120
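A quick illustrative check of the helper above: the leading space is emitted only when the dict is non-empty, so the result can be concatenated directly after a tag name. The key order shown assumes insertion-ordered dicts, and attribute values are assumed to be XML-escaped by the caller:

# Illustrative check (assumes make_attrstring is in scope).
print(make_attrstring({'id': '7', 'flag': 'yes'}))  # -> ' id="7" flag="yes"'
print(make_attrstring({}))                          # -> ''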