Column schema for the records below (string columns list the min and max value lengths observed; "1 value" marks a single-class column):

  column              type            min    max
  nwo                 stringlengths   5      86
  sha                 stringlengths   40     40
  path                stringlengths   4      189
  language            stringclasses   1 value
  identifier          stringlengths   1      94
  parameters          stringlengths   2      4.03k
  argument_list       stringclasses   1 value
  return_statement    stringlengths   0      11.5k
  docstring           stringlengths   1      33.2k
  docstring_summary   stringlengths   0      5.15k
  docstring_tokens    sequence
  function            stringlengths   34     151k
  function_tokens     sequence
  url                 stringlengths   90     278
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/dataview.py
python
DataViewItem.IsOk
(*args, **kwargs)
return _dataview.DataViewItem_IsOk(*args, **kwargs)
IsOk(self) -> bool

Returns ``True`` if the object refers to an actual item in the data view control.
IsOk(self) -> bool
[ "IsOk", "(", "self", ")", "-", ">", "bool" ]
def IsOk(*args, **kwargs):
    """
    IsOk(self) -> bool

    Returns ``True`` if the object refers to an actual item in the data
    view control.
    """
    return _dataview.DataViewItem_IsOk(*args, **kwargs)
[ "def", "IsOk", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_dataview", ".", "DataViewItem_IsOk", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/dataview.py#L98-L105
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/ops.py
python
masked_arith_op
(x, y, op)
return result
If the given arithmetic operation fails, attempt it again on
only the non-null elements of the input array(s).

Parameters
----------
x : np.ndarray
y : np.ndarray, Series, Index
op : binary operator
If the given arithmetic operation fails, attempt it again on only the non-null elements of the input array(s).
[ "If", "the", "given", "arithmetic", "operation", "fails", "attempt", "it", "again", "on", "only", "the", "non", "-", "null", "elements", "of", "the", "input", "array", "(", "s", ")", "." ]
def masked_arith_op(x, y, op):
    """
    If the given arithmetic operation fails, attempt it again on
    only the non-null elements of the input array(s).

    Parameters
    ----------
    x : np.ndarray
    y : np.ndarray, Series, Index
    op : binary operator
    """
    # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes
    # the logic valid for both Series and DataFrame ops.
    xrav = x.ravel()
    assert isinstance(x, (np.ndarray, ABCSeries)), type(x)
    if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)):
        dtype = find_common_type([x.dtype, y.dtype])
        result = np.empty(x.size, dtype=dtype)

        # PeriodIndex.ravel() returns int64 dtype, so we have
        # to work around that case.  See GH#19956
        yrav = y if is_period_dtype(y) else y.ravel()
        mask = notna(xrav) & notna(yrav)

        if yrav.shape != mask.shape:
            # FIXME: GH#5284, GH#5035, GH#19448
            # Without specifically raising here we get mismatched
            # errors in Py3 (TypeError) vs Py2 (ValueError)
            # Note: Only = an issue in DataFrame case
            raise ValueError('Cannot broadcast operands together.')

        if mask.any():
            with np.errstate(all='ignore'):
                result[mask] = op(xrav[mask],
                                  com.values_from_object(yrav[mask]))
    else:
        assert is_scalar(y), type(y)
        assert isinstance(x, np.ndarray), type(x)
        # mask is only meaningful for x
        result = np.empty(x.size, dtype=x.dtype)
        mask = notna(xrav)

        # 1 ** np.nan is 1. So we have to unmask those.
        if op == pow:
            mask = np.where(x == 1, False, mask)
        elif op == rpow:
            mask = np.where(y == 1, False, mask)

        if mask.any():
            with np.errstate(all='ignore'):
                result[mask] = op(xrav[mask], y)

    result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
    result = result.reshape(x.shape)  # 2D compat
    return result
[ "def", "masked_arith_op", "(", "x", ",", "y", ",", "op", ")", ":", "# For Series `x` is 1D so ravel() is a no-op; calling it anyway makes", "# the logic valid for both Series and DataFrame ops.", "xrav", "=", "x", ".", "ravel", "(", ")", "assert", "isinstance", "(", "x", ",", "(", "np", ".", "ndarray", ",", "ABCSeries", ")", ")", ",", "type", "(", "x", ")", "if", "isinstance", "(", "y", ",", "(", "np", ".", "ndarray", ",", "ABCSeries", ",", "ABCIndexClass", ")", ")", ":", "dtype", "=", "find_common_type", "(", "[", "x", ".", "dtype", ",", "y", ".", "dtype", "]", ")", "result", "=", "np", ".", "empty", "(", "x", ".", "size", ",", "dtype", "=", "dtype", ")", "# PeriodIndex.ravel() returns int64 dtype, so we have", "# to work around that case. See GH#19956", "yrav", "=", "y", "if", "is_period_dtype", "(", "y", ")", "else", "y", ".", "ravel", "(", ")", "mask", "=", "notna", "(", "xrav", ")", "&", "notna", "(", "yrav", ")", "if", "yrav", ".", "shape", "!=", "mask", ".", "shape", ":", "# FIXME: GH#5284, GH#5035, GH#19448", "# Without specifically raising here we get mismatched", "# errors in Py3 (TypeError) vs Py2 (ValueError)", "# Note: Only = an issue in DataFrame case", "raise", "ValueError", "(", "'Cannot broadcast operands together.'", ")", "if", "mask", ".", "any", "(", ")", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "[", "mask", "]", "=", "op", "(", "xrav", "[", "mask", "]", ",", "com", ".", "values_from_object", "(", "yrav", "[", "mask", "]", ")", ")", "else", ":", "assert", "is_scalar", "(", "y", ")", ",", "type", "(", "y", ")", "assert", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ",", "type", "(", "x", ")", "# mask is only meaningful for x", "result", "=", "np", ".", "empty", "(", "x", ".", "size", ",", "dtype", "=", "x", ".", "dtype", ")", "mask", "=", "notna", "(", "xrav", ")", "# 1 ** np.nan is 1. So we have to unmask those.", "if", "op", "==", "pow", ":", "mask", "=", "np", ".", "where", "(", "x", "==", "1", ",", "False", ",", "mask", ")", "elif", "op", "==", "rpow", ":", "mask", "=", "np", ".", "where", "(", "y", "==", "1", ",", "False", ",", "mask", ")", "if", "mask", ".", "any", "(", ")", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "[", "mask", "]", "=", "op", "(", "xrav", "[", "mask", "]", ",", "y", ")", "result", ",", "changed", "=", "maybe_upcast_putmask", "(", "result", ",", "~", "mask", ",", "np", ".", "nan", ")", "result", "=", "result", ".", "reshape", "(", "x", ".", "shape", ")", "# 2D compat", "return", "result" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/ops.py#L975-L1030
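The masked-arithmetic idea in this record can be demonstrated standalone. A minimal sketch, assuming plain numpy with NaN as the only null marker (the pandas-internal helpers notna, maybe_upcast_putmask and com.values_from_object are deliberately not used here):

import numpy as np

def masked_op_sketch(x, y, op):
    # Apply `op` only where both operands are non-null; leave NaN elsewhere.
    x = np.asarray(x, dtype='float64')
    y = np.asarray(y, dtype='float64')
    mask = ~np.isnan(x.ravel()) & ~np.isnan(y.ravel())
    result = np.full(x.size, np.nan)
    if mask.any():
        with np.errstate(all='ignore'):
            result[mask] = op(x.ravel()[mask], y.ravel()[mask])
    return result.reshape(x.shape)

# masked_op_sketch([1.0, np.nan, 3.0], [2.0, 2.0, np.nan], np.add)
# -> array([ 3., nan, nan])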
Slicer/SlicerGitSVNArchive
65e92bb16c2b32ea47a1a66bee71f238891ee1ca
Base/Python/slicer/ScriptedLoadableModule.py
python
ScriptedLoadableModuleLogic.getAllParameterNodes
(self)
return foundParameterNodes
Return a list of all parameter nodes for this module
Multiple parameter nodes are useful for storing multiple parameter sets in a single scene.
Return a list of all parameter nodes for this module Multiple parameter nodes are useful for storing multiple parameter sets in a single scene.
[ "Return", "a", "list", "of", "all", "parameter", "nodes", "for", "this", "module", "Multiple", "parameter", "nodes", "are", "useful", "for", "storing", "multiple", "parameter", "sets", "in", "a", "single", "scene", "." ]
def getAllParameterNodes(self):
    """ Return a list of all parameter nodes for this module
    Multiple parameter nodes are useful for storing multiple parameter sets in a single scene.
    """
    foundParameterNodes = []
    numberOfScriptedModuleNodes = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLScriptedModuleNode")
    for nodeIndex in range(numberOfScriptedModuleNodes):
        parameterNode = slicer.mrmlScene.GetNthNodeByClass( nodeIndex, "vtkMRMLScriptedModuleNode" )
        if parameterNode.GetAttribute("ModuleName") == self.moduleName:
            foundParameterNodes.append(parameterNode)
    return foundParameterNodes
[ "def", "getAllParameterNodes", "(", "self", ")", ":", "foundParameterNodes", "=", "[", "]", "numberOfScriptedModuleNodes", "=", "slicer", ".", "mrmlScene", ".", "GetNumberOfNodesByClass", "(", "\"vtkMRMLScriptedModuleNode\"", ")", "for", "nodeIndex", "in", "range", "(", "numberOfScriptedModuleNodes", ")", ":", "parameterNode", "=", "slicer", ".", "mrmlScene", ".", "GetNthNodeByClass", "(", "nodeIndex", ",", "\"vtkMRMLScriptedModuleNode\"", ")", "if", "parameterNode", ".", "GetAttribute", "(", "\"ModuleName\"", ")", "==", "self", ".", "moduleName", ":", "foundParameterNodes", ".", "append", "(", "parameterNode", ")", "return", "foundParameterNodes" ]
https://github.com/Slicer/SlicerGitSVNArchive/blob/65e92bb16c2b32ea47a1a66bee71f238891ee1ca/Base/Python/slicer/ScriptedLoadableModule.py#L247-L258
thalium/icebox
99d147d5b9269222225443ce171b4fd46d8985d4
third_party/virtualbox/src/VBox/ValidationKit/common/utils.py
python
processKillWithNameCheck
(uPid, sName)
return processKill(uPid)
Like processKill(), but checks if the process name matches before killing it. This is intended for killing using potentially stale pid values. Returns True on success, False on failure.
Like processKill(), but checks if the process name matches before killing it. This is intended for killing using potentially stale pid values.
[ "Like", "processKill", "()", "but", "checks", "if", "the", "process", "name", "matches", "before", "killing", "it", ".", "This", "is", "intended", "for", "killing", "using", "potentially", "stale", "pid", "values", "." ]
def processKillWithNameCheck(uPid, sName):
    """
    Like processKill(), but checks if the process name matches before
    killing it.  This is intended for killing using potentially stale pid
    values.

    Returns True on success, False on failure.
    """
    if processCheckPidAndName(uPid, sName) is not True:
        return False;
    return processKill(uPid);
[ "def", "processKillWithNameCheck", "(", "uPid", ",", "sName", ")", ":", "if", "processCheckPidAndName", "(", "uPid", ",", "sName", ")", "is", "not", "True", ":", "return", "False", "return", "processKill", "(", "uPid", ")" ]
https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/VBox/ValidationKit/common/utils.py#L765-L775
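The same check-then-kill pattern can be sketched with psutil; psutil is an assumption here, standing in for the ValidationKit helpers processCheckPidAndName and processKill:

import psutil  # assumption: not what the original module uses

def kill_if_named(pid, name):
    # Kill `pid` only if its executable name still matches `name`,
    # guarding against stale pid values recycled by the OS.
    try:
        proc = psutil.Process(pid)
        if proc.name() != name:
            return False
        proc.terminate()
        return True
    except psutil.NoSuchProcess:
        return False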
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/multiprocessing/__init__.py
python
freeze_support
()
Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit.
Check whether this is a fake forked process in a frozen executable. If so then run code specified by commandline and exit.
[ "Check", "whether", "this", "is", "a", "fake", "forked", "process", "in", "a", "frozen", "executable", ".", "If", "so", "then", "run", "code", "specified", "by", "commandline", "and", "exit", "." ]
def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()
[ "def", "freeze_support", "(", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", "and", "getattr", "(", "sys", ",", "'frozen'", ",", "False", ")", ":", "from", "multiprocessing", ".", "forking", "import", "freeze_support", "freeze_support", "(", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/multiprocessing/__init__.py#L138-L145
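Canonical usage, per the multiprocessing documentation: call freeze_support() first under the __main__ guard of a script that may be frozen into a Windows executable; it is a no-op everywhere else:

from multiprocessing import Process, freeze_support

def worker():
    print('running in the child process')

if __name__ == '__main__':
    freeze_support()                 # only acts in a frozen win32 child
    Process(target=worker).start()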
BlzFans/wke
b0fa21158312e40c5fbd84682d643022b6c34a93
cygwin/lib/python2.6/lib2to3/pytree.py
python
Node.insert_child
(self, i, child)
Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately.
Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately.
[ "Equivalent", "to", "node", ".", "children", ".", "insert", "(", "i", "child", ")", ".", "This", "method", "also", "sets", "the", "child", "s", "parent", "attribute", "appropriately", "." ]
def insert_child(self, i, child):
    """
    Equivalent to 'node.children.insert(i, child)'. This method also
    sets the child's parent attribute appropriately.
    """
    child.parent = self
    self.children.insert(i, child)
    self.changed()
[ "def", "insert_child", "(", "self", ",", "i", ",", "child", ")", ":", "child", ".", "parent", "=", "self", "self", ".", "children", ".", "insert", "(", "i", ",", "child", ")", "self", ".", "changed", "(", ")" ]
https://github.com/BlzFans/wke/blob/b0fa21158312e40c5fbd84682d643022b6c34a93/cygwin/lib/python2.6/lib2to3/pytree.py#L316-L323
FEniCS/dolfinx
3dfdf038cccdb70962865b58a63bf29c2e55ec6e
python/dolfinx/io.py
python
extract_gmsh_topology_and_markers
(gmsh_model, model_name=None)
return topologies
Extract all entities tagged with a physical marker in the gmsh model, and collects the data per cell type. Returns a nested dictionary where the first key is the gmsh MSH element type integer. Each element type present in the model contains the cell topology of the elements and corresponding markers.
Extract all entities tagged with a physical marker in the gmsh model, and collects the data per cell type. Returns a nested dictionary where the first key is the gmsh MSH element type integer. Each element type present in the model contains the cell topology of the elements and corresponding markers.
[ "Extract", "all", "entities", "tagged", "with", "a", "physical", "marker", "in", "the", "gmsh", "model", "and", "collects", "the", "data", "per", "cell", "type", ".", "Returns", "a", "nested", "dictionary", "where", "the", "first", "key", "is", "the", "gmsh", "MSH", "element", "type", "integer", ".", "Each", "element", "type", "present", "in", "the", "model", "contains", "the", "cell", "topology", "of", "the", "elements", "and", "corresponding", "markers", "." ]
def extract_gmsh_topology_and_markers(gmsh_model, model_name=None):
    """Extract all entities tagged with a physical marker in the gmsh model,
    and collects the data per cell type. Returns a nested dictionary where
    the first key is the gmsh MSH element type integer. Each element type
    present in the model contains the cell topology of the elements and
    corresponding markers.
    """
    if model_name is not None:
        gmsh_model.setCurrent(model_name)

    # Get the physical groups from gmsh on the form [(dim1, tag1),(dim1,
    # tag2), (dim2, tag3),...]
    phys_grps = gmsh_model.getPhysicalGroups()
    topologies = {}
    for dim, tag in phys_grps:
        # Get the entities for a given dimension:
        # dim=0->Points, dim=1->Lines, dim=2->Triangles/Quadrilaterals,
        # etc.
        entities = gmsh_model.getEntitiesForPhysicalGroup(dim, tag)

        for entity in entities:
            # Get data about the elements on a given entity:
            # NOTE: Assumes that each entity only have one cell-type
            element_data = gmsh_model.mesh.getElements(dim, tag=entity)
            element_types, element_tags, node_tags = element_data
            assert len(element_types) == 1

            # The MSH type of the cells on the element
            element_type = element_types[0]
            num_el = len(element_tags[0])

            # Determine number of local nodes per element to create the
            # topology of the elements
            properties = gmsh_model.mesh.getElementProperties(element_type)
            name, dim, order, num_nodes, local_coords, _ = properties

            # 2D array of shape (num_elements,num_nodes_per_element)
            # containing the topology of the elements on this entity
            # NOTE: GMSH indexing starts with 1 and not zero
            element_topology = node_tags[0].reshape(-1, num_nodes) - 1

            # Gather data for each element type and the
            # corresponding physical markers
            if element_type in topologies.keys():
                topologies[element_type]["topology"] = np.concatenate(
                    (topologies[element_type]["topology"], element_topology), axis=0)
                topologies[element_type]["cell_data"] = np.concatenate(
                    (topologies[element_type]["cell_data"], np.full(num_el, tag)), axis=0)
            else:
                topologies[element_type] = {"topology": element_topology,
                                            "cell_data": np.full(num_el, tag)}

    return topologies
[ "def", "extract_gmsh_topology_and_markers", "(", "gmsh_model", ",", "model_name", "=", "None", ")", ":", "if", "model_name", "is", "not", "None", ":", "gmsh_model", ".", "setCurrent", "(", "model_name", ")", "# Get the physical groups from gmsh on the form [(dim1, tag1),(dim1,", "# tag2), (dim2, tag3),...]", "phys_grps", "=", "gmsh_model", ".", "getPhysicalGroups", "(", ")", "topologies", "=", "{", "}", "for", "dim", ",", "tag", "in", "phys_grps", ":", "# Get the entities for a given dimension:", "# dim=0->Points, dim=1->Lines, dim=2->Triangles/Quadrilaterals,", "# etc.", "entities", "=", "gmsh_model", ".", "getEntitiesForPhysicalGroup", "(", "dim", ",", "tag", ")", "for", "entity", "in", "entities", ":", "# Get data about the elements on a given entity:", "# NOTE: Assumes that each entity only have one cell-type", "element_data", "=", "gmsh_model", ".", "mesh", ".", "getElements", "(", "dim", ",", "tag", "=", "entity", ")", "element_types", ",", "element_tags", ",", "node_tags", "=", "element_data", "assert", "len", "(", "element_types", ")", "==", "1", "# The MSH type of the cells on the element", "element_type", "=", "element_types", "[", "0", "]", "num_el", "=", "len", "(", "element_tags", "[", "0", "]", ")", "# Determine number of local nodes per element to create the", "# topology of the elements", "properties", "=", "gmsh_model", ".", "mesh", ".", "getElementProperties", "(", "element_type", ")", "name", ",", "dim", ",", "order", ",", "num_nodes", ",", "local_coords", ",", "_", "=", "properties", "# 2D array of shape (num_elements,num_nodes_per_element)", "# containing the topology of the elements on this entity", "# NOTE: GMSH indexing starts with 1 and not zero", "element_topology", "=", "node_tags", "[", "0", "]", ".", "reshape", "(", "-", "1", ",", "num_nodes", ")", "-", "1", "# Gather data for each element type and the", "# corresponding physical markers", "if", "element_type", "in", "topologies", ".", "keys", "(", ")", ":", "topologies", "[", "element_type", "]", "[", "\"topology\"", "]", "=", "np", ".", "concatenate", "(", "(", "topologies", "[", "element_type", "]", "[", "\"topology\"", "]", ",", "element_topology", ")", ",", "axis", "=", "0", ")", "topologies", "[", "element_type", "]", "[", "\"cell_data\"", "]", "=", "np", ".", "concatenate", "(", "(", "topologies", "[", "element_type", "]", "[", "\"cell_data\"", "]", ",", "np", ".", "full", "(", "num_el", ",", "tag", ")", ")", ",", "axis", "=", "0", ")", "else", ":", "topologies", "[", "element_type", "]", "=", "{", "\"topology\"", ":", "element_topology", ",", "\"cell_data\"", ":", "np", ".", "full", "(", "num_el", ",", "tag", ")", "}", "return", "topologies" ]
https://github.com/FEniCS/dolfinx/blob/3dfdf038cccdb70962865b58a63bf29c2e55ec6e/python/dolfinx/io.py#L75-L127
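A hedged usage sketch for the record above, assuming the gmsh Python API is installed; the geometry and the physical-group tag are illustrative, not from the original source:

import gmsh

gmsh.initialize()
gmsh.model.add("demo")
surf = gmsh.model.occ.addRectangle(0, 0, 0, 1, 1)  # unit square
gmsh.model.occ.synchronize()
gmsh.model.addPhysicalGroup(2, [surf], tag=7)      # marker 7 on the surface
gmsh.model.mesh.generate(2)

topologies = extract_gmsh_topology_and_markers(gmsh.model)
# keys are gmsh MSH element-type integers (e.g. 2 = 3-node triangle)
gmsh.finalize()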
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/nanops.py
python
_get_fill_value
(dtype, fill_value=None, fill_value_typ=None)
return the correct fill value for the dtype of the values
return the correct fill value for the dtype of the values
[ "return", "the", "correct", "fill", "value", "for", "the", "dtype", "of", "the", "values" ]
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
    """ return the correct fill value for the dtype of the values """
    if fill_value is not None:
        return fill_value
    if _na_ok_dtype(dtype):
        if fill_value_typ is None:
            return np.nan
        else:
            if fill_value_typ == '+inf':
                return np.inf
            else:
                return -np.inf
    else:
        if fill_value_typ is None:
            return tslibs.iNaT
        else:
            if fill_value_typ == '+inf':
                # need the max int here
                return _int64_max
            else:
                return tslibs.iNaT
[ "def", "_get_fill_value", "(", "dtype", ",", "fill_value", "=", "None", ",", "fill_value_typ", "=", "None", ")", ":", "if", "fill_value", "is", "not", "None", ":", "return", "fill_value", "if", "_na_ok_dtype", "(", "dtype", ")", ":", "if", "fill_value_typ", "is", "None", ":", "return", "np", ".", "nan", "else", ":", "if", "fill_value_typ", "==", "'+inf'", ":", "return", "np", ".", "inf", "else", ":", "return", "-", "np", ".", "inf", "else", ":", "if", "fill_value_typ", "is", "None", ":", "return", "tslibs", ".", "iNaT", "else", ":", "if", "fill_value_typ", "==", "'+inf'", ":", "# need the max int here", "return", "_int64_max", "else", ":", "return", "tslibs", ".", "iNaT" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/nanops.py#L180-L200
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
TextEntryBase.Undo
(*args, **kwargs)
return _core_.TextEntryBase_Undo(*args, **kwargs)
Undo(self)

Undoes the last edit in the text field
Undo(self)
[ "Undo", "(", "self", ")" ]
def Undo(*args, **kwargs):
    """
    Undo(self)

    Undoes the last edit in the text field
    """
    return _core_.TextEntryBase_Undo(*args, **kwargs)
[ "def", "Undo", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "TextEntryBase_Undo", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L13211-L13217
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Action.py
python
CommandGeneratorAction.get_presig
(self, target, source, env, executor=None)
return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
Return the signature contents of this action's command line. This strips $(-$) and everything in between the string, since those parts don't affect signatures.
Return the signature contents of this action's command line.
[ "Return", "the", "signature", "contents", "of", "this", "action", "s", "command", "line", "." ]
def get_presig(self, target, source, env, executor=None):
    """Return the signature contents of this action's command line.

    This strips $(-$) and everything in between the string,
    since those parts don't affect signatures.
    """
    return self._generate(target, source, env, 1, executor).get_presig(target, source, env)
[ "def", "get_presig", "(", "self", ",", "target", ",", "source", ",", "env", ",", "executor", "=", "None", ")", ":", "return", "self", ".", "_generate", "(", "target", ",", "source", ",", "env", ",", "1", ",", "executor", ")", ".", "get_presig", "(", "target", ",", "source", ",", "env", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Action.py#L1055-L1061
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
polly/lib/External/isl/imath/tools/mkdoc.py
python
parse_decls
(text)
return decls
Parse a dictionary of declarations from text.
Parse a dictionary of declarations from text.
[ "Parse", "a", "dictionary", "of", "declarations", "from", "text", "." ]
def parse_decls(text):
    """Parse a dictionary of declarations from text."""
    decls = collections.OrderedDict()
    idx = LIndex(text)
    for m in doc.finditer(text):
        line, _ = idx.linecol(m.span('decl')[0])
        d = Decl(m.group('text'), m.group('decl'), line)
        decls[d.name] = d

    return decls
[ "def", "parse_decls", "(", "text", ")", ":", "decls", "=", "collections", ".", "OrderedDict", "(", ")", "idx", "=", "LIndex", "(", "text", ")", "for", "m", "in", "doc", ".", "finditer", "(", "text", ")", ":", "line", ",", "_", "=", "idx", ".", "linecol", "(", "m", ".", "span", "(", "'decl'", ")", "[", "0", "]", ")", "d", "=", "Decl", "(", "m", ".", "group", "(", "'text'", ")", ",", "m", ".", "group", "(", "'decl'", ")", ",", "line", ")", "decls", "[", "d", ".", "name", "]", "=", "d", "return", "decls" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/polly/lib/External/isl/imath/tools/mkdoc.py#L147-L155
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python/src/Lib/asynchat.py
python
async_chat.set_terminator
(self, term)
Set the input delimiter. Can be a fixed string of any length, an integer, or None
Set the input delimiter. Can be a fixed string of any length, an integer, or None
[ "Set", "the", "input", "delimiter", ".", "Can", "be", "a", "fixed", "string", "of", "any", "length", "an", "integer", "or", "None" ]
def set_terminator (self, term):
    "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
    self.terminator = term
[ "def", "set_terminator", "(", "self", ",", "term", ")", ":", "self", ".", "terminator", "=", "term" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/asynchat.py#L100-L102
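Typical asynchat usage sets the terminator in the handler's constructor and reacts in found_terminator. A minimal sketch (the handler class and what it does with the data are illustrative):

import asynchat

class HeaderReader(asynchat.async_chat):
    # Collect bytes until a blank line ends an HTTP-style header block.
    def __init__(self, sock):
        asynchat.async_chat.__init__(self, sock)
        self.buffer = []
        self.set_terminator(b'\r\n\r\n')   # fixed-string delimiter

    def collect_incoming_data(self, data):
        self.buffer.append(data)

    def found_terminator(self):
        header = b''.join(self.buffer)     # everything up to the blank line
        self.buffer = []
        print('received %d header bytes' % len(header))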
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/s3fs/core.py
python
S3FileSystem.setxattr
(self, path, copy_kwargs=None, **kw_args)
Set metadata.

Attributes have to be of the form documented in the
`Metadata Reference`_.

Parameters
----------
kw_args : key-value pairs like field="value", where the values must be
    strings. Does not alter existing fields, unless
    the field appears here - if the value is None, delete the field.
copy_kwargs : dict, optional
    dictionary of additional params to use for the underlying
    s3.copy_object.

Examples
--------
>>> mys3file.setxattr(attribute_1='value1', attribute_2='value2')  # doctest: +SKIP
# Example for use with copy_args
>>> mys3file.setxattr(copy_kwargs={'ContentType': 'application/pdf'},
...     attribute_1='value1')  # doctest: +SKIP

.. Metadata Reference:
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#object-metadata
Set metadata.
[ "Set", "metadata", "." ]
def setxattr(self, path, copy_kwargs=None, **kw_args):
    """ Set metadata.

    Attributes have to be of the form documented in the
    `Metadata Reference`_.

    Parameters
    ----------
    kw_args : key-value pairs like field="value", where the values must be
        strings. Does not alter existing fields, unless
        the field appears here - if the value is None, delete the field.
    copy_kwargs : dict, optional
        dictionary of additional params to use for the underlying
        s3.copy_object.

    Examples
    --------
    >>> mys3file.setxattr(attribute_1='value1', attribute_2='value2')  # doctest: +SKIP

    # Example for use with copy_args
    >>> mys3file.setxattr(copy_kwargs={'ContentType': 'application/pdf'},
    ...     attribute_1='value1')  # doctest: +SKIP

    .. Metadata Reference:
    http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#object-metadata
    """
    bucket, key, version_id = self.split_path(path)
    metadata = self.metadata(path)
    metadata.update(**kw_args)
    copy_kwargs = copy_kwargs or {}

    # remove all keys that are None
    for kw_key in kw_args:
        if kw_args[kw_key] is None:
            metadata.pop(kw_key, None)

    src = {'Bucket': bucket, 'Key': key}
    if version_id:
        src['VersionId'] = version_id

    self._call_s3(
        self.s3.copy_object,
        copy_kwargs,
        CopySource=src,
        Bucket=bucket,
        Key=key,
        Metadata=metadata,
        MetadataDirective='REPLACE',
    )

    # refresh metadata
    self._metadata_cache[path] = metadata
[ "def", "setxattr", "(", "self", ",", "path", ",", "copy_kwargs", "=", "None", ",", "*", "*", "kw_args", ")", ":", "bucket", ",", "key", ",", "version_id", "=", "self", ".", "split_path", "(", "path", ")", "metadata", "=", "self", ".", "metadata", "(", "path", ")", "metadata", ".", "update", "(", "*", "*", "kw_args", ")", "copy_kwargs", "=", "copy_kwargs", "or", "{", "}", "# remove all keys that are None", "for", "kw_key", "in", "kw_args", ":", "if", "kw_args", "[", "kw_key", "]", "is", "None", ":", "metadata", ".", "pop", "(", "kw_key", ",", "None", ")", "src", "=", "{", "'Bucket'", ":", "bucket", ",", "'Key'", ":", "key", "}", "if", "version_id", ":", "src", "[", "'VersionId'", "]", "=", "version_id", "self", ".", "_call_s3", "(", "self", ".", "s3", ".", "copy_object", ",", "copy_kwargs", ",", "CopySource", "=", "src", ",", "Bucket", "=", "bucket", ",", "Key", "=", "key", ",", "Metadata", "=", "metadata", ",", "MetadataDirective", "=", "'REPLACE'", ",", ")", "# refresh metadata", "self", ".", "_metadata_cache", "[", "path", "]", "=", "metadata" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/s3fs/core.py#L732-L785
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/auth.py
python
_basic_auth_str
(username, password)
return authstr
Returns a Basic Auth string.
Returns a Basic Auth string.
[ "Returns", "a", "Basic", "Auth", "string", "." ]
def _basic_auth_str(username, password):
    """Returns a Basic Auth string."""
    # "I want us to put a big-ol' comment on top of it that
    # says that this behaviour is dumb but we need to preserve
    # it because people are relying on it."
    #    - Lukasa
    #
    # These are here solely to maintain backwards compatibility
    # for things like ints. This will be removed in 3.0.0.
    if not isinstance(username, basestring):
        warnings.warn(
            "Non-string usernames will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(username),
            category=DeprecationWarning,
        )
        username = str(username)

    if not isinstance(password, basestring):
        warnings.warn(
            "Non-string passwords will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(type(password)),
            category=DeprecationWarning,
        )
        password = str(password)
    # -- End Removal --

    if isinstance(username, str):
        username = username.encode('latin1')

    if isinstance(password, str):
        password = password.encode('latin1')

    authstr = 'Basic ' + to_native_string(
        b64encode(b':'.join((username, password))).strip()
    )

    return authstr
[ "def", "_basic_auth_str", "(", "username", ",", "password", ")", ":", "# \"I want us to put a big-ol' comment on top of it that", "# says that this behaviour is dumb but we need to preserve", "# it because people are relying on it.\"", "# - Lukasa", "#", "# These are here solely to maintain backwards compatibility", "# for things like ints. This will be removed in 3.0.0.", "if", "not", "isinstance", "(", "username", ",", "basestring", ")", ":", "warnings", ".", "warn", "(", "\"Non-string usernames will no longer be supported in Requests \"", "\"3.0.0. Please convert the object you've passed in ({!r}) to \"", "\"a string or bytes object in the near future to avoid \"", "\"problems.\"", ".", "format", "(", "username", ")", ",", "category", "=", "DeprecationWarning", ",", ")", "username", "=", "str", "(", "username", ")", "if", "not", "isinstance", "(", "password", ",", "basestring", ")", ":", "warnings", ".", "warn", "(", "\"Non-string passwords will no longer be supported in Requests \"", "\"3.0.0. Please convert the object you've passed in ({!r}) to \"", "\"a string or bytes object in the near future to avoid \"", "\"problems.\"", ".", "format", "(", "type", "(", "password", ")", ")", ",", "category", "=", "DeprecationWarning", ",", ")", "password", "=", "str", "(", "password", ")", "# -- End Removal --", "if", "isinstance", "(", "username", ",", "str", ")", ":", "username", "=", "username", ".", "encode", "(", "'latin1'", ")", "if", "isinstance", "(", "password", ",", "str", ")", ":", "password", "=", "password", ".", "encode", "(", "'latin1'", ")", "authstr", "=", "'Basic '", "+", "to_native_string", "(", "b64encode", "(", "b':'", ".", "join", "(", "(", "username", ",", "password", ")", ")", ")", ".", "strip", "(", ")", ")", "return", "authstr" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/requests/auth.py#L28-L69
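The produced header value is easy to sanity-check by hand, since base64 of 'user:pass' is a fixed string:

from base64 import b64encode

expected = 'Basic ' + b64encode(b'user:pass').decode('ascii')
assert expected == 'Basic dXNlcjpwYXNz'
# requests then sends the header:  Authorization: Basic dXNlcjpwYXNz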
sigmaai/self-driving-golf-cart
8d891600af3d851add27a10ae45cf3c2108bb87c
ros/src/detection/object_detection/scripts/yolo3/model.py
python
preprocess_true_boxes
(true_boxes, input_shape, anchors, num_classes)
return y_true
Preprocess true boxes to training input format

Parameters
----------
true_boxes: array, shape=(m, T, 5)
    Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer

Returns
-------
y_true: list of array, shape like yolo_outputs, xywh are reletive value
Preprocess true boxes to training input format
[ "Preprocess", "true", "boxes", "to", "training", "input", "format" ]
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''Preprocess true boxes to training input format

    Parameters
    ----------
    true_boxes: array, shape=(m, T, 5)
        Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
    input_shape: array-like, hw, multiples of 32
    anchors: array, shape=(N, 2), wh
    num_classes: integer

    Returns
    -------
    y_true: list of array, shape like yolo_outputs, xywh are reletive value
    '''
    assert (true_boxes[..., 4] < num_classes).all(), 'class id must be less than num_classes'
    num_layers = len(anchors)//3  # default setting
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]

    m = true_boxes.shape[0]
    grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
    y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5+num_classes),
        dtype='float32') for l in range(num_layers)]

    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)
    anchor_maxes = anchors / 2.
    anchor_mins = -anchor_maxes
    valid_mask = boxes_wh[..., 0] > 0

    for b in range(m):
        # Discard zero rows.
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh) == 0:
            continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)
        box_maxes = wh / 2.
        box_mins = -box_maxes

        intersect_mins = np.maximum(box_mins, anchor_mins)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)
        intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]
        anchor_area = anchors[..., 0] * anchors[..., 1]
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # Find best anchor for each true box
        best_anchor = np.argmax(iou, axis=-1)

        for t, n in enumerate(best_anchor):
            for l in range(num_layers):
                if n in anchor_mask[l]:
                    i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
                    k = anchor_mask[l].index(n)
                    c = true_boxes[b, t, 4].astype('int32')
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
                    y_true[l][b, j, i, k, 4] = 1
                    y_true[l][b, j, i, k, 5+c] = 1

    return y_true
[ "def", "preprocess_true_boxes", "(", "true_boxes", ",", "input_shape", ",", "anchors", ",", "num_classes", ")", ":", "assert", "(", "true_boxes", "[", "...", ",", "4", "]", "<", "num_classes", ")", ".", "all", "(", ")", ",", "'class id must be less than num_classes'", "num_layers", "=", "len", "(", "anchors", ")", "//", "3", "# default setting", "anchor_mask", "=", "[", "[", "6", ",", "7", ",", "8", "]", ",", "[", "3", ",", "4", ",", "5", "]", ",", "[", "0", ",", "1", ",", "2", "]", "]", "if", "num_layers", "==", "3", "else", "[", "[", "3", ",", "4", ",", "5", "]", ",", "[", "1", ",", "2", ",", "3", "]", "]", "true_boxes", "=", "np", ".", "array", "(", "true_boxes", ",", "dtype", "=", "'float32'", ")", "input_shape", "=", "np", ".", "array", "(", "input_shape", ",", "dtype", "=", "'int32'", ")", "boxes_xy", "=", "(", "true_boxes", "[", "...", ",", "0", ":", "2", "]", "+", "true_boxes", "[", "...", ",", "2", ":", "4", "]", ")", "//", "2", "boxes_wh", "=", "true_boxes", "[", "...", ",", "2", ":", "4", "]", "-", "true_boxes", "[", "...", ",", "0", ":", "2", "]", "true_boxes", "[", "...", ",", "0", ":", "2", "]", "=", "boxes_xy", "/", "input_shape", "[", ":", ":", "-", "1", "]", "true_boxes", "[", "...", ",", "2", ":", "4", "]", "=", "boxes_wh", "/", "input_shape", "[", ":", ":", "-", "1", "]", "m", "=", "true_boxes", ".", "shape", "[", "0", "]", "grid_shapes", "=", "[", "input_shape", "//", "{", "0", ":", "32", ",", "1", ":", "16", ",", "2", ":", "8", "}", "[", "l", "]", "for", "l", "in", "range", "(", "num_layers", ")", "]", "y_true", "=", "[", "np", ".", "zeros", "(", "(", "m", ",", "grid_shapes", "[", "l", "]", "[", "0", "]", ",", "grid_shapes", "[", "l", "]", "[", "1", "]", ",", "len", "(", "anchor_mask", "[", "l", "]", ")", ",", "5", "+", "num_classes", ")", ",", "dtype", "=", "'float32'", ")", "for", "l", "in", "range", "(", "num_layers", ")", "]", "# Expand dim to apply broadcasting.", "anchors", "=", "np", ".", "expand_dims", "(", "anchors", ",", "0", ")", "anchor_maxes", "=", "anchors", "/", "2.", "anchor_mins", "=", "-", "anchor_maxes", "valid_mask", "=", "boxes_wh", "[", "...", ",", "0", "]", ">", "0", "for", "b", "in", "range", "(", "m", ")", ":", "# Discard zero rows.", "wh", "=", "boxes_wh", "[", "b", ",", "valid_mask", "[", "b", "]", "]", "if", "len", "(", "wh", ")", "==", "0", ":", "continue", "# Expand dim to apply broadcasting.", "wh", "=", "np", ".", "expand_dims", "(", "wh", ",", "-", "2", ")", "box_maxes", "=", "wh", "/", "2.", "box_mins", "=", "-", "box_maxes", "intersect_mins", "=", "np", ".", "maximum", "(", "box_mins", ",", "anchor_mins", ")", "intersect_maxes", "=", "np", ".", "minimum", "(", "box_maxes", ",", "anchor_maxes", ")", "intersect_wh", "=", "np", ".", "maximum", "(", "intersect_maxes", "-", "intersect_mins", ",", "0.", ")", "intersect_area", "=", "intersect_wh", "[", "...", ",", "0", "]", "*", "intersect_wh", "[", "...", ",", "1", "]", "box_area", "=", "wh", "[", "...", ",", "0", "]", "*", "wh", "[", "...", ",", "1", "]", "anchor_area", "=", "anchors", "[", "...", ",", "0", "]", "*", "anchors", "[", "...", ",", "1", "]", "iou", "=", "intersect_area", "/", "(", "box_area", "+", "anchor_area", "-", "intersect_area", ")", "# Find best anchor for each true box", "best_anchor", "=", "np", ".", "argmax", "(", "iou", ",", "axis", "=", "-", "1", ")", "for", "t", ",", "n", "in", "enumerate", "(", "best_anchor", ")", ":", "for", "l", "in", "range", "(", "num_layers", ")", ":", "if", "n", "in", "anchor_mask", "[", "l", "]", ":", "i", "=", "np", ".", "floor", "(", 
"true_boxes", "[", "b", ",", "t", ",", "0", "]", "*", "grid_shapes", "[", "l", "]", "[", "1", "]", ")", ".", "astype", "(", "'int32'", ")", "j", "=", "np", ".", "floor", "(", "true_boxes", "[", "b", ",", "t", ",", "1", "]", "*", "grid_shapes", "[", "l", "]", "[", "0", "]", ")", ".", "astype", "(", "'int32'", ")", "k", "=", "anchor_mask", "[", "l", "]", ".", "index", "(", "n", ")", "c", "=", "true_boxes", "[", "b", ",", "t", ",", "4", "]", ".", "astype", "(", "'int32'", ")", "y_true", "[", "l", "]", "[", "b", ",", "j", ",", "i", ",", "k", ",", "0", ":", "4", "]", "=", "true_boxes", "[", "b", ",", "t", ",", "0", ":", "4", "]", "y_true", "[", "l", "]", "[", "b", ",", "j", ",", "i", ",", "k", ",", "4", "]", "=", "1", "y_true", "[", "l", "]", "[", "b", ",", "j", ",", "i", ",", "k", ",", "5", "+", "c", "]", "=", "1", "return", "y_true" ]
https://github.com/sigmaai/self-driving-golf-cart/blob/8d891600af3d851add27a10ae45cf3c2108bb87c/ros/src/detection/object_detection/scripts/yolo3/model.py#L244-L313
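A hedged call sketch for the record above, assuming a 416x416 input and the usual 9 COCO-style anchors (all numbers illustrative):

import numpy as np

anchors = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                    [59, 119], [116, 90], [156, 198], [373, 326]])
# one image, one box: x_min, y_min, x_max, y_max, class_id
true_boxes = np.array([[[100, 120, 200, 260, 0]]], dtype='float32')

y_true = preprocess_true_boxes(true_boxes, (416, 416), anchors, num_classes=80)
print([a.shape for a in y_true])
# -> [(1, 13, 13, 3, 85), (1, 26, 26, 3, 85), (1, 52, 52, 3, 85)]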
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/thrift/transport/TZlibTransport.py
python
TZlibTransport.open
(self)
return self.__trans.open()
Open the underlying transport
Open the underlying transport
[ "Open", "the", "underlying", "transport" ]
def open(self):
    """Open the underlying transport"""
    self._init_stats()
    return self.__trans.open()
[ "def", "open", "(", "self", ")", ":", "self", ".", "_init_stats", "(", ")", "return", "self", ".", "__trans", ".", "open", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/thrift/transport/TZlibTransport.py#L162-L165
trilinos/Trilinos
6168be6dd51e35e1cd681e9c4b24433e709df140
packages/seacas/scripts/exomerge3.py
python
import_model
(filename, *args, **kwargs)
return model
Load information from an ExodusII file.

This function is a wrapper around 'ExodusModel.import_model(...)'
and is provided for convenience.  Internally, this is equivalent to
executing the following two statements.

>>> model = ExodusModel()
>>> model.import_model(...)

See 'ExodusModel.import_model' for additional information.
Load information from an ExodusII file.
[ "Load", "information", "from", "an", "ExodusII", "file", "." ]
def import_model(filename, *args, **kwargs):
    """
    Load information from an ExodusII file.

    This function is a wrapper around 'ExodusModel.import_model(...)'
    and is provided for convenience.  Internally, this is equivalent to
    executing the following two statements.

    >>> model = ExodusModel()
    >>> model.import_model(...)

    See 'ExodusModel.import_model' for additional information.
    """
    model = ExodusModel()
    model.import_model(filename, *args, **kwargs)
    return model
[ "def", "import_model", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "model", "=", "ExodusModel", "(", ")", "model", ".", "import_model", "(", "filename", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "model" ]
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/scripts/exomerge3.py#L94-L110
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_tab.py
python
EdTabBase.DoOnIdle
(self)
Called when the notebook is idle and this instance is the active tab.
Called when the notebook is idle and this instance is the active tab.
[ "Called", "when", "the", "notebook", "is", "idle", "and", "this", "instance", "is", "the", "active", "tab", "." ]
def DoOnIdle(self):
    """Called when the notebook is idle and this instance is the active
    tab.
    """
    pass
[ "def", "DoOnIdle", "(", "self", ")", ":", "pass" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_tab.py#L52-L57
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/logging/handlers.py
python
SocketHandler.emit
(self, record)
Emit a record. Pickles the record and writes it to the socket in binary format. If there is an error with the socket, silently drop the packet. If there was a problem with the socket, re-establishes the socket.
Emit a record.
[ "Emit", "a", "record", "." ]
def emit(self, record):
    """
    Emit a record.

    Pickles the record and writes it to the socket in binary format.
    If there is an error with the socket, silently drop the packet.
    If there was a problem with the socket, re-establishes the
    socket.
    """
    try:
        s = self.makePickle(record)
        self.send(s)
    except Exception:
        self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "s", "=", "self", ".", "makePickle", "(", "record", ")", "self", ".", "send", "(", "s", ")", "except", "Exception", ":", "self", ".", "handleError", "(", "record", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/logging/handlers.py#L621-L634
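Typical wiring for the handler, following the stdlib logging documentation (host and logger name are illustrative):

import logging
import logging.handlers

sock_handler = logging.handlers.SocketHandler(
    'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)  # port 9020

log = logging.getLogger('app')
log.addHandler(sock_handler)
log.error('shipped to the listener as a pickled LogRecord')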
microsoft/ivy
9f3c7ecc0b2383129fdd0953e10890d98d09a82d
ivy/ui_extensions_api.py
python
apply_goal_tactic
(goal, tactic)
Create a new cell in the notebook that applies the tactic
Create a new cell in the notebook that applies the tactic
[ "Create", "a", "new", "cell", "in", "the", "notebook", "that", "applies", "the", "tactic" ]
def apply_goal_tactic(goal, tactic):
    """
    Create a new cell in the notebook that applies the tactic
    """
    code = '''{}(goal({!r}))'''.format(tactic, goal.id)
    yield ExecuteNewCell(code)
[ "def", "apply_goal_tactic", "(", "goal", ",", "tactic", ")", ":", "code", "=", "'''{}(goal({!r}))'''", ".", "format", "(", "tactic", ",", "goal", ".", "id", ")", "yield", "ExecuteNewCell", "(", "code", ")" ]
https://github.com/microsoft/ivy/blob/9f3c7ecc0b2383129fdd0953e10890d98d09a82d/ivy/ui_extensions_api.py#L383-L388
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/distutils/msvccompiler.py
python
MSVCCompiler.find_exe
(self, exe)
return exe
Return path to an MSVC executable program. Tries to find the program in several places: first, one of the MSVC program search paths from the registry; next, the directories in the PATH environment variable. If any of those work, return an absolute path that is known to exist. If none of them work, just return the original program name, 'exe'.
Return path to an MSVC executable program.
[ "Return", "path", "to", "an", "MSVC", "executable", "program", "." ]
def find_exe(self, exe):
    """Return path to an MSVC executable program.

    Tries to find the program in several places: first, one of the
    MSVC program search paths from the registry; next, the directories
    in the PATH environment variable.  If any of those work, return an
    absolute path that is known to exist.  If none of them work, just
    return the original program name, 'exe'.
    """
    for p in self.__paths:
        fn = os.path.join(os.path.abspath(p), exe)
        if os.path.isfile(fn):
            return fn

    # didn't find it; try existing path
    for p in os.environ['Path'].split(';'):
        fn = os.path.join(os.path.abspath(p), exe)
        if os.path.isfile(fn):
            return fn

    return exe
[ "def", "find_exe", "(", "self", ",", "exe", ")", ":", "for", "p", "in", "self", ".", "__paths", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "p", ")", ",", "exe", ")", "if", "os", ".", "path", ".", "isfile", "(", "fn", ")", ":", "return", "fn", "# didn't find it; try existing path", "for", "p", "in", "os", ".", "environ", "[", "'Path'", "]", ".", "split", "(", "';'", ")", ":", "fn", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "p", ")", ",", "exe", ")", "if", "os", ".", "path", ".", "isfile", "(", "fn", ")", ":", "return", "fn", "return", "exe" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/distutils/msvccompiler.py#L565-L585
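The PATH-scanning fallback in find_exe is what shutil.which provides in modern Python; a sketch of an equivalent lookup, with the registry-derived paths left as a caller-supplied assumption:

import shutil

def find_exe_fallback(exe, extra_paths=()):
    # Search caller-supplied paths first (e.g. registry-derived MSVC dirs),
    # then fall back to the PATH environment variable.
    for p in extra_paths:
        candidate = shutil.which(exe, path=p)
        if candidate:
            return candidate
    return shutil.which(exe) or exe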
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py
python
ParserElement.setDefaultWhitespaceChars
( chars )
r""" Overrides the default whitespace chars Example:: # default whitespace chars are space, <TAB> and newline OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] # change to just treat newline as significant ParserElement.setDefaultWhitespaceChars(" \t") OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
r""" Overrides the default whitespace chars
[ "r", "Overrides", "the", "default", "whitespace", "chars" ]
def setDefaultWhitespaceChars( chars ):
    r"""
    Overrides the default whitespace chars

    Example::

        # default whitespace chars are space, <TAB> and newline
        OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']

        # change to just treat newline as significant
        ParserElement.setDefaultWhitespaceChars(" \t")
        OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
    """
    ParserElement.DEFAULT_WHITE_CHARS = chars
[ "def", "setDefaultWhitespaceChars", "(", "chars", ")", ":", "ParserElement", ".", "DEFAULT_WHITE_CHARS", "=", "chars" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/pkg_resources/_vendor/pyparsing.py#L1109-L1121
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/misc_util.py
python
msvc_runtime_version
()
return msc_ver
Return version of MSVC runtime library, as defined by __MSC_VER__ macro
Return version of MSVC runtime library, as defined by __MSC_VER__ macro
[ "Return", "version", "of", "MSVC", "runtime", "library", "as", "defined", "by", "__MSC_VER__", "macro" ]
def msvc_runtime_version():
    "Return version of MSVC runtime library, as defined by __MSC_VER__ macro"
    msc_pos = sys.version.find('MSC v.')
    if msc_pos != -1:
        msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
    else:
        msc_ver = None
    return msc_ver
[ "def", "msvc_runtime_version", "(", ")", ":", "msc_pos", "=", "sys", ".", "version", ".", "find", "(", "'MSC v.'", ")", "if", "msc_pos", "!=", "-", "1", ":", "msc_ver", "=", "int", "(", "sys", ".", "version", "[", "msc_pos", "+", "6", ":", "msc_pos", "+", "10", "]", ")", "else", ":", "msc_ver", "=", "None", "return", "msc_ver" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numpy/distutils/misc_util.py#L401-L408
psi4/psi4
be533f7f426b6ccc263904e55122899b16663395
psi4/driver/qcdb/dbwrap.py
python
cure_weight
(refrxn, refeq, rrat, xi=0.2)
return weight
:param refeq: value of benchmark for equilibrium Reaction
:param rrat: ratio of intermonomer separation for Reaction to equilibrium Reaction
:param xi: parameter
:return: weight for CURE
:param refeq: value of benchmark for equilibrium Reaction :param rrat: ratio of intermonomer separation for Reaction to equilibrium Reaction :param xi: parameter :return: weight for CURE
[ ":", "param", "refeq", ":", "value", "of", "benchmark", "for", "equilibrium", "Reaction", ":", "param", "rrat", ":", "ratio", "of", "intermonomer", "separation", "for", "Reaction", "to", "equilibrium", "Reaction", ":", "param", "xi", ":", "parameter", ":", "return", ":", "weight", "for", "CURE" ]
def cure_weight(refrxn, refeq, rrat, xi=0.2):
    """

    :param refeq: value of benchmark for equilibrium Reaction
    :param rrat: ratio of intermonomer separation for Reaction to equilibrium Reaction
    :param xi: parameter
    :return: weight for CURE
    """
    sigma = xi * abs(refeq) / (rrat ** 3)
    weight = max(abs(refrxn), sigma)
    return weight
[ "def", "cure_weight", "(", "refrxn", ",", "refeq", ",", "rrat", ",", "xi", "=", "0.2", ")", ":", "sigma", "=", "xi", "*", "abs", "(", "refeq", ")", "/", "(", "rrat", "**", "3", ")", "weight", "=", "max", "(", "abs", "(", "refrxn", ")", ",", "sigma", ")", "return", "weight" ]
https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/dbwrap.py#L232-L242
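A worked example of the CURE weighting (numbers invented for illustration):

# refrxn = -1.0, refeq = -5.0, rrat = 1.2, xi = 0.2
# sigma  = 0.2 * |-5.0| / 1.2**3 = 1.0 / 1.728 ~ 0.579
# weight = max(|-1.0|, 0.579)   = 1.0
print(cure_weight(-1.0, -5.0, 1.2))   # -> 1.0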
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/idl/idl/writer.py
python
UnindentedBlock.__exit__
(self, *args)
Write the ending of the block.
Write the ending of the block.
[ "Write", "the", "ending", "of", "the", "block", "." ]
def __exit__(self, *args):
    # type: (*str) -> None
    """Write the ending of the block."""
    self._writer.write_unindented_line(self._closing)
[ "def", "__exit__", "(", "self", ",", "*", "args", ")", ":", "# type: (*str) -> None", "self", ".", "_writer", ".", "write_unindented_line", "(", "self", ".", "_closing", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/idl/idl/writer.py#L274-L277
neoml-lib/neoml
a0d370fba05269a1b2258cef126f77bbd2054a3e
NeoML/Python/neoml/Dnn/Dnn.py
python
Dnn.delete_layer
(self, layer)
Deletes a layer from the network.

:param layer: the layer to be deleted, or its name
:type layer: neoml.Dnn.Layer or str
Deletes a layer from the network.
[ "Deletes", "a", "layer", "from", "the", "network", "." ]
def delete_layer(self, layer):
    """Deletes a layer from the network.

    :param layer: the layer to be deleted, or its name
    :type layer: neoml.Dnn.Layer or str
    """
    if type(layer) is str:
        self._delete_layer(layer)
    elif isinstance(layer, Layer):
        self._delete_layer(layer.name)
    else:
        raise ValueError('The `layer` is expected to be `str` or `neoml.Dnn.Layer`')
[ "def", "delete_layer", "(", "self", ",", "layer", ")", ":", "if", "type", "(", "layer", ")", "is", "str", ":", "self", ".", "_delete_layer", "(", "layer", ")", "elif", "isinstance", "(", "layer", ",", "Layer", ")", ":", "self", ".", "_delete_layer", "(", "layer", ".", "name", ")", "else", ":", "raise", "ValueError", "(", "'The `layer` is expected to be `str` or `neoml.Dnn.Layer`'", ")" ]
https://github.com/neoml-lib/neoml/blob/a0d370fba05269a1b2258cef126f77bbd2054a3e/NeoML/Python/neoml/Dnn/Dnn.py#L139-L150
openthread/openthread
9fcdbed9c526c70f1556d1ed84099c1535c7cd32
tools/harness-thci/OpenThread.py
python
OpenThreadTHCI.__init__
(self, **kwargs)
initialize the serial port and default network parameters
Args:
    **kwargs: Arbitrary keyword arguments
              Includes 'EUI' and 'SerialPort'
initialize the serial port and default network parameters Args: **kwargs: Arbitrary keyword arguments Includes 'EUI' and 'SerialPort'
[ "initialize", "the", "serial", "port", "and", "default", "network", "parameters", "Args", ":", "**", "kwargs", ":", "Arbitrary", "keyword", "arguments", "Includes", "EUI", "and", "SerialPort" ]
def __init__(self, **kwargs):
    """initialize the serial port and default network parameters
    Args:
        **kwargs: Arbitrary keyword arguments
                  Includes 'EUI' and 'SerialPort'
    """
    self.intialize(kwargs)
[ "def", "__init__", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "intialize", "(", "kwargs", ")" ]
https://github.com/openthread/openthread/blob/9fcdbed9c526c70f1556d1ed84099c1535c7cd32/tools/harness-thci/OpenThread.py#L223-L229
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/core/_internal.py
python
_ctypes.get_strides
(self)
return self.strides
Deprecated getter for the `_ctypes.strides` property.

.. deprecated:: 1.21
Deprecated getter for the `_ctypes.strides` property.
[ "Deprecated", "getter", "for", "the", "_ctypes", ".", "strides", "property", "." ]
def get_strides(self):
    """Deprecated getter for the `_ctypes.strides` property.

    .. deprecated:: 1.21
    """
    warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                  DeprecationWarning, stacklevel=2)
    return self.strides
[ "def", "get_strides", "(", "self", ")", ":", "warnings", ".", "warn", "(", "'\"get_strides\" is deprecated. Use \"strides\" instead'", ",", "DeprecationWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "strides" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/core/_internal.py#L374-L381
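For contrast, the non-deprecated spelling reads the strides through the plain ndarray property; both routes report the same numbers:

import numpy as np

a = np.zeros((3, 4))              # C-order float64
print(a.strides)                  # (32, 8) - the recommended property
print(list(a.ctypes.strides))     # [32, 8] - same values via the ctypes interface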
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/buildscripts/packager-enterprise.py
python
make_package
(distro, build_os, arch, spec, srcdir)
return distro.make_pkg(build_os, arch, spec, srcdir)
Construct the package for (arch, distro, spec), getting packaging files from srcdir and any user-specified suffix from suffixes
Construct the package for (arch, distro, spec), getting packaging files from srcdir and any user-specified suffix from suffixes
[ "Construct", "the", "package", "for", "(", "arch", "distro", "spec", ")", "getting", "packaging", "files", "from", "srcdir", "and", "any", "user", "-", "specified", "suffix", "from", "suffixes" ]
def make_package(distro, build_os, arch, spec, srcdir):
    """Construct the package for (arch, distro, spec), getting
    packaging files from srcdir and any user-specified suffix
    from suffixes"""

    sdir=setupdir(distro, build_os, arch, spec)
    packager.ensure_dir(sdir)
    # Note that the RPM packages get their man pages from the debian
    # directory, so the debian directory is needed in all cases (and
    # innocuous in the debianoids' sdirs).
    for pkgdir in ["debian", "rpm"]:
        print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
        # FIXME: sh-dash-cee is bad. See if tarfile can do this.
        packager.sysassert(["sh", "-c", "(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.metadata_gitspec(), pkgdir, sdir)])
    # Splat the binaries and snmp files under sdir. The "build" stages of the
    # packaging infrastructure will move the files to wherever they
    # need to go.
    unpack_binaries_into(build_os, arch, spec, sdir)
    # Remove the mongoreplay binary due to libpcap dynamic
    # linkage.
    if os.path.exists(sdir + "bin/mongoreplay"):
        os.unlink(sdir + "bin/mongoreplay")
    return distro.make_pkg(build_os, arch, spec, srcdir)
[ "def", "make_package", "(", "distro", ",", "build_os", ",", "arch", ",", "spec", ",", "srcdir", ")", ":", "sdir", "=", "setupdir", "(", "distro", ",", "build_os", ",", "arch", ",", "spec", ")", "packager", ".", "ensure_dir", "(", "sdir", ")", "# Note that the RPM packages get their man pages from the debian", "# directory, so the debian directory is needed in all cases (and", "# innocuous in the debianoids' sdirs).", "for", "pkgdir", "in", "[", "\"debian\"", ",", "\"rpm\"", "]", ":", "print", "\"Copying packaging files from %s to %s\"", "%", "(", "\"%s/%s\"", "%", "(", "srcdir", ",", "pkgdir", ")", ",", "sdir", ")", "# FIXME: sh-dash-cee is bad. See if tarfile can do this.", "packager", ".", "sysassert", "(", "[", "\"sh\"", ",", "\"-c\"", ",", "\"(cd \\\"%s\\\" && git archive %s %s/ ) | (cd \\\"%s\\\" && tar xvf -)\"", "%", "(", "srcdir", ",", "spec", ".", "metadata_gitspec", "(", ")", ",", "pkgdir", ",", "sdir", ")", "]", ")", "# Splat the binaries and snmp files under sdir. The \"build\" stages of the", "# packaging infrastructure will move the files to wherever they", "# need to go.", "unpack_binaries_into", "(", "build_os", ",", "arch", ",", "spec", ",", "sdir", ")", "# Remove the mongoreplay binary due to libpcap dynamic", "# linkage.", "if", "os", ".", "path", ".", "exists", "(", "sdir", "+", "\"bin/mongoreplay\"", ")", ":", "os", ".", "unlink", "(", "sdir", "+", "\"bin/mongoreplay\"", ")", "return", "distro", ".", "make_pkg", "(", "build_os", ",", "arch", ",", "spec", ",", "srcdir", ")" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/buildscripts/packager-enterprise.py#L218-L240
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/valgrind/scan-build.py
python
MultiLineChange.__call__
(self, line)
return False
Test a single line against multi-line change. If it matches the currently active line, advance one line. If the current line is the last line, report a match.
Test a single line against multi-line change.
[ "Test", "a", "single", "line", "against", "multi", "-", "line", "change", "." ]
def __call__(self, line): """ Test a single line against multi-line change. If it matches the currently active line, advance one line. If the current line is the last line, report a match. """ if self._tracked_lines[self._current] in line: self._current = self._current + 1 if self._current == len(self._tracked_lines): self._current = 0 return True else: self._current = 0 return False
[ "def", "__call__", "(", "self", ",", "line", ")", ":", "if", "self", ".", "_tracked_lines", "[", "self", ".", "_current", "]", "in", "line", ":", "self", ".", "_current", "=", "self", ".", "_current", "+", "1", "if", "self", ".", "_current", "==", "len", "(", "self", ".", "_tracked_lines", ")", ":", "self", ".", "_current", "=", "0", "return", "True", "else", ":", "self", ".", "_current", "=", "0", "return", "False" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/valgrind/scan-build.py#L172-L185
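A minimal runnable sketch of the state machine above; the constructor shown here is an assumption, since the record only contains `__call__`:

```python
class MultiLineChange(object):
    """Feed lines one at a time; reports a match once the whole
    tracked sequence has been seen in order."""

    def __init__(self, lines):
        # assumed constructor -- the record only shows __call__
        self._tracked_lines = lines
        self._current = 0

    def __call__(self, line):
        if self._tracked_lines[self._current] in line:
            self._current += 1
            if self._current == len(self._tracked_lines):
                self._current = 0
                return True
        else:
            self._current = 0
        return False


matcher = MultiLineChange(["first", "second"])
print([matcher(l) for l in ["a first line", "then second", "other"]])
# -> [False, True, False]
```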
kamyu104/LeetCode-Solutions
77605708a927ea3b85aee5a479db733938c7c211
Python/all-elements-in-two-binary-search-trees.py
python
Solution.getAllElements
(self, root1, root2)
return result
:type root1: TreeNode :type root2: TreeNode :rtype: List[int]
:type root1: TreeNode :type root2: TreeNode :rtype: List[int]
[ ":", "type", "root1", ":", "TreeNode", ":", "type", "root2", ":", "TreeNode", ":", "rtype", ":", "List", "[", "int", "]" ]
def getAllElements(self, root1, root2): """ :type root1: TreeNode :type root2: TreeNode :rtype: List[int] """ def inorder_gen(root): result, stack = [], [(root, False)] while stack: root, is_visited = stack.pop() if root is None: continue if is_visited: yield root.val else: stack.append((root.right, False)) stack.append((root, True)) stack.append((root.left, False)) yield None result = [] left_gen, right_gen = inorder_gen(root1), inorder_gen(root2) left, right = next(left_gen), next(right_gen) while left is not None or right is not None: if right is None or (left is not None and left < right): result.append(left) left = next(left_gen) else: result.append(right) right = next(right_gen) return result
[ "def", "getAllElements", "(", "self", ",", "root1", ",", "root2", ")", ":", "def", "inorder_gen", "(", "root", ")", ":", "result", ",", "stack", "=", "[", "]", ",", "[", "(", "root", ",", "False", ")", "]", "while", "stack", ":", "root", ",", "is_visited", "=", "stack", ".", "pop", "(", ")", "if", "root", "is", "None", ":", "continue", "if", "is_visited", ":", "yield", "root", ".", "val", "else", ":", "stack", ".", "append", "(", "(", "root", ".", "right", ",", "False", ")", ")", "stack", ".", "append", "(", "(", "root", ",", "True", ")", ")", "stack", ".", "append", "(", "(", "root", ".", "left", ",", "False", ")", ")", "yield", "None", "result", "=", "[", "]", "left_gen", ",", "right_gen", "=", "inorder_gen", "(", "root1", ")", ",", "inorder_gen", "(", "root2", ")", "left", ",", "right", "=", "next", "(", "left_gen", ")", ",", "next", "(", "right_gen", ")", "while", "left", "is", "not", "None", "or", "right", "is", "not", "None", ":", "if", "right", "is", "None", "or", "(", "left", "is", "not", "None", "and", "left", "<", "right", ")", ":", "result", ".", "append", "(", "left", ")", "left", "=", "next", "(", "left_gen", ")", "else", ":", "result", ".", "append", "(", "right", ")", "right", "=", "next", "(", "right_gen", ")", "return", "result" ]
https://github.com/kamyu104/LeetCode-Solutions/blob/77605708a927ea3b85aee5a479db733938c7c211/Python/all-elements-in-two-binary-search-trees.py#L13-L43
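A usage sketch for the merge above, assuming the record's `Solution` class is in scope; the `TreeNode` definition is the usual LeetCode scaffold, reproduced here so the snippet is self-contained:

```python
class TreeNode:  # standard LeetCode scaffold, repeated for self-containment
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root1 = TreeNode(2, TreeNode(1), TreeNode(4))   # BST holding [1, 2, 4]
root2 = TreeNode(1, TreeNode(0), TreeNode(3))   # BST holding [0, 1, 3]
print(Solution().getAllElements(root1, root2))  # [0, 1, 1, 2, 3, 4]
```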
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/setuptools/py3/setuptools/package_index.py
python
ContentChecker.feed
(self, block)
return
Feed a block of data to the hash.
Feed a block of data to the hash.
[ "Feed", "a", "block", "of", "data", "to", "the", "hash", "." ]
def feed(self, block): """ Feed a block of data to the hash. """ return
[ "def", "feed", "(", "self", ",", "block", ")", ":", "return" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/setuptools/py3/setuptools/package_index.py#L228-L232
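The no-op `feed` above is the base of a null-object pattern: callers always feed blocks, and only checkers that actually hash do work. A hedged sketch of that shape (the subclass name and constructor here are illustrative, not setuptools' exact API):

```python
import hashlib

class ContentChecker:
    """Null object: accepts blocks, never fails validation."""
    def feed(self, block):
        return
    def is_valid(self):
        return True

class DigestChecker(ContentChecker):
    """Illustrative subclass (name and constructor are assumptions)."""
    def __init__(self, algorithm, expected_hexdigest):
        self.hash = hashlib.new(algorithm)
        self.expected = expected_hexdigest
    def feed(self, block):
        self.hash.update(block)
    def is_valid(self):
        return self.hash.hexdigest() == self.expected

checker = DigestChecker("sha256", hashlib.sha256(b"payload").hexdigest())
for block in (b"pay", b"load"):
    checker.feed(block)
print(checker.is_valid())  # True
```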
pyne/pyne
0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3
pyne/alara.py
python
_build_matrix
(N)
return A
This function builds the burnup matrix, A. Decay only.
This function builds the burnup matrix, A. Decay only.
[ "This", "function", "builds", "burnup", "matrix", "A", ".", "Decay", "only", "." ]
def _build_matrix(N): """ This function builds burnup matrix, A. Decay only. """ A = np.zeros((len(N), len(N))) # convert N to id form N_id = [] for i in range(len(N)): if isinstance(N[i], str): ID = nucname.id(N[i]) else: ID = N[i] N_id.append(ID) sds = SimpleDataSource() # Decay for i in range(len(N)): A[i, i] -= decay_const(N_id[i]) # Find decay parents for k in range(len(N)): if N_id[i] in decay_children(N_id[k]): A[i, k] += branch_ratio(N_id[k], N_id[i])*decay_const(N_id[k]) return A
[ "def", "_build_matrix", "(", "N", ")", ":", "A", "=", "np", ".", "zeros", "(", "(", "len", "(", "N", ")", ",", "len", "(", "N", ")", ")", ")", "# convert N to id form", "N_id", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "N", ")", ")", ":", "if", "isinstance", "(", "N", "[", "i", "]", ",", "str", ")", ":", "ID", "=", "nucname", ".", "id", "(", "N", "[", "i", "]", ")", "else", ":", "ID", "=", "N", "[", "i", "]", "N_id", ".", "append", "(", "ID", ")", "sds", "=", "SimpleDataSource", "(", ")", "# Decay", "for", "i", "in", "range", "(", "len", "(", "N", ")", ")", ":", "A", "[", "i", ",", "i", "]", "-=", "decay_const", "(", "N_id", "[", "i", "]", ")", "# Find decay parents", "for", "k", "in", "range", "(", "len", "(", "N", ")", ")", ":", "if", "N_id", "[", "i", "]", "in", "decay_children", "(", "N_id", "[", "k", "]", ")", ":", "A", "[", "i", ",", "k", "]", "+=", "branch_ratio", "(", "N_id", "[", "k", "]", ",", "N_id", "[", "i", "]", ")", "*", "decay_const", "(", "N_id", "[", "k", "]", ")", "return", "A" ]
https://github.com/pyne/pyne/blob/0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3/pyne/alara.py#L835-L860
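The matrix A built above drives the decay ODE dN/dt = A·N, so N(t) = expm(A·t)·N(0). A toy two-nuclide check with an invented decay constant (no pyne needed):

```python
import numpy as np
from scipy.linalg import expm

lam = 1e-3  # invented decay constant, 1/s: nuclide 0 -> nuclide 1
A = np.array([[-lam, 0.0],
              [ lam, 0.0]])   # same structure _build_matrix produces

N0 = np.array([1.0, 0.0])     # pure parent at t = 0
t = 1000.0                    # seconds
print(expm(A * t) @ N0)       # ~[0.368, 0.632]: exp(-lam*t) left in parent
```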
rodeofx/OpenWalter
6116fbe3f04f1146c854afbfbdbe944feaee647e
walter/maya/scripts/walterPanel/walterOutliner.py
python
TreeItem.getWalterStandinItem
(self)
return parent.getWalterStandinItem()
Recursively search the parents for the origin parent TreeItem.
Recursively search the parents for the origin parent TreeItem.
[ "Recursively", "search", "the", "the", "origin", "parent", "TreeItem", "in", "parents", "." ]
def getWalterStandinItem(self): """Recursively search the the origin parent TreeItem in parents.""" parent = self.parent() if not parent.parent(): return self return parent.getWalterStandinItem()
[ "def", "getWalterStandinItem", "(", "self", ")", ":", "parent", "=", "self", ".", "parent", "(", ")", "if", "not", "parent", ".", "parent", "(", ")", ":", "return", "self", "return", "parent", ".", "getWalterStandinItem", "(", ")" ]
https://github.com/rodeofx/OpenWalter/blob/6116fbe3f04f1146c854afbfbdbe944feaee647e/walter/maya/scripts/walterPanel/walterOutliner.py#L1898-L1904
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/grid.py
python
GridTableBase.SetValueAsLong
(*args, **kwargs)
return _grid.GridTableBase_SetValueAsLong(*args, **kwargs)
SetValueAsLong(self, int row, int col, long value)
SetValueAsLong(self, int row, int col, long value)
[ "SetValueAsLong", "(", "self", "int", "row", "int", "col", "long", "value", ")" ]
def SetValueAsLong(*args, **kwargs): """SetValueAsLong(self, int row, int col, long value)""" return _grid.GridTableBase_SetValueAsLong(*args, **kwargs)
[ "def", "SetValueAsLong", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "GridTableBase_SetValueAsLong", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/grid.py#L846-L848
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqt/mantidqt/widgets/sliceviewer/roi.py
python
_index_range_spectraaxis
(workspace: MatrixWorkspace, ymin: float, ymax: float)
Return the workspace indices for the given ymin/ymax values on the given workspace :param workspace: A MatrixWorkspace object with a spectra Y axis :param ymin: Minimum Y value in range :param ymax: Maximum Y value in range
Return the workspace indices for the given ymin/ymax values on the given workspace :param workspace: A MatrixWorkspace object with a spectra Y axis :param ymin: Minimum Y value in range :param ymax: Maximum Y value in range
[ "Return", "the", "workspace", "indicies", "for", "the", "given", "ymin", "/", "ymax", "values", "on", "the", "given", "workspace", ":", "param", "workspace", ":", "A", "MatrixWorkspace", "object", "spectra", "Y", "Axis", ":", "param", "ymin", ":", "Minimum", "Y", "value", "in", "range", ":", "param", "ymax", ":", "Maximum", "Y", "value", "in", "range" ]
def _index_range_spectraaxis(workspace: MatrixWorkspace, ymin: float, ymax: float): """ Return the workspace indicies for the given ymin/ymax values on the given workspace :param workspace: A MatrixWorkspace object spectra Y Axis :param ymin: Minimum Y value in range :param ymax: Maximum Y value in range """ if ymin is None or ymax is None: return 0, workspace.getNumberHistograms() - 1 else: spectra_axis = workspace.getAxis(1) return spectra_axis.indexOfValue(ymin), spectra_axis.indexOfValue(ymax)
[ "def", "_index_range_spectraaxis", "(", "workspace", ":", "MatrixWorkspace", ",", "ymin", ":", "float", ",", "ymax", ":", "float", ")", ":", "if", "ymin", "is", "None", "or", "ymax", "is", "None", ":", "return", "0", ",", "workspace", ".", "getNumberHistograms", "(", ")", "-", "1", "else", ":", "spectra_axis", "=", "workspace", ".", "getAxis", "(", "1", ")", "return", "spectra_axis", ".", "indexOfValue", "(", "ymin", ")", ",", "spectra_axis", ".", "indexOfValue", "(", "ymax", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/sliceviewer/roi.py#L247-L258
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/tf_asymmetry_fitting/tf_asymmetry_fitting_presenter.py
python
TFAsymmetryFittingPresenter.handle_normalisation_changed
(self)
Handles when the normalisation line edit has been changed by the user.
Handles when the normalisation line edit has been changed by the user.
[ "Handles", "when", "the", "normalisation", "line", "edit", "has", "been", "changed", "by", "the", "user", "." ]
def handle_normalisation_changed(self) -> None: """Handles when the normalisation line edit has been changed by the user.""" self.model.set_current_normalisation(self.view.normalisation) self.update_plot_guess() self.fit_parameter_changed_notifier.notify_subscribers()
[ "def", "handle_normalisation_changed", "(", "self", ")", "->", "None", ":", "self", ".", "model", ".", "set_current_normalisation", "(", "self", ".", "view", ".", "normalisation", ")", "self", ".", "update_plot_guess", "(", ")", "self", ".", "fit_parameter_changed_notifier", ".", "notify_subscribers", "(", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/tf_asymmetry_fitting/tf_asymmetry_fitting_presenter.py#L82-L87
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
demo/DPU-for-RNN/rnnt_asr_vck5000/inference.py
python
eval
( data_layer, audio_processor, greedy_decoder, labels, args)
performs inference / evaluation Args: data_layer: data layer object that holds data loader audio_processor: data processing module greedy_decoder: greedy decoder labels: list of labels as output vocabulary args: script input arguments
performs inference / evaluation Args: data_layer: data layer object that holds data loader audio_processor: data processing module greedy_decoder: greedy decoder labels: list of labels as output vocabulary args: script input arguments
[ "performs", "inference", "/", "evaluation", "Args", ":", "data_layer", ":", "data", "layer", "object", "that", "holds", "data", "loader", "audio_processor", ":", "data", "processing", "module", "greedy_decoder", ":", "greedy", "decoder", "labels", ":", "list", "of", "labels", "as", "output", "vocabulary", "args", ":", "script", "input", "arguments" ]
def eval( data_layer, audio_processor, greedy_decoder, labels, args): """performs inference / evaluation Args: data_layer: data layer object that holds data loader audio_processor: data processing module greedy_decoder: greedy decoder labels: list of labels as output vocabulary args: script input arguments """ start_t = time.time() if args.mode==1 or args.mode==2: rnnt_hw_model = RNNT_infer_model() else: rnnt_hw_model = None logits_save_to = args.logits_save_to with torch.no_grad(): _global_var_dict = { 'predictions': [], 'transcripts': [], 'logits': [], } Processnum = [] for it, data in enumerate(data_layer.data_iterator): if args.mode == 3: (t_audio_signal_e, t_a_sig_length_e, transcript_list, t_transcript_e, t_transcript_len_e) = audio_processor(data) h_rnns =(None,None) label=[] hidden = None #greedy decode on cpu t_transcript_e = torch.nn.utils.rnn.pad_packed_sequence(t_transcript_e, batch_first=True)[0] t_predictions_e, h_pre_rnns, hidden_predict, decode_batch_length = greedy_decoder.decode(t_audio_signal_e, t_a_sig_length_e, h_rnns,label,hidden, None) else: Process_ver = MyProcess(data, audio_processor, _global_var_dict, labels, ver_Process,rnnt_hw_model,greedy_decoder) Process_ver.start() Processnum.append(it) if args.steps is not None and it + 1 >= args.steps: break if args.mode !=3: for id in Processnum: Process_ver.join() else: values_dict = dict( predictions=[t_predictions_e], transcript=transcript_list, transcript_length=t_transcript_len_e, ) process_evaluation_batch(values_dict, _global_var_dict, labels=labels) wer = process_evaluation_epoch(_global_var_dict) print("=================>Evaluation WER: {0}\n".format(wer)) if args.save_prediction is not None: with open(args.save_prediction, 'w') as fp: fp.write('\n'.join(_global_var_dict['predictions'])) end_t =time.time() if args.mode == 1: print('dpu computation time (lstm_run time)', sum(lstm_run_time_t)) print('e2e decode time:',end_t-start_t)
[ "def", "eval", "(", "data_layer", ",", "audio_processor", ",", "greedy_decoder", ",", "labels", ",", "args", ")", ":", "start_t", "=", "time", ".", "time", "(", ")", "if", "args", ".", "mode", "==", "1", "or", "args", ".", "mode", "==", "2", ":", "rnnt_hw_model", "=", "RNNT_infer_model", "(", ")", "else", ":", "rnnt_hw_model", "=", "None", "logits_save_to", "=", "args", ".", "logits_save_to", "with", "torch", ".", "no_grad", "(", ")", ":", "_global_var_dict", "=", "{", "'predictions'", ":", "[", "]", ",", "'transcripts'", ":", "[", "]", ",", "'logits'", ":", "[", "]", ",", "}", "Processnum", "=", "[", "]", "for", "it", ",", "data", "in", "enumerate", "(", "data_layer", ".", "data_iterator", ")", ":", "if", "args", ".", "mode", "==", "3", ":", "(", "t_audio_signal_e", ",", "t_a_sig_length_e", ",", "transcript_list", ",", "t_transcript_e", ",", "t_transcript_len_e", ")", "=", "audio_processor", "(", "data", ")", "h_rnns", "=", "(", "None", ",", "None", ")", "label", "=", "[", "]", "hidden", "=", "None", "#greedy decode on cpu ", "t_transcript_e", "=", "torch", ".", "nn", ".", "utils", ".", "rnn", ".", "pad_packed_sequence", "(", "t_transcript_e", ",", "batch_first", "=", "True", ")", "[", "0", "]", "t_predictions_e", ",", "h_pre_rnns", ",", "hidden_predict", ",", "decode_batch_length", "=", "greedy_decoder", ".", "decode", "(", "t_audio_signal_e", ",", "t_a_sig_length_e", ",", "h_rnns", ",", "label", ",", "hidden", ",", "None", ")", "else", ":", "Process_ver", "=", "MyProcess", "(", "data", ",", "audio_processor", ",", "_global_var_dict", ",", "labels", ",", "ver_Process", ",", "rnnt_hw_model", ",", "greedy_decoder", ")", "Process_ver", ".", "start", "(", ")", "Processnum", ".", "append", "(", "it", ")", "if", "args", ".", "steps", "is", "not", "None", "and", "it", "+", "1", ">=", "args", ".", "steps", ":", "break", "if", "args", ".", "mode", "!=", "3", ":", "for", "id", "in", "Processnum", ":", "Process_ver", ".", "join", "(", ")", "else", ":", "values_dict", "=", "dict", "(", "predictions", "=", "[", "t_predictions_e", "]", ",", "transcript", "=", "transcript_list", ",", "transcript_length", "=", "t_transcript_len_e", ",", ")", "process_evaluation_batch", "(", "values_dict", ",", "_global_var_dict", ",", "labels", "=", "labels", ")", "wer", "=", "process_evaluation_epoch", "(", "_global_var_dict", ")", "print", "(", "\"=================>Evaluation WER: {0}\\n\"", ".", "format", "(", "wer", ")", ")", "if", "args", ".", "save_prediction", "is", "not", "None", ":", "with", "open", "(", "args", ".", "save_prediction", ",", "'w'", ")", "as", "fp", ":", "fp", ".", "write", "(", "'\\n'", ".", "join", "(", "_global_var_dict", "[", "'predictions'", "]", ")", ")", "end_t", "=", "time", ".", "time", "(", ")", "if", "args", ".", "mode", "==", "1", ":", "print", "(", "'dpu computation time (lstm_run time)'", ",", "sum", "(", "lstm_run_time_t", ")", ")", "print", "(", "'e2e decode time:'", ",", "end_t", "-", "start_t", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/demo/DPU-for-RNN/rnnt_asr_vck5000/inference.py#L138-L205
blackberry/Boost
fc90c3fde129c62565c023f091eddc4a7ed9902b
tools/build/v2/build/build_request.py
python
expand_no_defaults
(property_sets)
return [property_set.create(p) for p in product]
Expand the given build request by combining all property_sets which don't specify conflicting non-free features.
Expand the given build request by combining all property_sets which don't specify conflicting non-free features.
[ "Expand", "the", "given", "build", "request", "by", "combining", "all", "property_sets", "which", "don", "t", "specify", "conflicting", "non", "-", "free", "features", "." ]
def expand_no_defaults (property_sets): """ Expand the given build request by combining all property_sets which don't specify conflicting non-free features. """ # First make all features and subfeatures explicit expanded_property_sets = [ps.expand_subfeatures() for ps in property_sets] # Now combine all of the expanded property_sets product = __x_product (expanded_property_sets) return [property_set.create(p) for p in product]
[ "def", "expand_no_defaults", "(", "property_sets", ")", ":", "# First make all features and subfeatures explicit", "expanded_property_sets", "=", "[", "ps", ".", "expand_subfeatures", "(", ")", "for", "ps", "in", "property_sets", "]", "# Now combine all of the expanded property_sets", "product", "=", "__x_product", "(", "expanded_property_sets", ")", "return", "[", "property_set", ".", "create", "(", "p", ")", "for", "p", "in", "product", "]" ]
https://github.com/blackberry/Boost/blob/fc90c3fde129c62565c023f091eddc4a7ed9902b/tools/build/v2/build/build_request.py#L16-L26
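The combination step is essentially a cartesian product across the expanded property sets; a simplified sketch with `itertools.product` standing in for `__x_product` (which additionally skips combinations with conflicting non-free features):

```python
from itertools import product

# one list of alternatives per build request (simplified: the conflict
# filtering that __x_product performs is omitted here)
requests = [["<toolset>gcc", "<toolset>msvc"],
            ["<variant>debug", "<variant>release"]]

for combo in product(*requests):
    print(list(combo))
# 4 combined property sets: gcc/debug, gcc/release, msvc/debug, msvc/release
```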
pyne/pyne
0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3
pyne/dbgen/ndsfpy.py
python
parse_num
(dst)
return base * 10 ** float(exp)
Converts HTML numbers with exponents (e.g. '1.2x10<sup>3</sup>') to floats
Converts HTML numbers with exponents (e.g. '1.2x10<sup>3</sup>') to floats
[ "Converts", "html", "numbers", "with", "exponents", "to", "floats" ]
def parse_num(dst): """Converts html numbers with exponents to floats """ nums = dst.split('x') base = float(nums[0]) exp = (nums[1].split('<sup>')[1]).split('</sup>')[0] return base * 10 ** float(exp)
[ "def", "parse_num", "(", "dst", ")", ":", "nums", "=", "dst", ".", "split", "(", "'x'", ")", "base", "=", "float", "(", "nums", "[", "0", "]", ")", "exp", "=", "(", "nums", "[", "1", "]", ".", "split", "(", "'<sup>'", ")", "[", "1", "]", ")", ".", "split", "(", "'</sup>'", ")", "[", "0", "]", "return", "base", "*", "10", "**", "float", "(", "exp", ")" ]
https://github.com/pyne/pyne/blob/0c2714d7c0d1b5e20be6ae6527da2c660dd6b1b3/pyne/dbgen/ndsfpy.py#L98-L104
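A quick usage check, assuming `parse_num` from the record is in scope; the split logic expects inputs shaped like '1.2x10<sup>3</sup>':

```python
print(parse_num("1.2x10<sup>3</sup>"))     # 1200.0
print(parse_num("6.022x10<sup>23</sup>"))  # 6.022e+23
```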
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/telemetry/telemetry/value/summarizable.py
python
SummarizableValue.GetBuildbotValue
(self)
Returns the buildbot's equivalent value.
Returns the buildbot's equivalent value.
[ "Returns", "the", "buildbot", "s", "equivalent", "value", "." ]
def GetBuildbotValue(self): """Returns the buildbot's equivalent value.""" raise NotImplementedError()
[ "def", "GetBuildbotValue", "(", "self", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/telemetry/telemetry/value/summarizable.py#L48-L50
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_windows.py
python
PrintDialogData.GetNoCopies
(*args, **kwargs)
return _windows_.PrintDialogData_GetNoCopies(*args, **kwargs)
GetNoCopies(self) -> int
GetNoCopies(self) -> int
[ "GetNoCopies", "(", "self", ")", "-", ">", "int" ]
def GetNoCopies(*args, **kwargs): """GetNoCopies(self) -> int""" return _windows_.PrintDialogData_GetNoCopies(*args, **kwargs)
[ "def", "GetNoCopies", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "PrintDialogData_GetNoCopies", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_windows.py#L5058-L5060
linyouhappy/kongkongxiyou
7a69b2913eb29f4be77f9a62fb90cdd72c4160f1
cocosjs/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py
python
TokenKind.register
(value, name)
Register a new TokenKind enumeration. This should only be called at module load time by code within this package.
Register a new TokenKind enumeration.
[ "Register", "a", "new", "TokenKind", "enumeration", "." ]
def register(value, name): """Register a new TokenKind enumeration. This should only be called at module load time by code within this package. """ if value in TokenKind._value_map: raise ValueError('TokenKind already registered: %d' % value) kind = TokenKind(value, name) TokenKind._value_map[value] = kind setattr(TokenKind, name, kind)
[ "def", "register", "(", "value", ",", "name", ")", ":", "if", "value", "in", "TokenKind", ".", "_value_map", ":", "raise", "ValueError", "(", "'TokenKind already registered: %d'", "%", "value", ")", "kind", "=", "TokenKind", "(", "value", ",", "name", ")", "TokenKind", ".", "_value_map", "[", "value", "]", "=", "kind", "setattr", "(", "TokenKind", ",", "name", ",", "kind", ")" ]
https://github.com/linyouhappy/kongkongxiyou/blob/7a69b2913eb29f4be77f9a62fb90cdd72c4160f1/cocosjs/frameworks/cocos2d-x/tools/bindings-generator/clang/cindex.py#L575-L586
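A self-contained re-creation of the registry idiom above (the `__init__` and `__repr__` here are assumptions; the record only shows `register`):

```python
class TokenKind:
    """Registry idiom: one canonical instance per enumeration value."""
    _value_map = {}

    def __init__(self, value, name):   # assumed; the record shows only register()
        self.value, self.name = value, name

    def __repr__(self):
        return 'TokenKind.%s' % self.name

    @staticmethod
    def register(value, name):
        if value in TokenKind._value_map:
            raise ValueError('TokenKind already registered: %d' % value)
        kind = TokenKind(value, name)
        TokenKind._value_map[value] = kind
        setattr(TokenKind, name, kind)

TokenKind.register(0, 'PUNCTUATION')
TokenKind.register(1, 'KEYWORD')
print(TokenKind.KEYWORD, TokenKind._value_map[0])
# TokenKind.KEYWORD TokenKind.PUNCTUATION
```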
larroy/clearskies_core
3574ddf0edc8555454c7044126e786a6c29444dc
tools/gyp/pylib/gyp/MSVSProject.py
python
Writer._AddFilesToNode
(self, parent, files)
Adds files and/or filters to the parent node. Args: parent: Destination node files: A list of Filter objects and/or relative paths to files. Will call itself recursively, if the files list contains Filter objects.
Adds files and/or filters to the parent node.
[ "Adds", "files", "and", "/", "or", "filters", "to", "the", "parent", "node", "." ]
def _AddFilesToNode(self, parent, files): """Adds files and/or filters to the parent node. Args: parent: Destination node files: A list of Filter objects and/or relative paths to files. Will call itself recursively, if the files list contains Filter objects. """ for f in files: if isinstance(f, Filter): node = ['Filter', {'Name': f.name}] self._AddFilesToNode(node, f.contents) else: node = ['File', {'RelativePath': f}] self.files_dict[f] = node parent.append(node)
[ "def", "_AddFilesToNode", "(", "self", ",", "parent", ",", "files", ")", ":", "for", "f", "in", "files", ":", "if", "isinstance", "(", "f", ",", "Filter", ")", ":", "node", "=", "[", "'Filter'", ",", "{", "'Name'", ":", "f", ".", "name", "}", "]", "self", ".", "_AddFilesToNode", "(", "node", ",", "f", ".", "contents", ")", "else", ":", "node", "=", "[", "'File'", ",", "{", "'RelativePath'", ":", "f", "}", "]", "self", ".", "files_dict", "[", "f", "]", "=", "node", "parent", ".", "append", "(", "node", ")" ]
https://github.com/larroy/clearskies_core/blob/3574ddf0edc8555454c7044126e786a6c29444dc/tools/gyp/pylib/gyp/MSVSProject.py#L134-L150
ros-planning/moveit2
dd240ef6fd8b9932a7a53964140f2952786187a9
moveit_ros/benchmarks/scripts/moveit_benchmark_statistics.py
python
plotAttribute
(cur, planners, attribute, typename)
Create a plot for a particular attribute. It will include data for all planners that have data for this attribute.
Create a plot for a particular attribute. It will include data for all planners that have data for this attribute.
[ "Create", "a", "plot", "for", "a", "particular", "attribute", ".", "It", "will", "include", "data", "for", "all", "planners", "that", "have", "data", "for", "this", "attribute", "." ]
def plotAttribute(cur, planners, attribute, typename): """Create a plot for a particular attribute. It will include data for all planners that have data for this attribute.""" labels = [] measurements = [] nanCounts = [] if typename == "ENUM": cur.execute('SELECT description FROM enums where name IS "%s"' % attribute) descriptions = [t[0] for t in cur.fetchall()] numValues = len(descriptions) for planner in planners: cur.execute( "SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL" % (attribute, planner[0], attribute) ) measurement = [t[0] for t in cur.fetchall() if t[0] != None] if len(measurement) > 0: cur.execute( "SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL" % (planner[0], attribute) ) nanCounts.append(cur.fetchone()[0]) labels.append(planner[1]) if typename == "ENUM": scale = 100.0 / len(measurement) measurements.append( [measurement.count(i) * scale for i in range(numValues)] ) else: measurements.append(measurement) if len(measurements) == 0: print('Skipping "%s": no available measurements' % attribute) return plt.clf() ax = plt.gca() if typename == "ENUM": width = 0.5 measurements = np.transpose(np.vstack(measurements)) colsum = np.sum(measurements, axis=1) rows = np.where(colsum != 0)[0] heights = np.zeros((1, measurements.shape[1])) ind = range(measurements.shape[1]) legend_labels = [] for i in rows: plt.bar( ind, measurements[i], width, bottom=heights[0], color=matplotlib.cm.hot(int(floor(i * 256 / numValues))), label=descriptions[i], ) heights = heights + measurements[i] xtickNames = plt.xticks( [x + width / 2.0 for x in ind], labels, rotation=30, fontsize=8, ha="right" ) ax.set_ylabel(attribute.replace("_", " ") + " (%)") box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) props = matplotlib.font_manager.FontProperties() props.set_size("small") ax.legend(loc="center left", bbox_to_anchor=(1, 0.5), prop=props) elif typename == "BOOLEAN": width = 0.5 measurementsPercentage = [sum(m) * 100.0 / len(m) for m in measurements] ind = range(len(measurements)) plt.bar(ind, measurementsPercentage, width) ### uncommenting this line will remove the term 'kConfigDefault' from the labels for OMPL Solvers. ### Fits situations where you need more control in the plot, such as in an academic publication for example # labels = [l.replace('kConfigDefault', '') for l in labels] xtickNames = plt.xticks( [x + width / 2.0 for x in ind], labels, rotation=30, fontsize=8, ha="right" ) ax.set_ylabel(attribute.replace("_", " ") + " (%)") plt.subplots_adjust( bottom=0.3 ) # Squish the plot into the upper 2/3 of the page. Leave room for labels else: if int(matplotlibversion.split(".")[0]) < 1: plt.boxplot(measurements, notch=0, sym="k+", vert=1, whis=1.5) else: plt.boxplot( measurements, notch=0, sym="k+", vert=1, whis=1.5, bootstrap=1000 ) ax.set_ylabel(attribute.replace("_", " ")) # xtickNames = plt.xticks(labels, rotation=30, fontsize=10) # plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels ### uncommenting this line will remove the term 'kConfigDefault' from the labels for OMPL Solvers. ### Fits situations where you need more control in the plot, such as in an academic publication for example # labels = [l.replace('kConfigDefault', '') for l in labels] xtickNames = plt.setp(ax, xticklabels=labels) plt.setp(xtickNames, rotation=30, fontsize=8, ha="right") for ( tick ) in ax.xaxis.get_major_ticks(): # shrink the font size of the x tick labels tick.label.set_fontsize(8) plt.subplots_adjust( bottom=0.3 ) # Squish the plot into the upper 2/3 of the page. Leave room for labels ax.set_xlabel("Motion planning algorithm", fontsize=12) ax.yaxis.grid(True, linestyle="-", which="major", color="lightgrey", alpha=0.5) if max(nanCounts) > 0: maxy = max([max(y) for y in measurements]) for i in range(len(labels)): x = i + width / 2 if typename == "BOOLEAN" else i + 1 ### uncommenting the next line, the number of failed planning attempts will be added to each bar # ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small') plt.show()
[ "def", "plotAttribute", "(", "cur", ",", "planners", ",", "attribute", ",", "typename", ")", ":", "labels", "=", "[", "]", "measurements", "=", "[", "]", "nanCounts", "=", "[", "]", "if", "typename", "==", "\"ENUM\"", ":", "cur", ".", "execute", "(", "'SELECT description FROM enums where name IS \"%s\"'", "%", "attribute", ")", "descriptions", "=", "[", "t", "[", "0", "]", "for", "t", "in", "cur", ".", "fetchall", "(", ")", "]", "numValues", "=", "len", "(", "descriptions", ")", "for", "planner", "in", "planners", ":", "cur", ".", "execute", "(", "\"SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL\"", "%", "(", "attribute", ",", "planner", "[", "0", "]", ",", "attribute", ")", ")", "measurement", "=", "[", "t", "[", "0", "]", "for", "t", "in", "cur", ".", "fetchall", "(", ")", "if", "t", "[", "0", "]", "!=", "None", "]", "if", "len", "(", "measurement", ")", ">", "0", ":", "cur", ".", "execute", "(", "\"SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL\"", "%", "(", "planner", "[", "0", "]", ",", "attribute", ")", ")", "nanCounts", ".", "append", "(", "cur", ".", "fetchone", "(", ")", "[", "0", "]", ")", "labels", ".", "append", "(", "planner", "[", "1", "]", ")", "if", "typename", "==", "\"ENUM\"", ":", "scale", "=", "100.0", "/", "len", "(", "measurement", ")", "measurements", ".", "append", "(", "[", "measurement", ".", "count", "(", "i", ")", "*", "scale", "for", "i", "in", "range", "(", "numValues", ")", "]", ")", "else", ":", "measurements", ".", "append", "(", "measurement", ")", "if", "len", "(", "measurements", ")", "==", "0", ":", "print", "(", "'Skipping \"%s\": no available measurements'", "%", "attribute", ")", "return", "plt", ".", "clf", "(", ")", "ax", "=", "plt", ".", "gca", "(", ")", "if", "typename", "==", "\"ENUM\"", ":", "width", "=", "0.5", "measurements", "=", "np", ".", "transpose", "(", "np", ".", "vstack", "(", "measurements", ")", ")", "colsum", "=", "np", ".", "sum", "(", "measurements", ",", "axis", "=", "1", ")", "rows", "=", "np", ".", "where", "(", "colsum", "!=", "0", ")", "[", "0", "]", "heights", "=", "np", ".", "zeros", "(", "(", "1", ",", "measurements", ".", "shape", "[", "1", "]", ")", ")", "ind", "=", "range", "(", "measurements", ".", "shape", "[", "1", "]", ")", "legend_labels", "=", "[", "]", "for", "i", "in", "rows", ":", "plt", ".", "bar", "(", "ind", ",", "measurements", "[", "i", "]", ",", "width", ",", "bottom", "=", "heights", "[", "0", "]", ",", "color", "=", "matplotlib", ".", "cm", ".", "hot", "(", "int", "(", "floor", "(", "i", "*", "256", "/", "numValues", ")", ")", ")", ",", "label", "=", "descriptions", "[", "i", "]", ",", ")", "heights", "=", "heights", "+", "measurements", "[", "i", "]", "xtickNames", "=", "plt", ".", "xticks", "(", "[", "x", "+", "width", "/", "2.0", "for", "x", "in", "ind", "]", ",", "labels", ",", "rotation", "=", "30", ",", "fontsize", "=", "8", ",", "ha", "=", "\"right\"", ")", "ax", ".", "set_ylabel", "(", "attribute", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "+", "\" (%)\"", ")", "box", "=", "ax", ".", "get_position", "(", ")", "ax", ".", "set_position", "(", "[", "box", ".", "x0", ",", "box", ".", "y0", ",", "box", ".", "width", "*", "0.8", ",", "box", ".", "height", "]", ")", "props", "=", "matplotlib", ".", "font_manager", ".", "FontProperties", "(", ")", "props", ".", "set_size", "(", "\"small\"", ")", "ax", ".", "legend", "(", "loc", "=", "\"center left\"", ",", "bbox_to_anchor", "=", "(", "1", ",", "0.5", ")", ",", "prop", "=", "props", ")", "elif", "typename", "==", 
"\"BOOLEAN\"", ":", "width", "=", "0.5", "measurementsPercentage", "=", "[", "sum", "(", "m", ")", "*", "100.0", "/", "len", "(", "m", ")", "for", "m", "in", "measurements", "]", "ind", "=", "range", "(", "len", "(", "measurements", ")", ")", "plt", ".", "bar", "(", "ind", ",", "measurementsPercentage", ",", "width", ")", "### uncommenting this line will remove the term 'kConfigDefault' from the labels for OMPL Solvers.", "### Fits situations where you need more control in the plot, such as in an academic publication for example", "# labels = [l.replace('kConfigDefault', '') for l in labels]", "xtickNames", "=", "plt", ".", "xticks", "(", "[", "x", "+", "width", "/", "2.0", "for", "x", "in", "ind", "]", ",", "labels", ",", "rotation", "=", "30", ",", "fontsize", "=", "8", ",", "ha", "=", "\"right\"", ")", "ax", ".", "set_ylabel", "(", "attribute", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", "+", "\" (%)\"", ")", "plt", ".", "subplots_adjust", "(", "bottom", "=", "0.3", ")", "# Squish the plot into the upper 2/3 of the page. Leave room for labels", "else", ":", "if", "int", "(", "matplotlibversion", ".", "split", "(", "\".\"", ")", "[", "0", "]", ")", "<", "1", ":", "plt", ".", "boxplot", "(", "measurements", ",", "notch", "=", "0", ",", "sym", "=", "\"k+\"", ",", "vert", "=", "1", ",", "whis", "=", "1.5", ")", "else", ":", "plt", ".", "boxplot", "(", "measurements", ",", "notch", "=", "0", ",", "sym", "=", "\"k+\"", ",", "vert", "=", "1", ",", "whis", "=", "1.5", ",", "bootstrap", "=", "1000", ")", "ax", ".", "set_ylabel", "(", "attribute", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", ")", "# xtickNames = plt.xticks(labels, rotation=30, fontsize=10)", "# plt.subplots_adjust(bottom=0.3) # Squish the plot into the upper 2/3 of the page. Leave room for labels", "### uncommenting this line will remove the term 'kConfigDefault' from the labels for OMPL Solvers.", "### Fits situations where you need more control in the plot, such as in an academic publication for example", "# labels = [l.replace('kConfigDefault', '') for l in labels]", "xtickNames", "=", "plt", ".", "setp", "(", "ax", ",", "xticklabels", "=", "labels", ")", "plt", ".", "setp", "(", "xtickNames", ",", "rotation", "=", "30", ",", "fontsize", "=", "8", ",", "ha", "=", "\"right\"", ")", "for", "(", "tick", ")", "in", "ax", ".", "xaxis", ".", "get_major_ticks", "(", ")", ":", "# shrink the font size of the x tick labels", "tick", ".", "label", ".", "set_fontsize", "(", "8", ")", "plt", ".", "subplots_adjust", "(", "bottom", "=", "0.3", ")", "# Squish the plot into the upper 2/3 of the page. Leave room for labels", "ax", ".", "set_xlabel", "(", "\"Motion planning algorithm\"", ",", "fontsize", "=", "12", ")", "ax", ".", "yaxis", ".", "grid", "(", "True", ",", "linestyle", "=", "\"-\"", ",", "which", "=", "\"major\"", ",", "color", "=", "\"lightgrey\"", ",", "alpha", "=", "0.5", ")", "if", "max", "(", "nanCounts", ")", ">", "0", ":", "maxy", "=", "max", "(", "[", "max", "(", "y", ")", "for", "y", "in", "measurements", "]", ")", "for", "i", "in", "range", "(", "len", "(", "labels", ")", ")", ":", "x", "=", "i", "+", "width", "/", "2", "if", "typename", "==", "\"BOOLEAN\"", "else", "i", "+", "1", "### uncommenting the next line, the number of failed planning attempts will be added to each bar", "# ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')", "plt", ".", "show", "(", ")" ]
https://github.com/ros-planning/moveit2/blob/dd240ef6fd8b9932a7a53964140f2952786187a9/moveit_ros/benchmarks/scripts/moveit_benchmark_statistics.py#L392-L505
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/shutil.py
python
get_archive_formats
()
return formats
Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description)
Returns a list of supported formats for archiving and unarchiving.
[ "Returns", "a", "list", "of", "supported", "formats", "for", "archiving", "and", "unarchiving", "." ]
def get_archive_formats(): """Returns a list of supported formats for archiving and unarchiving. Each element of the returned sequence is a tuple (name, description) """ formats = [(name, registry[2]) for name, registry in _ARCHIVE_FORMATS.items()] formats.sort() return formats
[ "def", "get_archive_formats", "(", ")", ":", "formats", "=", "[", "(", "name", ",", "registry", "[", "2", "]", ")", "for", "name", ",", "registry", "in", "_ARCHIVE_FORMATS", ".", "items", "(", ")", "]", "formats", ".", "sort", "(", ")", "return", "formats" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/shutil.py#L470-L478
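This is the stdlib `shutil` API, so usage is direct; the exact formats listed depend on which compression modules are available:

```python
import shutil

for name, description in shutil.get_archive_formats():
    print("%-8s %s" % (name, description))
# typically includes: gztar, tar, zip (plus bztar/xztar when available)
```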
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/dataview.py
python
DataViewModel.IsContainer
(*args, **kwargs)
return _dataview.DataViewModel_IsContainer(*args, **kwargs)
IsContainer(self, DataViewItem item) -> bool Override this to indicate whether an item is a container, in other words, if it is a parent item that can have children.
IsContainer(self, DataViewItem item) -> bool
[ "IsContainer", "(", "self", "DataViewItem", "item", ")", "-", ">", "bool" ]
def IsContainer(*args, **kwargs): """ IsContainer(self, DataViewItem item) -> bool Override this to indicate whether an item is a container, in other words, if it is a parent item that can have children. """ return _dataview.DataViewModel_IsContainer(*args, **kwargs)
[ "def", "IsContainer", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_dataview", ".", "DataViewModel_IsContainer", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/dataview.py#L524-L531
hpi-xnor/BMXNet-v2
af2b1859eafc5c721b1397cef02f946aaf2ce20d
python/mxnet/gluon/utils.py
python
shape_is_known
(shape)
return True
Check whether a shape is completely known with or without np semantics. Please see the doc of is_np_shape for more details.
Check whether a shape is completely known with or without np semantics.
[ "Check", "whether", "a", "shape", "is", "completely", "known", "with", "or", "without", "np", "semantics", "." ]
def shape_is_known(shape): """Check whether a shape is completely known with or without np semantics. Please see the doc of is_np_shape for more details. """ if shape is None: return False unknown_dim_size = -1 if is_np_shape() else 0 if len(shape) == 0: return unknown_dim_size == -1 for dim_size in shape: if dim_size == unknown_dim_size: return False assert dim_size > unknown_dim_size, "shape dimension size cannot be less than {}, while " \ "received {}".format(unknown_dim_size, dim_size) return True
[ "def", "shape_is_known", "(", "shape", ")", ":", "if", "shape", "is", "None", ":", "return", "False", "unknown_dim_size", "=", "-", "1", "if", "is_np_shape", "(", ")", "else", "0", "if", "len", "(", "shape", ")", "==", "0", ":", "return", "unknown_dim_size", "==", "-", "1", "for", "dim_size", "in", "shape", ":", "if", "dim_size", "==", "unknown_dim_size", ":", "return", "False", "assert", "dim_size", ">", "unknown_dim_size", ",", "\"shape dimension size cannot be less than {}, while \"", "\"received {}\"", ".", "format", "(", "unknown_dim_size", ",", "dim_size", ")", "return", "True" ]
https://github.com/hpi-xnor/BMXNet-v2/blob/af2b1859eafc5c721b1397cef02f946aaf2ce20d/python/mxnet/gluon/utils.py#L417-L432
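A standalone sketch of the same check, with the `is_np_shape()` lookup replaced by an explicit flag so it runs outside MXNet:

```python
def shape_is_known(shape, np_shape_semantics=True):
    """Sketch: -1 marks an unknown dim under np semantics, 0 otherwise."""
    if shape is None:
        return False
    unknown = -1 if np_shape_semantics else 0
    if len(shape) == 0:        # scalar shape () is only valid under np semantics
        return unknown == -1
    return all(dim != unknown for dim in shape)

print(shape_is_known((2, 3)))                            # True
print(shape_is_known((2, -1)))                           # False
print(shape_is_known((2, 0), np_shape_semantics=False))  # False: 0 means unknown
```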
polyworld/polyworld
eb7e6bbc82fe77ba79e3bc48c3da2ad8c8238c26
scripts/agent/agent.py
python
Agent.func
(self)
return self._get_func()
Lazy loading of brain function file
Lazy loading of brain function file
[ "Lazy", "loading", "of", "brain", "function", "file" ]
def func(self): ''' Lazy loading of brain function file ''' return self._get_func()
[ "def", "func", "(", "self", ")", ":", "return", "self", ".", "_get_func", "(", ")" ]
https://github.com/polyworld/polyworld/blob/eb7e6bbc82fe77ba79e3bc48c3da2ad8c8238c26/scripts/agent/agent.py#L185-L187
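One idiomatic way to get the same lazy-load-once behaviour is `functools.cached_property` (Python 3.8+); the loaded value here is a stand-in for the parsed brain function file:

```python
import functools

class Agent:
    @functools.cached_property
    def func(self):
        """Loaded on first access, cached for every access after that."""
        print("loading brain function file ...")
        return {"timesteps": 1000}   # stand-in for the parsed file contents

a = Agent()
a.func   # triggers the load (prints once)
a.func   # served from the cache, no reload
```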
DLR-SC/tigl
d1c5901e948e33d10b1f9659ff3e22c4717b455f
thirdparty/doxy2swig/doxy2swig.py
python
Doxy2SWIG.get_specific_nodes
(self, node, names)
return dict(nodes)
Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and the child `ELEMENT_NODE`s that have a `tagName` equal to the name.
Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and the child `ELEMENT_NODE`s that have a `tagName` equal to the name.
[ "Given", "a", "node", "and", "a", "sequence", "of", "strings", "in", "names", "return", "a", "dictionary", "containing", "the", "names", "as", "keys", "and", "child", "ELEMENT_NODEs", "that", "have", "a", "tagName", "equal", "to", "the", "name", "." ]
def get_specific_nodes(self, node, names): """Given a node and a sequence of strings in `names`, return a dictionary containing the names as keys and child `ELEMENT_NODEs`, that have a `tagName` equal to the name. """ nodes = [(x.tagName, x) for x in node.childNodes if x.nodeType == x.ELEMENT_NODE and x.tagName in names] return dict(nodes)
[ "def", "get_specific_nodes", "(", "self", ",", "node", ",", "names", ")", ":", "nodes", "=", "[", "(", "x", ".", "tagName", ",", "x", ")", "for", "x", "in", "node", ".", "childNodes", "if", "x", ".", "nodeType", "==", "x", ".", "ELEMENT_NODE", "and", "x", ".", "tagName", "in", "names", "]", "return", "dict", "(", "nodes", ")" ]
https://github.com/DLR-SC/tigl/blob/d1c5901e948e33d10b1f9659ff3e22c4717b455f/thirdparty/doxy2swig/doxy2swig.py#L275-L284
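The method only touches `childNodes`/`tagName`, so it works on any DOM node; a self-contained check with `xml.dom.minidom` (the XML sample is invented):

```python
from xml.dom.minidom import parseString

def get_specific_nodes(node, names):
    nodes = [(x.tagName, x) for x in node.childNodes
             if x.nodeType == x.ELEMENT_NODE and x.tagName in names]
    return dict(nodes)

doc = parseString("<memberdef><name>f</name><type>int</type>"
                  "<briefdescription/></memberdef>")
found = get_specific_nodes(doc.documentElement, ('name', 'type'))
print(sorted(found))           # ['name', 'type']
print(found['name'].toxml())   # <name>f</name>
```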
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/indexes/multi.py
python
MultiIndex.from_tuples
(cls, tuples, sortorder=None, names=None)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color'])
Convert list of tuples to MultiIndex.
[ "Convert", "list", "of", "tuples", "to", "MultiIndex", "." ]
def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex([(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')], names=['number', 'color']) """ if not is_list_like(tuples): raise TypeError("Input must be a list / sequence of tuple-likes.") elif is_iterator(tuples): tuples = list(tuples) if len(tuples) == 0: if names is None: raise TypeError("Cannot infer number of levels from empty list") arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = tuples._values arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrays = zip(*tuples) return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
[ "def", "from_tuples", "(", "cls", ",", "tuples", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "if", "not", "is_list_like", "(", "tuples", ")", ":", "raise", "TypeError", "(", "\"Input must be a list / sequence of tuple-likes.\"", ")", "elif", "is_iterator", "(", "tuples", ")", ":", "tuples", "=", "list", "(", "tuples", ")", "if", "len", "(", "tuples", ")", "==", "0", ":", "if", "names", "is", "None", ":", "raise", "TypeError", "(", "\"Cannot infer number of levels from empty list\"", ")", "arrays", "=", "[", "[", "]", "]", "*", "len", "(", "names", ")", "elif", "isinstance", "(", "tuples", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "if", "isinstance", "(", "tuples", ",", "Index", ")", ":", "tuples", "=", "tuples", ".", "_values", "arrays", "=", "list", "(", "lib", ".", "tuples_to_object_array", "(", "tuples", ")", ".", "T", ")", "elif", "isinstance", "(", "tuples", ",", "list", ")", ":", "arrays", "=", "list", "(", "lib", ".", "to_object_array_tuples", "(", "tuples", ")", ".", "T", ")", "else", ":", "arrays", "=", "zip", "(", "*", "tuples", ")", "return", "MultiIndex", ".", "from_arrays", "(", "arrays", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/indexes/multi.py#L440-L495
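Usage against the real pandas API, extending the docstring's example with indexing on the resulting `MultiIndex`:

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")],
    names=("number", "color"),
)
s = pd.Series(range(4), index=idx)
print(s.loc[1])           # partial indexing: both rows with number == 1
print(s.loc[(2, "red")])  # full-key lookup -> 2
```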
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/package_index.py
python
unique_everseen
(iterable, key=None)
List unique elements, preserving order. Remember all elements ever seen.
List unique elements, preserving order. Remember all elements ever seen.
[ "List", "unique", "elements", "preserving", "order", ".", "Remember", "all", "elements", "ever", "seen", "." ]
def unique_everseen(iterable, key=None): "List unique elements, preserving order. Remember all elements ever seen." # unique_everseen('AAAABBBCCDAABBB') --> A B C D # unique_everseen('ABBCcAD', str.lower) --> A B C D seen = set() seen_add = seen.add if key is None: for element in six.moves.filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element
[ "def", "unique_everseen", "(", "iterable", ",", "key", "=", "None", ")", ":", "# unique_everseen('AAAABBBCCDAABBB') --> A B C D", "# unique_everseen('ABBCcAD', str.lower) --> A B C D", "seen", "=", "set", "(", ")", "seen_add", "=", "seen", ".", "add", "if", "key", "is", "None", ":", "for", "element", "in", "six", ".", "moves", ".", "filterfalse", "(", "seen", ".", "__contains__", ",", "iterable", ")", ":", "seen_add", "(", "element", ")", "yield", "element", "else", ":", "for", "element", "in", "iterable", ":", "k", "=", "key", "(", "element", ")", "if", "k", "not", "in", "seen", ":", "seen_add", "(", "k", ")", "yield", "element" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/package_index.py#L187-L202
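The record's version goes through `six` for Python 2 compatibility; a Python 3 equivalent of the same itertools recipe, with the doctest lines from the comments executed:

```python
from itertools import filterfalse  # Python 3 spelling of six.moves.filterfalse

def unique_everseen(iterable, key=None):
    seen = set()
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen.add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen.add(k)
                yield element

print(list(unique_everseen("AAAABBBCCDAABBB")))     # ['A', 'B', 'C', 'D']
print(list(unique_everseen("ABBCcAD", str.lower)))  # ['A', 'B', 'C', 'D']
```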
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/indexes/multi.py
python
MultiIndex._format_attrs
(self)
return format_object_attrs(self, include_dtype=False)
Return a list of tuples of the (attr,formatted_value).
Return a list of tuples of the (attr,formatted_value).
[ "Return", "a", "list", "of", "tuples", "of", "the", "(", "attr", "formatted_value", ")", "." ]
def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value). """ return format_object_attrs(self, include_dtype=False)
[ "def", "_format_attrs", "(", "self", ")", ":", "return", "format_object_attrs", "(", "self", ",", "include_dtype", "=", "False", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/indexes/multi.py#L1063-L1067
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/boringssl/src/util/bot/go/bootstrap.py
python
read_file
(path)
Returns contents of a given file or None if not readable.
Returns contents of a given file or None if not readable.
[ "Returns", "contents", "of", "a", "given", "file", "or", "None", "if", "not", "readable", "." ]
def read_file(path): """Returns contents of a given file or None if not readable.""" assert isinstance(path, (list, tuple)) try: with open(os.path.join(*path), 'r') as f: return f.read() except IOError: return None
[ "def", "read_file", "(", "path", ")", ":", "assert", "isinstance", "(", "path", ",", "(", "list", ",", "tuple", ")", ")", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "*", "path", ")", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "IOError", ":", "return", "None" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/boringssl/src/util/bot/go/bootstrap.py#L83-L90
nsnam/ns-3-dev-git
efdb2e21f45c0a87a60b47c547b68fa140a7b686
waf-tools/misc.py
python
subst_func
(tsk)
Substitutes variables in a .in file
Substitutes variables in a .in file
[ "Substitutes", "variables", "in", "a", ".", "in", "file" ]
def subst_func(tsk): "Substitutes variables in a .in file" m4_re = re.compile('@(\w+)@', re.M) code = tsk.inputs[0].read() #Utils.readf(infile) # replace all % by %% to prevent errors by % signs in the input file while string formatting code = code.replace('%', '%%') s = m4_re.sub(r'%(\1)s', code) env = tsk.env di = getattr(tsk, 'dict', {}) or getattr(tsk.generator, 'dict', {}) if not di: names = m4_re.findall(code) for i in names: di[i] = env.get_flat(i) or env.get_flat(i.upper()) tsk.outputs[0].write(s % di)
[ "def", "subst_func", "(", "tsk", ")", ":", "m4_re", "=", "re", ".", "compile", "(", "'@(\\w+)@'", ",", "re", ".", "M", ")", "code", "=", "tsk", ".", "inputs", "[", "0", "]", ".", "read", "(", ")", "#Utils.readf(infile)", "# replace all % by %% to prevent errors by % signs in the input file while string formatting", "code", "=", "code", ".", "replace", "(", "'%'", ",", "'%%'", ")", "s", "=", "m4_re", ".", "sub", "(", "r'%(\\1)s'", ",", "code", ")", "env", "=", "tsk", ".", "env", "di", "=", "getattr", "(", "tsk", ",", "'dict'", ",", "{", "}", ")", "or", "getattr", "(", "tsk", ".", "generator", ",", "'dict'", ",", "{", "}", ")", "if", "not", "di", ":", "names", "=", "m4_re", ".", "findall", "(", "code", ")", "for", "i", "in", "names", ":", "di", "[", "i", "]", "=", "env", ".", "get_flat", "(", "i", ")", "or", "env", ".", "get_flat", "(", "i", ".", "upper", "(", ")", ")", "tsk", ".", "outputs", "[", "0", "]", ".", "write", "(", "s", "%", "di", ")" ]
https://github.com/nsnam/ns-3-dev-git/blob/efdb2e21f45c0a87a60b47c547b68fa140a7b686/waf-tools/misc.py#L83-L102
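Outside waf, the core of `subst_func` is just regex substitution of `@NAME@` markers; a minimal sketch (this skips the env/dict fallback logic):

```python
import re

def substitute(template, values):
    """Replace @NAME@ markers -- the scheme subst_func handles in .in files."""
    return re.sub(r'@(\w+)@', lambda m: values[m.group(1)], template)

print(substitute("prefix=@PREFIX@ version=@VERSION@",
                 {"PREFIX": "/usr/local", "VERSION": "3.36"}))
# prefix=/usr/local version=3.36
```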
KratosMultiphysics/Kratos
0000833054ed0503424eb28205d6508d9ca6cbbc
kratos/python_scripts/python_solver.py
python
PythonSolver.PrepareModelPart
(self)
This function prepares the ModelPart for use by the PythonSolver
This function prepares the ModelPart for use by the PythonSolver
[ "This", "function", "prepares", "the", "ModelPart", "for", "being", "used", "by", "the", "PythonSolver" ]
def PrepareModelPart(self): """This function prepares the ModelPart for being used by the PythonSolver """ pass
[ "def", "PrepareModelPart", "(", "self", ")", ":", "pass" ]
https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/kratos/python_scripts/python_solver.py#L76-L79
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/_grad/grad_array_ops.py
python
get_bprop_reshape
(self)
return bprop
Generate bprop for Reshape
Generate bprop for Reshape
[ "Generate", "bprop", "for", "Reshape" ]
def get_bprop_reshape(self): """Generate bprop for Reshape""" def bprop(x, shp, out, dout): shapex = shape_op(x) if -1 in shapex: shapex = dyn_shape_op(x) return reshape(dout, shapex), zeros_like(shp) return bprop
[ "def", "get_bprop_reshape", "(", "self", ")", ":", "def", "bprop", "(", "x", ",", "shp", ",", "out", ",", "dout", ")", ":", "shapex", "=", "shape_op", "(", "x", ")", "if", "-", "1", "in", "shapex", ":", "shapex", "=", "dyn_shape_op", "(", "x", ")", "return", "reshape", "(", "dout", ",", "shapex", ")", ",", "zeros_like", "(", "shp", ")", "return", "bprop" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_grad/grad_array_ops.py#L180-L189
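The bprop above encodes that reshape's gradient is simply the upstream gradient reshaped back to the input's shape (with a zero gradient for the shape argument); a NumPy check of that identity:

```python
import numpy as np

x = np.arange(6.0).reshape(2, 3)   # forward: y = x.reshape(3, 2)
dout = np.ones((3, 2))             # upstream gradient w.r.t. y
dx = dout.reshape(x.shape)         # bprop: reshape dout back to x's shape
print(dx.shape)                    # (2, 3) -- matches x, as the bprop requires
```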
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/python_gflags/gflags.py
python
_GetMainModule
()
return main_module_name
Returns: string, name of the module from which execution started.
Returns: string, name of the module from which execution started.
[ "Returns", ":", "string", "name", "of", "the", "module", "from", "which", "execution", "started", "." ]
def _GetMainModule(): """Returns: string, name of the module from which execution started.""" # First, try to use the same logic used by _GetCallingModuleObjectAndName(), # i.e., call _GetModuleObjectAndName(). For that we first need to # find the dictionary that the main module uses to store the # globals. # # That's (normally) the same dictionary object that the deepest # (oldest) stack frame is using for globals. deepest_frame = sys._getframe(0) while deepest_frame.f_back is not None: deepest_frame = deepest_frame.f_back globals_for_main_module = deepest_frame.f_globals main_module_name = _GetModuleObjectAndName(globals_for_main_module)[1] # The above strategy fails in some cases (e.g., tools that compute # code coverage by redefining, among other things, the main module). # If so, just use sys.argv[0]. We can probably always do this, but # it's safest to try to use the same logic as _GetCallingModuleObjectAndName() if main_module_name is None: main_module_name = sys.argv[0] return main_module_name
[ "def", "_GetMainModule", "(", ")", ":", "# First, try to use the same logic used by _GetCallingModuleObjectAndName(),", "# i.e., call _GetModuleObjectAndName(). For that we first need to", "# find the dictionary that the main module uses to store the", "# globals.", "#", "# That's (normally) the same dictionary object that the deepest", "# (oldest) stack frame is using for globals.", "deepest_frame", "=", "sys", ".", "_getframe", "(", "0", ")", "while", "deepest_frame", ".", "f_back", "is", "not", "None", ":", "deepest_frame", "=", "deepest_frame", ".", "f_back", "globals_for_main_module", "=", "deepest_frame", ".", "f_globals", "main_module_name", "=", "_GetModuleObjectAndName", "(", "globals_for_main_module", ")", "[", "1", "]", "# The above strategy fails in some cases (e.g., tools that compute", "# code coverage by redefining, among other things, the main module).", "# If so, just use sys.argv[0]. We can probably always do this, but", "# it's safest to try to use the same logic as _GetCallingModuleObjectAndName()", "if", "main_module_name", "is", "None", ":", "main_module_name", "=", "sys", ".", "argv", "[", "0", "]", "return", "main_module_name" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/python_gflags/gflags.py#L757-L777
ablab/quast
5f6709528129a6ad266a6b24ef3f40b88f0fe04b
quast_libs/site_packages/joblib2/numpy_pickle.py
python
dump
(value, filename, compress=0, cache_size=100)
return pickler._filenames
Fast persistence of an arbitrary Python object into a file, with dedicated storage for numpy arrays. Parameters ----------- value: any Python object The object to store to disk filename: string The name of the file in which it is to be stored compress: integer from 0 to 9, optional Optional compression level for the data. 0 is no compression. Higher means more compression, but also slower read and write times. Using a value of 3 is often a good compromise. See the notes for more details. cache_size: positive number, optional Fixes the order of magnitude (in megabytes) of the cache used for in-memory compression. Note that this is just an order of magnitude estimate and that for big arrays, the code will go over this value at dump and at load time. Returns ------- filenames: list of strings The list of file names in which the data is stored. If compress is false, each array is stored in a different file. See Also -------- joblib.load : corresponding loader Notes ----- Memmapping on load cannot be used for compressed files. Thus using compression can significantly slow down loading. In addition, compressed files take extra memory during dump and load.
Fast persistence of an arbitrary Python object into a file, with dedicated storage for numpy arrays.
[ "Fast", "persistence", "of", "an", "arbitrary", "Python", "object", "into", "a", "files", "with", "dedicated", "storage", "for", "numpy", "arrays", "." ]
def dump(value, filename, compress=0, cache_size=100): """Fast persistence of an arbitrary Python object into a file, with dedicated storage for numpy arrays. Parameters ----------- value: any Python object The object to store to disk filename: string The name of the file in which it is to be stored compress: integer from 0 to 9, optional Optional compression level for the data. 0 is no compression. Higher means more compression, but also slower read and write times. Using a value of 3 is often a good compromise. See the notes for more details. cache_size: positive number, optional Fixes the order of magnitude (in megabytes) of the cache used for in-memory compression. Note that this is just an order of magnitude estimate and that for big arrays, the code will go over this value at dump and at load time. Returns ------- filenames: list of strings The list of file names in which the data is stored. If compress is false, each array is stored in a different file. See Also -------- joblib.load : corresponding loader Notes ----- Memmapping on load cannot be used for compressed files. Thus using compression can significantly slow down loading. In addition, compressed files take extra memory during dump and load. """ if not isinstance(filename, basestring): # People keep inverting arguments, and the resulting error is # incomprehensible raise ValueError( 'Second argument should be a filename, %s (type %s) was given' % (filename, type(filename)) ) try: pickler = NumpyPickler(filename, compress=compress, cache_size=cache_size) pickler.dump(value) pickler.close() finally: if 'pickler' in locals() and hasattr(pickler, 'file'): pickler.file.flush() pickler.file.close() return pickler._filenames
[ "def", "dump", "(", "value", ",", "filename", ",", "compress", "=", "0", ",", "cache_size", "=", "100", ")", ":", "if", "not", "isinstance", "(", "filename", ",", "basestring", ")", ":", "# People keep inverting arguments, and the resulting error is", "# incomprehensible", "raise", "ValueError", "(", "'Second argument should be a filename, %s (type %s) was given'", "%", "(", "filename", ",", "type", "(", "filename", ")", ")", ")", "try", ":", "pickler", "=", "NumpyPickler", "(", "filename", ",", "compress", "=", "compress", ",", "cache_size", "=", "cache_size", ")", "pickler", ".", "dump", "(", "value", ")", "pickler", ".", "close", "(", ")", "finally", ":", "if", "'pickler'", "in", "locals", "(", ")", "and", "hasattr", "(", "pickler", ",", "'file'", ")", ":", "pickler", ".", "file", ".", "flush", "(", ")", "pickler", ".", "file", ".", "close", "(", ")", "return", "pickler", ".", "_filenames" ]
https://github.com/ablab/quast/blob/5f6709528129a6ad266a6b24ef3f40b88f0fe04b/quast_libs/site_packages/joblib2/numpy_pickle.py#L313-L367
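A quick round-trip makes the dump/load contract above concrete. The sketch below targets the modern standalone joblib package; that package name and the list-of-filenames return value are assumptions carried over from the docstring, and the vendored joblib2 copy above may differ in detail.

# Minimal round-trip sketch; assumes a current joblib install (the vendored
# joblib2 copy above may behave differently, especially for compressed files).
import numpy as np
import joblib

value = {"weights": np.arange(10.0)}
filenames = joblib.dump(value, "/tmp/value.joblib", compress=3)  # list of file names
restored = joblib.load(filenames[0])
assert np.array_equal(restored["weights"], value["weights"])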
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/autograph/pyct/templates.py
python
ReplaceTransformer._prepare_replacement
(self, replaced, key)
return new_nodes
Prepares a replacement AST that's safe to swap in for a node. Args: replaced: ast.AST, the node being replaced key: Hashable, the key of the replacement AST Returns: ast.AST, the replacement AST
Prepares a replacement AST that's safe to swap in for a node.
[ "Prepares", "a", "replacement", "AST", "that", "s", "safe", "to", "swap", "in", "for", "a", "node", "." ]
def _prepare_replacement(self, replaced, key): """Prepares a replacement AST that's safe to swap in for a node. Args: replaced: ast.AST, the node being replaced key: Hashable, the key of the replacement AST Returns: ast.AST, the replacement AST """ repl = self.replacements[key] new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos) if isinstance(new_nodes, gast.AST): new_nodes = [new_nodes] return new_nodes
[ "def", "_prepare_replacement", "(", "self", ",", "replaced", ",", "key", ")", ":", "repl", "=", "self", ".", "replacements", "[", "key", "]", "new_nodes", "=", "ast_util", ".", "copy_clean", "(", "repl", ",", "preserve_annos", "=", "self", ".", "preserved_annos", ")", "if", "isinstance", "(", "new_nodes", ",", "gast", ".", "AST", ")", ":", "new_nodes", "=", "[", "new_nodes", "]", "return", "new_nodes" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/autograph/pyct/templates.py#L125-L140
appcelerator-archive/titanium_desktop
37dbaab5664e595115e2fcdc348ed125cd50b48d
site_scons/simplejson/encoder.py
python
JSONEncoder.iterencode
(self, o, _one_shot=False)
return _iterencode(o, 0)
Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk)
Encode the given object and yield each string representation as available.
[ "Encode", "the", "given", "object", "and", "yield", "each", "string", "representation", "as", "available", "." ]
def iterencode(self, o, _one_shot=False): """Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk) """ if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring if self.encoding != 'utf-8': def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): if isinstance(o, str): o = o.decode(_encoding) return _orig_encoder(o) def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY): # Check for specials. Note that this type of test is processor- and/or # platform-specific, so do tests which don't depend on the internals. if o != o: text = 'NaN' elif o == _inf: text = 'Infinity' elif o == _neginf: text = '-Infinity' else: return _repr(o) if not allow_nan: raise ValueError( "Out of range float values are not JSON compliant: " + repr(o)) return text if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys: _iterencode = c_make_encoder( markers, self.default, _encoder, self.indent, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, self.allow_nan) else: _iterencode = _make_iterencode( markers, self.default, _encoder, self.indent, floatstr, self.key_separator, self.item_separator, self.sort_keys, self.skipkeys, _one_shot) return _iterencode(o, 0)
[ "def", "iterencode", "(", "self", ",", "o", ",", "_one_shot", "=", "False", ")", ":", "if", "self", ".", "check_circular", ":", "markers", "=", "{", "}", "else", ":", "markers", "=", "None", "if", "self", ".", "ensure_ascii", ":", "_encoder", "=", "encode_basestring_ascii", "else", ":", "_encoder", "=", "encode_basestring", "if", "self", ".", "encoding", "!=", "'utf-8'", ":", "def", "_encoder", "(", "o", ",", "_orig_encoder", "=", "_encoder", ",", "_encoding", "=", "self", ".", "encoding", ")", ":", "if", "isinstance", "(", "o", ",", "str", ")", ":", "o", "=", "o", ".", "decode", "(", "_encoding", ")", "return", "_orig_encoder", "(", "o", ")", "def", "floatstr", "(", "o", ",", "allow_nan", "=", "self", ".", "allow_nan", ",", "_repr", "=", "FLOAT_REPR", ",", "_inf", "=", "INFINITY", ",", "_neginf", "=", "-", "INFINITY", ")", ":", "# Check for specials. Note that this type of test is processor- and/or", "# platform-specific, so do tests which don't depend on the internals.", "if", "o", "!=", "o", ":", "text", "=", "'NaN'", "elif", "o", "==", "_inf", ":", "text", "=", "'Infinity'", "elif", "o", "==", "_neginf", ":", "text", "=", "'-Infinity'", "else", ":", "return", "_repr", "(", "o", ")", "if", "not", "allow_nan", ":", "raise", "ValueError", "(", "\"Out of range float values are not JSON compliant: \"", "+", "repr", "(", "o", ")", ")", "return", "text", "if", "_one_shot", "and", "c_make_encoder", "is", "not", "None", "and", "not", "self", ".", "indent", "and", "not", "self", ".", "sort_keys", ":", "_iterencode", "=", "c_make_encoder", "(", "markers", ",", "self", ".", "default", ",", "_encoder", ",", "self", ".", "indent", ",", "self", ".", "key_separator", ",", "self", ".", "item_separator", ",", "self", ".", "sort_keys", ",", "self", ".", "skipkeys", ",", "self", ".", "allow_nan", ")", "else", ":", "_iterencode", "=", "_make_iterencode", "(", "markers", ",", "self", ".", "default", ",", "_encoder", ",", "self", ".", "indent", ",", "floatstr", ",", "self", ".", "key_separator", ",", "self", ".", "item_separator", ",", "self", ".", "sort_keys", ",", "self", ".", "skipkeys", ",", "_one_shot", ")", "return", "_iterencode", "(", "o", ",", "0", ")" ]
https://github.com/appcelerator-archive/titanium_desktop/blob/37dbaab5664e595115e2fcdc348ed125cd50b48d/site_scons/simplejson/encoder.py#L205-L260
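The streaming behaviour documented above can be exercised with the standard-library json module, whose JSONEncoder.iterencode mirrors this simplejson API; a minimal sketch:

# Minimal sketch using the stdlib json analogue of the simplejson API above.
import json

encoder = json.JSONEncoder()
chunks = list(encoder.iterencode({"a": [1, 2, 3], "b": float("inf")}))
print("".join(chunks))  # {"a": [1, 2, 3], "b": Infinity}  (allow_nan defaults to True)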
apache/kudu
90895ce76590f10730ad7aac3613b69d89ff5422
src/kudu/scripts/parse_metrics_log.py
python
delta
(prev, cur, m)
return cur[m] - prev[m]
Compute the delta in metric 'm' between two metric snapshots.
Compute the delta in metric 'm' between two metric snapshots.
[ "Compute", "the", "delta", "in", "metric", "m", "between", "two", "metric", "snapshots", "." ]
def delta(prev, cur, m): """ Compute the delta in metric 'm' between two metric snapshots. """ if m not in prev or m not in cur: return 0 return cur[m] - prev[m]
[ "def", "delta", "(", "prev", ",", "cur", ",", "m", ")", ":", "if", "m", "not", "in", "prev", "or", "m", "not", "in", "cur", ":", "return", "0", "return", "cur", "[", "m", "]", "-", "prev", "[", "m", "]" ]
https://github.com/apache/kudu/blob/90895ce76590f10730ad7aac3613b69d89ff5422/src/kudu/scripts/parse_metrics_log.py#L117-L122
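Because delta returns 0 when the metric is absent from either snapshot, counters that first appear mid-log never produce spurious deltas. A self-contained sketch, with the helper restated and toy metric names as assumptions:

# Self-contained sketch of the snapshot-delta helper above, with toy metric names.
def delta(prev, cur, m):
    if m not in prev or m not in cur:
        return 0
    return cur[m] - prev[m]

prev = {"rows_inserted": 100}
cur = {"rows_inserted": 180, "rows_deleted": 5}
print(delta(prev, cur, "rows_inserted"))  # 80
print(delta(prev, cur, "rows_deleted"))   # 0: metric missing from prev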
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
lts/tools/gyp/pylib/gyp/generator/cmake.py
python
SetFilesProperty
(output, variable, property_name, values, sep)
Given a set of source files, sets the given property on them.
Given a set of source files, sets the given property on them.
[ "Given", "a", "set", "of", "source", "files", "sets", "the", "given", "property", "on", "them", "." ]
def SetFilesProperty(output, variable, property_name, values, sep): """Given a set of source files, sets the given property on them.""" output.write('set_source_files_properties(') WriteVariable(output, variable) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n')
[ "def", "SetFilesProperty", "(", "output", ",", "variable", ",", "property_name", ",", "values", ",", "sep", ")", ":", "output", ".", "write", "(", "'set_source_files_properties('", ")", "WriteVariable", "(", "output", ",", "variable", ")", "output", ".", "write", "(", "' PROPERTIES '", ")", "output", ".", "write", "(", "property_name", ")", "output", ".", "write", "(", "' \"'", ")", "for", "value", "in", "values", ":", "output", ".", "write", "(", "CMakeStringEscape", "(", "value", ")", ")", "output", ".", "write", "(", "sep", ")", "output", ".", "write", "(", "'\")\\n'", ")" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/gyp/pylib/gyp/generator/cmake.py#L156-L166
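To see the CMake text this helper emits, here is a minimal sketch with simplified stand-ins for WriteVariable and CMakeStringEscape; both stand-ins are assumptions for illustration, not the real gyp helpers.

# Sketch of the emitted CMake, with hypothetical simplified helpers.
import io

def write_variable(output, variable):
    output.write('${' + variable + '}')

def cmake_string_escape(value):
    return value.replace('\\', '\\\\').replace('"', '\\"')

output = io.StringIO()
output.write('set_source_files_properties(')
write_variable(output, 'srcs')
output.write(' PROPERTIES COMPILE_FLAGS "')
for flag in ['-Wall', '-Wextra']:
    output.write(cmake_string_escape(flag))
    output.write(' ')
output.write('")\n')
print(output.getvalue())
# set_source_files_properties(${srcs} PROPERTIES COMPILE_FLAGS "-Wall -Wextra ")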
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py2/IPython/utils/_process_posix.py
python
_find_cmd
(cmd)
return py3compat.bytes_to_str(path)
Find the full path to a command using which.
Find the full path to a command using which.
[ "Find", "the", "full", "path", "to", "a", "command", "using", "which", "." ]
def _find_cmd(cmd): """Find the full path to a command using which.""" path = sp.Popen(['/usr/bin/env', 'which', cmd], stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0] return py3compat.bytes_to_str(path)
[ "def", "_find_cmd", "(", "cmd", ")", ":", "path", "=", "sp", ".", "Popen", "(", "[", "'/usr/bin/env'", ",", "'which'", ",", "cmd", "]", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", ".", "communicate", "(", ")", "[", "0", "]", "return", "py3compat", ".", "bytes_to_str", "(", "path", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py2/IPython/utils/_process_posix.py#L35-L40
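The same lookup can be written with the stdlib subprocess module; a minimal sketch (py3compat.bytes_to_str is IPython-internal, so plain text-mode decoding stands in for it here):

# Stdlib sketch of resolving a command with `which`, analogous to _find_cmd above.
import subprocess

def find_cmd(cmd):
    out = subprocess.run(['/usr/bin/env', 'which', cmd],
                         capture_output=True, text=True).stdout
    return out.strip()

print(find_cmd('ls'))  # e.g. /bin/ls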
hpi-xnor/BMXNet
ed0b201da6667887222b8e4b5f997c4f6b61943d
python/mxnet/executor_manager.py
python
_load_general
(data, targets)
Load a list of arrays into a list of arrays specified by slices.
Load a list of arrays into a list of arrays specified by slices.
[ "Load", "a", "list", "of", "arrays", "into", "a", "list", "of", "arrays", "specified", "by", "slices", "." ]
def _load_general(data, targets): """Load a list of arrays into a list of arrays specified by slices.""" for d_src, d_targets in zip(data, targets): if isinstance(d_targets, nd.NDArray): d_src.copyto(d_targets) else: assert d_targets[-1][0].stop == d_src.shape[0], \ "Batch size mismatch. Expected %d, got %d"%( \ d_targets[-1][0].stop, d_src.shape[0]) for slice_idx, d_dst in d_targets: d_src[slice_idx].copyto(d_dst)
[ "def", "_load_general", "(", "data", ",", "targets", ")", ":", "for", "d_src", ",", "d_targets", "in", "zip", "(", "data", ",", "targets", ")", ":", "if", "isinstance", "(", "d_targets", ",", "nd", ".", "NDArray", ")", ":", "d_src", ".", "copyto", "(", "d_targets", ")", "else", ":", "assert", "d_targets", "[", "-", "1", "]", "[", "0", "]", ".", "stop", "==", "d_src", ".", "shape", "[", "0", "]", ",", "\"Batch size miss match. Expected %d, got %d\"", "%", "(", "d_targets", "[", "-", "1", "]", "[", "0", "]", ".", "stop", ",", "d_src", ".", "shape", "[", "0", "]", ")", "for", "slice_idx", ",", "d_dst", "in", "d_targets", ":", "d_src", "[", "slice_idx", "]", ".", "copyto", "(", "d_dst", ")" ]
https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/executor_manager.py#L98-L108
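The slice-targeted copy pattern above (one source batch scattered across per-device buffers) can be illustrated with plain numpy; the shapes and slices here are toy assumptions standing in for mxnet NDArrays:

# Numpy sketch of scattering one batch across per-slice destination buffers,
# mirroring the copyto loop above (toy shapes, not real device buffers).
import numpy as np

d_src = np.arange(8.0).reshape(8, 1)                  # one batch
d_targets = [(slice(0, 4), np.empty((4, 1))),         # "device 0"
             (slice(4, 8), np.empty((4, 1)))]         # "device 1"
assert d_targets[-1][0].stop == d_src.shape[0]        # batch size check
for slice_idx, d_dst in d_targets:
    d_dst[...] = d_src[slice_idx]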
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/oauth2client/oauth2client/clientsecrets.py
python
loadfile
(filename, cache=None)
return next(six.iteritems(obj))
Loading of client_secrets JSON file, optionally backed by a cache. Typical cache storage would be App Engine memcache service, but you can pass in any other cache client that implements these methods: * ``get(key, namespace=ns)`` * ``set(key, value, namespace=ns)`` Usage:: # without caching client_type, client_info = loadfile('secrets.json') # using App Engine memcache service from google.appengine.api import memcache client_type, client_info = loadfile('secrets.json', cache=memcache) Args: filename: string, Path to a client_secrets.json file on a filesystem. cache: An optional cache service client that implements get() and set() methods. If not specified, the file is always being loaded from a filesystem. Raises: InvalidClientSecretsError: In case of a validation error or some I/O failure. Can happen only on cache miss. Returns: (client_type, client_info) tuple, as _loadfile() normally would. JSON contents is validated only during first load. Cache hits are not validated.
Loading of client_secrets JSON file, optionally backed by a cache.
[ "Loading", "of", "client_secrets", "JSON", "file", "optionally", "backed", "by", "a", "cache", "." ]
def loadfile(filename, cache=None): """Loading of client_secrets JSON file, optionally backed by a cache. Typical cache storage would be App Engine memcache service, but you can pass in any other cache client that implements these methods: * ``get(key, namespace=ns)`` * ``set(key, value, namespace=ns)`` Usage:: # without caching client_type, client_info = loadfile('secrets.json') # using App Engine memcache service from google.appengine.api import memcache client_type, client_info = loadfile('secrets.json', cache=memcache) Args: filename: string, Path to a client_secrets.json file on a filesystem. cache: An optional cache service client that implements get() and set() methods. If not specified, the file is always being loaded from a filesystem. Raises: InvalidClientSecretsError: In case of a validation error or some I/O failure. Can happen only on cache miss. Returns: (client_type, client_info) tuple, as _loadfile() normally would. JSON contents is validated only during first load. Cache hits are not validated. """ _SECRET_NAMESPACE = 'oauth2client:secrets#ns' if not cache: return _loadfile(filename) obj = cache.get(filename, namespace=_SECRET_NAMESPACE) if obj is None: client_type, client_info = _loadfile(filename) obj = {client_type: client_info} cache.set(filename, obj, namespace=_SECRET_NAMESPACE) return next(six.iteritems(obj))
[ "def", "loadfile", "(", "filename", ",", "cache", "=", "None", ")", ":", "_SECRET_NAMESPACE", "=", "'oauth2client:secrets#ns'", "if", "not", "cache", ":", "return", "_loadfile", "(", "filename", ")", "obj", "=", "cache", ".", "get", "(", "filename", ",", "namespace", "=", "_SECRET_NAMESPACE", ")", "if", "obj", "is", "None", ":", "client_type", ",", "client_info", "=", "_loadfile", "(", "filename", ")", "obj", "=", "{", "client_type", ":", "client_info", "}", "cache", ".", "set", "(", "filename", ",", "obj", ",", "namespace", "=", "_SECRET_NAMESPACE", ")", "return", "next", "(", "six", ".", "iteritems", "(", "obj", ")", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/oauth2client/oauth2client/clientsecrets.py#L119-L163
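Any object with namespace-aware get/set methods works as the cache argument. Below is a minimal in-memory client satisfying that interface; the class is a hypothetical illustration, not part of oauth2client (App Engine memcache is the canonical implementation).

# Hypothetical in-memory cache client matching the get/set interface that
# loadfile() expects.
class DictCache(object):
    def __init__(self):
        self._store = {}

    def get(self, key, namespace=None):
        return self._store.get((namespace, key))

    def set(self, key, value, namespace=None):
        self._store[(namespace, key)] = value

# Usage sketch: client_type, client_info = loadfile('secrets.json', cache=DictCache())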
apache/impala
8ddac48f3428c86f2cbd037ced89cfb903298b12
bin/single_node_perf_run.py
python
backup_workloads
()
return temp_dir
Copies the workload folder to a temporary directory and returns its name. Used to keep workloads from being clobbered by git checkout.
Copies the workload folder to a temporary directory and returns its name.
[ "Copy", "the", "workload", "folder", "to", "a", "temporary", "directory", "and", "returns", "its", "name", "." ]
def backup_workloads(): """Copies the workload folder to a temporary directory and returns its name. Used to keep workloads from being clobbered by git checkout. """ temp_dir = mkdtemp() sh.cp(os.path.join(IMPALA_HOME, "testdata", "workloads"), temp_dir, R=True, _out=sys.stdout, _err=sys.stderr) print "Backed up workloads to {0}".format(temp_dir) return temp_dir
[ "def", "backup_workloads", "(", ")", ":", "temp_dir", "=", "mkdtemp", "(", ")", "sh", ".", "cp", "(", "os", ".", "path", ".", "join", "(", "IMPALA_HOME", ",", "\"testdata\"", ",", "\"workloads\"", ")", ",", "temp_dir", ",", "R", "=", "True", ",", "_out", "=", "sys", ".", "stdout", ",", "_err", "=", "sys", ".", "stderr", ")", "print", "\"Backed up workloads to {0}\"", ".", "format", "(", "temp_dir", ")", "return", "temp_dir" ]
https://github.com/apache/impala/blob/8ddac48f3428c86f2cbd037ced89cfb903298b12/bin/single_node_perf_run.py#L203-L212
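sh.cp(..., R=True) is the sh library's spelling of a recursive copy. The stdlib equivalent, as a sketch with a generic source directory standing in for the Impala workloads path:

# Stdlib sketch of the recursive backup-copy above, without the sh dependency.
import os
import shutil
import tempfile

def backup_dir(src):
    temp_dir = tempfile.mkdtemp()
    # copytree requires a non-existent destination, so nest under the temp dir.
    shutil.copytree(src, os.path.join(temp_dir, os.path.basename(src)))
    return temp_dir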
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_core.py
python
Validator.GetWindow
(*args, **kwargs)
return _core_.Validator_GetWindow(*args, **kwargs)
GetWindow(self) -> Window
GetWindow(self) -> Window
[ "GetWindow", "(", "self", ")", "-", ">", "Window" ]
def GetWindow(*args, **kwargs): """GetWindow(self) -> Window""" return _core_.Validator_GetWindow(*args, **kwargs)
[ "def", "GetWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Validator_GetWindow", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L11892-L11894
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py
python
NeuralNetworkBuilder.add_concat_nd
(self, name, input_names, output_name, axis)
return spec_layer
Add a concat_nd layer to the model that performs concatenation along the given axis. Refer to the **ConcatNDLayerParams** message in specification (NeuralNetwork.proto) for more details. Parameters ---------- name: str The name of this layer. input_names: list of str The input blob names of this layer. output_name: str The output blob name of this layer. axis: int Axis to perform the concat operation on.
Add a concat_nd layer to the model that performs concatenation along the given axis. Refer to the **ConcatNDLayerParams** message in specification (NeuralNetwork.proto) for more details.
[ "Add", "a", "concat_nd", "layer", "to", "the", "model", "that", "performs", "concatenation", "along", "the", "given", "axis", ".", "Refer", "to", "the", "**", "ConcatNDLayerParams", "**", "message", "in", "specification", "(", "NeuralNetwork", ".", "proto", ")", "for", "more", "details", "." ]
def add_concat_nd(self, name, input_names, output_name, axis): """ Add a concat_nd layer to the model that performs concatenation along the given axis. Refer to the **ConcatNDLayerParams** message in specification (NeuralNetwork.proto) for more details. Parameters ---------- name: str The name of this layer. input_names: list of str The input blob names of this layer. output_name: str The output blob name of this layer. axis: int Axis to perform the concat operation on. """ spec_layer = self._add_generic_layer(name, input_names, [output_name]) spec_layer_params = spec_layer.concatND spec_layer_params.axis = axis return spec_layer
[ "def", "add_concat_nd", "(", "self", ",", "name", ",", "input_names", ",", "output_name", ",", "axis", ")", ":", "spec_layer", "=", "self", ".", "_add_generic_layer", "(", "name", ",", "input_names", ",", "[", "output_name", "]", ")", "spec_layer_params", "=", "spec_layer", ".", "concatND", "spec_layer_params", ".", "axis", "=", "axis", "return", "spec_layer" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/external/coremltools_wrap/coremltools/coremltools/models/neural_network/builder.py#L4608-L4629
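The layer's runtime semantics match numpy concatenation along an axis, which makes the axis parameter easy to sanity-check. A minimal sketch of those semantics (plain numpy, not a Core ML call):

# Numpy illustration of concat-along-axis semantics, matching what the
# concat_nd layer computes at runtime.
import numpy as np

a = np.ones((2, 3))
b = np.zeros((2, 2))
print(np.concatenate([a, b], axis=1).shape)   # (2, 5)
print(np.concatenate([a, a], axis=0).shape)   # (4, 3)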
apiaryio/snowcrash
b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3
tools/gyp/pylib/gyp/xcode_emulation.py
python
XcodeSettings.GetMachOType
(self)
return { 'executable': 'mh_execute', 'static_library': 'staticlib', 'shared_library': 'mh_dylib', 'loadable_module': 'mh_bundle', }[self.spec['type']]
Returns the MACH_O_TYPE of this target.
Returns the MACH_O_TYPE of this target.
[ "Returns", "the", "MACH_O_TYPE", "of", "this", "target", "." ]
def GetMachOType(self): """Returns the MACH_O_TYPE of this target.""" # Weird, but matches Xcode. if not self._IsBundle() and self.spec['type'] == 'executable': return '' return { 'executable': 'mh_execute', 'static_library': 'staticlib', 'shared_library': 'mh_dylib', 'loadable_module': 'mh_bundle', }[self.spec['type']]
[ "def", "GetMachOType", "(", "self", ")", ":", "# Weird, but matches Xcode.", "if", "not", "self", ".", "_IsBundle", "(", ")", "and", "self", ".", "spec", "[", "'type'", "]", "==", "'executable'", ":", "return", "''", "return", "{", "'executable'", ":", "'mh_execute'", ",", "'static_library'", ":", "'staticlib'", ",", "'shared_library'", ":", "'mh_dylib'", ",", "'loadable_module'", ":", "'mh_bundle'", ",", "}", "[", "self", ".", "spec", "[", "'type'", "]", "]" ]
https://github.com/apiaryio/snowcrash/blob/b5b39faa85f88ee17459edf39fdc6fe4fc70d2e3/tools/gyp/pylib/gyp/xcode_emulation.py#L343-L353
scylladb/seastar
0cdd2329beb1cc4c0af8828598c26114397ffa9c
scripts/perftune.py
python
NetPerfTuner.__get_irqs_one
(self, iface)
return self.__nic2irqs[iface]
Returns the list of IRQ numbers for the given interface.
Returns the list of IRQ numbers for the given interface.
[ "Returns", "the", "list", "of", "IRQ", "numbers", "for", "the", "given", "interface", "." ]
def __get_irqs_one(self, iface): """ Returns the list of IRQ numbers for the given interface. """ return self.__nic2irqs[iface]
[ "def", "__get_irqs_one", "(", "self", ",", "iface", ")", ":", "return", "self", ".", "__nic2irqs", "[", "iface", "]" ]
https://github.com/scylladb/seastar/blob/0cdd2329beb1cc4c0af8828598c26114397ffa9c/scripts/perftune.py#L564-L568
RoboJackets/robocup-software
bce13ce53ddb2ecb9696266d980722c34617dc15
rj_gameplay/stp/role/assignment/__init__.py
python
IRoleAssignment.assign_roles
( flat_requests: FlatRoleRequests, world_state: stp.rc.WorldState, prev_results: FlatRoleResults, )
Assigns roles. :param flat_requests: The role requests. :param world_state: The current state of the world. :param prev_results: The previous results. :return: The results of the role assignment.
Assigns roles. :param flat_requests: The role requests. :param world_state: The current state of the world. :param prev_results: The previous results. :return: The results of the role assignment.
[ "Assigns", "roles", ".", ":", "param", "flat_requests", ":", "The", "role", "requests", ".", ":", "param", "world_state", ":", "The", "current", "state", "of", "the", "world", ".", ":", "param", "prev_results", ":", "The", "previous", "results", ".", ":", "return", ":", "The", "results", "of", "the", "role", "assignment", "." ]
def assign_roles( flat_requests: FlatRoleRequests, world_state: stp.rc.WorldState, prev_results: FlatRoleResults, ) -> FlatRoleResults: """Assigns roles. :param flat_requests: The role requests. :param world_state: The current state of the world. :param prev_results: The previous results. :return: The results of the role assignment. """ ...
[ "def", "assign_roles", "(", "flat_requests", ":", "FlatRoleRequests", ",", "world_state", ":", "stp", ".", "rc", ".", "WorldState", ",", "prev_results", ":", "FlatRoleResults", ",", ")", "->", "FlatRoleResults", ":", "..." ]
https://github.com/RoboJackets/robocup-software/blob/bce13ce53ddb2ecb9696266d980722c34617dc15/rj_gameplay/stp/role/assignment/__init__.py#L23-L34
google/syzygy
8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5
third_party/numpy/files/numpy/f2py/capi_maps.py
python
getctype
(var)
return ctype
Determines C type
Determines C type
[ "Determines", "C", "type" ]
def getctype(var): """ Determines C type """ ctype='void' if isfunction(var): if 'result' in var: a=var['result'] else: a=var['name'] if a in var['vars']: return getctype(var['vars'][a]) else: errmess('getctype: function %s has no return value?!\n'%a) elif issubroutine(var): return ctype elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: typespec = var['typespec'].lower() f2cmap=f2cmap_all[typespec] ctype=f2cmap[''] # default type if 'kindselector' in var: if '*' in var['kindselector']: try: ctype=f2cmap[var['kindselector']['*']] except KeyError: errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'],'*',var['kindselector']['*'])) elif 'kind' in var['kindselector']: if typespec+'kind' in f2cmap_all: f2cmap=f2cmap_all[typespec+'kind'] try: ctype=f2cmap[var['kindselector']['kind']] except KeyError: if typespec in f2cmap_all: f2cmap=f2cmap_all[typespec] try: ctype=f2cmap[str(var['kindselector']['kind'])] except KeyError: errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'\ %(typespec,var['kindselector']['kind'], ctype, typespec,var['kindselector']['kind'], os.getcwd())) else: if not isexternal(var): errmess('getctype: No C-type found in "%s", assuming void.\n'%var) return ctype
[ "def", "getctype", "(", "var", ")", ":", "ctype", "=", "'void'", "if", "isfunction", "(", "var", ")", ":", "if", "'result'", "in", "var", ":", "a", "=", "var", "[", "'result'", "]", "else", ":", "a", "=", "var", "[", "'name'", "]", "if", "a", "in", "var", "[", "'vars'", "]", ":", "return", "getctype", "(", "var", "[", "'vars'", "]", "[", "a", "]", ")", "else", ":", "errmess", "(", "'getctype: function %s has no return value?!\\n'", "%", "a", ")", "elif", "issubroutine", "(", "var", ")", ":", "return", "ctype", "elif", "'typespec'", "in", "var", "and", "var", "[", "'typespec'", "]", ".", "lower", "(", ")", "in", "f2cmap_all", ":", "typespec", "=", "var", "[", "'typespec'", "]", ".", "lower", "(", ")", "f2cmap", "=", "f2cmap_all", "[", "typespec", "]", "ctype", "=", "f2cmap", "[", "''", "]", "# default type", "if", "'kindselector'", "in", "var", ":", "if", "'*'", "in", "var", "[", "'kindselector'", "]", ":", "try", ":", "ctype", "=", "f2cmap", "[", "var", "[", "'kindselector'", "]", "[", "'*'", "]", "]", "except", "KeyError", ":", "errmess", "(", "'getctype: \"%s %s %s\" not supported.\\n'", "%", "(", "var", "[", "'typespec'", "]", ",", "'*'", ",", "var", "[", "'kindselector'", "]", "[", "'*'", "]", ")", ")", "elif", "'kind'", "in", "var", "[", "'kindselector'", "]", ":", "if", "typespec", "+", "'kind'", "in", "f2cmap_all", ":", "f2cmap", "=", "f2cmap_all", "[", "typespec", "+", "'kind'", "]", "try", ":", "ctype", "=", "f2cmap", "[", "var", "[", "'kindselector'", "]", "[", "'kind'", "]", "]", "except", "KeyError", ":", "if", "typespec", "in", "f2cmap_all", ":", "f2cmap", "=", "f2cmap_all", "[", "typespec", "]", "try", ":", "ctype", "=", "f2cmap", "[", "str", "(", "var", "[", "'kindselector'", "]", "[", "'kind'", "]", ")", "]", "except", "KeyError", ":", "errmess", "(", "'getctype: \"%s(kind=%s)\" is mapped to C \"%s\" (to override define dict(%s = dict(%s=\"<C typespec>\")) in %s/.f2py_f2cmap file).\\n'", "%", "(", "typespec", ",", "var", "[", "'kindselector'", "]", "[", "'kind'", "]", ",", "ctype", ",", "typespec", ",", "var", "[", "'kindselector'", "]", "[", "'kind'", "]", ",", "os", ".", "getcwd", "(", ")", ")", ")", "else", ":", "if", "not", "isexternal", "(", "var", ")", ":", "errmess", "(", "'getctype: No C-type found in \"%s\", assuming void.\\n'", "%", "var", ")", "return", "ctype" ]
https://github.com/google/syzygy/blob/8164b24ebde9c5649c9a09e88a7fc0b0fcbd1bc5/third_party/numpy/files/numpy/f2py/capi_maps.py#L217-L261
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py2/numpy/ma/core.py
python
MaskedArray.nonzero
(self)
return narray(self.filled(0), copy=False).nonzero()
Return the indices of unmasked elements that are not zero. Returns a tuple of arrays, one for each dimension, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: a[a.nonzero()] To group the indices by element, rather than dimension, use instead:: np.transpose(a.nonzero()) The result of this is always a 2d array, with a row for each non-zero element. Parameters ---------- None Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- numpy.nonzero : Function operating on ndarrays. flatnonzero : Return indices that are non-zero in the flattened version of the input array. ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x masked_array(data = [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.]], mask = False, fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) Masked elements are ignored. >>> x[1, 1] = ma.masked >>> x masked_array(data = [[1.0 0.0 0.0] [0.0 -- 0.0] [0.0 0.0 1.0]], mask = [[False False False] [False True False] [False False False]], fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) Indices can also be grouped by element. >>> np.transpose(x.nonzero()) array([[0, 0], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, ma.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 masked_array(data = [[False False False] [ True True True] [ True True True]], mask = False, fill_value=999999) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the condition array can also be called. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Return the indices of unmasked elements that are not zero.
[ "Return", "the", "indices", "of", "unmasked", "elements", "that", "are", "not", "zero", "." ]
def nonzero(self): """ Return the indices of unmasked elements that are not zero. Returns a tuple of arrays, one for each dimension, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: a[a.nonzero()] To group the indices by element, rather than dimension, use instead:: np.transpose(a.nonzero()) The result of this is always a 2d array, with a row for each non-zero element. Parameters ---------- None Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- numpy.nonzero : Function operating on ndarrays. flatnonzero : Return indices that are non-zero in the flattened version of the input array. ndarray.nonzero : Equivalent ndarray method. count_nonzero : Counts the number of non-zero elements in the input array. Examples -------- >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x masked_array(data = [[ 1. 0. 0.] [ 0. 1. 0.] [ 0. 0. 1.]], mask = False, fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) Masked elements are ignored. >>> x[1, 1] = ma.masked >>> x masked_array(data = [[1.0 0.0 0.0] [0.0 -- 0.0] [0.0 0.0 1.0]], mask = [[False False False] [False True False] [False False False]], fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) Indices can also be grouped by element. >>> np.transpose(x.nonzero()) array([[0, 0], [2, 2]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, ma.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 masked_array(data = [[False False False] [ True True True] [ True True True]], mask = False, fill_value=999999) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) The ``nonzero`` method of the condition array can also be called. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) """ return narray(self.filled(0), copy=False).nonzero()
[ "def", "nonzero", "(", "self", ")", ":", "return", "narray", "(", "self", ".", "filled", "(", "0", ")", ",", "copy", "=", "False", ")", ".", "nonzero", "(", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/ma/core.py#L4810-L4909
microsoft/TSS.MSR
0f2516fca2cd9929c31d5450e39301c9bde43688
TSS.Py/src/TpmTypes.py
python
TPM2_Startup_REQUEST.fromBytes
(buffer)
return TpmBuffer(buffer).createObj(TPM2_Startup_REQUEST)
Returns new TPM2_Startup_REQUEST object constructed from its marshaled representation in the given byte buffer
Returns new TPM2_Startup_REQUEST object constructed from its marshaled representation in the given byte buffer
[ "Returns", "new", "TPM2_Startup_REQUEST", "object", "constructed", "from", "its", "marshaled", "representation", "in", "the", "given", "byte", "buffer" ]
def fromBytes(buffer): """ Returns new TPM2_Startup_REQUEST object constructed from its marshaled representation in the given byte buffer """ return TpmBuffer(buffer).createObj(TPM2_Startup_REQUEST)
[ "def", "fromBytes", "(", "buffer", ")", ":", "return", "TpmBuffer", "(", "buffer", ")", ".", "createObj", "(", "TPM2_Startup_REQUEST", ")" ]
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L9082-L9086
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/third_party/oauth2client/oauth2client/locked_file.py
python
LockedFile.unlock_and_close
(self)
Unlock and close a file.
Unlock and close a file.
[ "Unlock", "and", "close", "a", "file", "." ]
def unlock_and_close(self): """Unlock and close a file.""" self._opener.unlock_and_close()
[ "def", "unlock_and_close", "(", "self", ")", ":", "self", ".", "_opener", ".", "unlock_and_close", "(", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/third_party/oauth2client/oauth2client/locked_file.py#L376-L378
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/vis/visualization.py
python
hideLabel
(name : ItemPath, hidden=True)
return scene().hideLabel(name,hidden)
Hides or shows the label of an item in the visualization
Hides or shows the label of an item in the visualization
[ "Hides", "or", "shows", "the", "label", "of", "an", "item", "in", "the", "visualization" ]
def hideLabel(name : ItemPath, hidden=True) -> None: """Hides or shows the label of an item in the visualization""" return scene().hideLabel(name,hidden)
[ "def", "hideLabel", "(", "name", ":", "ItemPath", ",", "hidden", "=", "True", ")", "->", "None", ":", "return", "scene", "(", ")", ".", "hideLabel", "(", "name", ",", "hidden", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/vis/visualization.py#L1283-L1285
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/eager/profiler.py
python
start
()
Start profiling. Raises: ProfilerAlreadyRunningError: If another profiling session is running.
Start profiling.
[ "Start", "profiling", "." ]
def start(): """Start profiling. Raises: ProfilerAlreadyRunningError: If another profiling session is running. """ global _profiler with _profiler_lock: if _profiler is not None: raise ProfilerAlreadyRunningError('Another profiler is running.') if context.default_execution_mode == context.EAGER_MODE: context.ensure_initialized() _profiler = pywrap_tensorflow.TFE_NewProfiler() if not pywrap_tensorflow.TFE_ProfilerIsOk(_profiler): logging.warning('Another profiler session is running which is probably ' 'created by profiler server. Please avoid using profiler ' 'server and profiler APIs at the same time.')
[ "def", "start", "(", ")", ":", "global", "_profiler", "with", "_profiler_lock", ":", "if", "_profiler", "is", "not", "None", ":", "raise", "ProfilerAlreadyRunningError", "(", "'Another profiler is running.'", ")", "if", "context", ".", "default_execution_mode", "==", "context", ".", "EAGER_MODE", ":", "context", ".", "ensure_initialized", "(", ")", "_profiler", "=", "pywrap_tensorflow", ".", "TFE_NewProfiler", "(", ")", "if", "not", "pywrap_tensorflow", ".", "TFE_ProfilerIsOk", "(", "_profiler", ")", ":", "logging", ".", "warning", "(", "'Another profiler session is running which is probably '", "'created by profiler server. Please avoid using profiler '", "'server and profiler APIs at the same time.'", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/eager/profiler.py#L64-L80
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/richtext.py
python
RichTextParagraphLayoutBox.SetStyle
(*args, **kwargs)
return _richtext.RichTextParagraphLayoutBox_SetStyle(*args, **kwargs)
SetStyle(self, RichTextRange range, RichTextAttr style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool
SetStyle(self, RichTextRange range, RichTextAttr style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool
[ "SetStyle", "(", "self", "RichTextRange", "range", "RichTextAttr", "style", "int", "flags", "=", "RICHTEXT_SETSTYLE_WITH_UNDO", ")", "-", ">", "bool" ]
def SetStyle(*args, **kwargs): """SetStyle(self, RichTextRange range, RichTextAttr style, int flags=RICHTEXT_SETSTYLE_WITH_UNDO) -> bool""" return _richtext.RichTextParagraphLayoutBox_SetStyle(*args, **kwargs)
[ "def", "SetStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextParagraphLayoutBox_SetStyle", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L1728-L1730
linyouhappy/kongkongxiyou
7a69b2913eb29f4be77f9a62fb90cdd72c4160f1
cocosjs/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py
python
Cursor.displayname
(self)
return self._displayname
Return the display name for the entity referenced by this cursor. The display name contains extra information that helps identify the cursor, such as the parameters of a function or template or the arguments of a class template specialization.
Return the display name for the entity referenced by this cursor.
[ "Return", "the", "display", "name", "for", "the", "entity", "referenced", "by", "this", "cursor", "." ]
def displayname(self): """ Return the display name for the entity referenced by this cursor. The display name contains extra information that helps identify the cursor, such as the parameters of a function or template or the arguments of a class template specialization. """ if not hasattr(self, '_displayname'): self._displayname = conf.lib.clang_getCursorDisplayName(self) return self._displayname
[ "def", "displayname", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_displayname'", ")", ":", "self", ".", "_displayname", "=", "conf", ".", "lib", ".", "clang_getCursorDisplayName", "(", "self", ")", "return", "self", ".", "_displayname" ]
https://github.com/linyouhappy/kongkongxiyou/blob/7a69b2913eb29f4be77f9a62fb90cdd72c4160f1/cocosjs/frameworks/cocos2d-x/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/cindex.py#L1115-L1126
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/syntax/synxml.py
python
Syntax.GetLangId
(self)
return self.langid
Get the language id @return: string
Get the language id @return: string
[ "Get", "the", "language", "id", "@return", ":", "string" ]
def GetLangId(self): """Get the language id @return: string """ return self.langid
[ "def", "GetLangId", "(", "self", ")", ":", "return", "self", ".", "langid" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/syntax/synxml.py#L575-L580
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/framework/sparse_tensor.py
python
SparseTensor.graph
(self)
return self._indices.graph
The `Graph` that contains the index, value, and dense_shape tensors.
The `Graph` that contains the index, value, and dense_shape tensors.
[ "The", "Graph", "that", "contains", "the", "index", "value", "and", "dense_shape", "tensors", "." ]
def graph(self): """The `Graph` that contains the index, value, and dense_shape tensors.""" return self._indices.graph
[ "def", "graph", "(", "self", ")", ":", "return", "self", ".", "_indices", ".", "graph" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/framework/sparse_tensor.py#L185-L187
nsnam/ns-3-dev-git
efdb2e21f45c0a87a60b47c547b68fa140a7b686
utils/grid.py
python
TimelinesRenderer.draw_events
(self, ctx, events, x, y, width, height)
! Draw Event @param self this object @param ctx ctx @param events events @param x x @param y y @param width width @param height height @return none
! Draw Event
[ "!", "Draw", "Event" ]
def draw_events(self, ctx, events, x, y, width, height): """! Draw Event @param self this object @param ctx ctx @param events events @param x x @param y y @param width width @param height height @return none """ if (self.grey_background % 2) == 0: ctx.rectangle(x, y - self.padding / 2, width, height + self.padding) ctx.set_source_rgb(0.9, 0.9, 0.9) ctx.fill() last_x_drawn = int(x) (lo, hi) = events.get_events_bounds(self.start, self.end) for event in events.events[lo:hi]: real_x = int(x + (event.at - self.start) * width / (self.end - self.start)) if real_x > last_x_drawn + 2: ctx.rectangle(real_x, y, 1, 1) ctx.set_source_rgb(1, 0, 0) ctx.stroke() ctx.move_to(real_x, y + self.max_text_height) ctx.set_source_rgb(0, 0, 0) ctx.show_text(str(event.value)) last_x_drawn = real_x self.grey_background += 1
[ "def", "draw_events", "(", "self", ",", "ctx", ",", "events", ",", "x", ",", "y", ",", "width", ",", "height", ")", ":", "if", "(", "self", ".", "grey_background", "%", "2", ")", "==", "0", ":", "ctx", ".", "rectangle", "(", "x", ",", "y", "-", "self", ".", "padding", "/", "2", ",", "width", ",", "height", "+", "self", ".", "padding", ")", "ctx", ".", "set_source_rgb", "(", "0.9", ",", "0.9", ",", "0.9", ")", "ctx", ".", "fill", "(", ")", "last_x_drawn", "=", "int", "(", "x", ")", "(", "lo", ",", "hi", ")", "=", "events", ".", "get_events_bounds", "(", "self", ".", "start", ",", "self", ".", "end", ")", "for", "event", "in", "events", ".", "events", "[", "lo", ":", "hi", "]", ":", "real_x", "=", "int", "(", "x", "+", "(", "event", ".", "at", "-", "self", ".", "start", ")", "*", "width", "/", "(", "self", ".", "end", "-", "self", ".", "start", ")", ")", "if", "real_x", ">", "last_x_drawn", "+", "2", ":", "ctx", ".", "rectangle", "(", "real_x", ",", "y", ",", "1", ",", "1", ")", "ctx", ".", "set_source_rgb", "(", "1", ",", "0", ",", "0", ")", "ctx", ".", "stroke", "(", ")", "ctx", ".", "move_to", "(", "real_x", ",", "y", "+", "self", ".", "max_text_height", ")", "ctx", ".", "set_source_rgb", "(", "0", ",", "0", ",", "0", ")", "ctx", ".", "show_text", "(", "str", "(", "event", ".", "value", ")", ")", "last_x_drawn", "=", "real_x", "self", ".", "grey_background", "+=", "1" ]
https://github.com/nsnam/ns-3-dev-git/blob/efdb2e21f45c0a87a60b47c547b68fa140a7b686/utils/grid.py#L717-L745
francinexue/xuefu
b6ff79747a42e020588c0c0a921048e08fe4680c
api/ctpx/ctpmd.py
python
CtpMd.onRspError
(self, RspInfoField, requestId, final)
Error response
Error response
[ "Error", "response" ]
def onRspError(self, RspInfoField, requestId, final): """Error response""" log = u'Market data service returned an error. Error code: [{0}], error message: [{1}]'.format( RspInfoField.errorID, RspInfoField.errorMsg.decode('gbk')) logger.info(log)
[ "def", "onRspError", "(", "self", ",", "RspInfoField", ",", "requestId", ",", "final", ")", ":", "log", "=", "u'Market data service returned an error. Error code: [{0}], error message: [{1}]'", ".", "format", "(", "RspInfoField", ".", "errorID", ",", "RspInfoField", ".", "errorMsg", ".", "decode", "(", "'gbk'", ")", ")", "logger", ".", "info", "(", "log", ")" ]
https://github.com/francinexue/xuefu/blob/b6ff79747a42e020588c0c0a921048e08fe4680c/api/ctpx/ctpmd.py#L68-L72
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/fft/fftpack.py
python
ihfft
(a, n=None, axis=-1)
return conjugate(rfft(a, n, axis))/n
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry. Parameters ---------- a : array_like Input array. n : int, optional Length of the inverse FFT. axis : int, optional Axis over which to compute the inverse FFT, assuming Hermitian symmetry of the spectrum. Default is the last axis. Returns ------- out : ndarray The transformed input. See also -------- hfft, irfft Notes ----- `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the opposite case: here the signal is real in the frequency domain and has Hermite symmetry in the time domain. So here it's `hfft` for which you must supply the length of the result if it is to be odd: ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
[ "Compute", "the", "inverse", "FFT", "of", "a", "signal", "whose", "spectrum", "has", "Hermitian", "symmetry", "." ]
def ihfft(a, n=None, axis=-1): """ Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry. Parameters ---------- a : array_like Input array. n : int, optional Length of the inverse FFT. axis : int, optional Axis over which to compute the inverse FFT, assuming Hermitian symmetry of the spectrum. Default is the last axis. Returns ------- out : ndarray The transformed input. See also -------- hfft, irfft Notes ----- `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the opposite case: here the signal is real in the frequency domain and has Hermite symmetry in the time domain. So here it's `hfft` for which you must supply the length of the result if it is to be odd: ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. """ a = asarray(a).astype(float) if n is None: n = shape(a)[axis] return conjugate(rfft(a, n, axis))/n
[ "def", "ihfft", "(", "a", ",", "n", "=", "None", ",", "axis", "=", "-", "1", ")", ":", "a", "=", "asarray", "(", "a", ")", ".", "astype", "(", "float", ")", "if", "n", "is", "None", ":", "n", "=", "shape", "(", "a", ")", "[", "axis", "]", "return", "conjugate", "(", "rfft", "(", "a", ",", "n", ",", "axis", ")", ")", "/", "n" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/fft/fftpack.py#L472-L508
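The inverse relationship described in the Notes can be checked directly: for a real signal a, hfft(ihfft(a), len(a)) recovers a within numerical accuracy. A minimal sketch:

# Round-trip check of the hfft/ihfft pairing described in the Notes above.
import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
spec = np.fft.ihfft(a)                         # half-spectrum, length n//2 + 1
assert np.allclose(np.fft.hfft(spec, len(a)), a)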
eranif/codelite
076eb332d6d2b7ea9a7654afa0461a01d91543aa
Runtime/gdb_printers/libstdcxx/v6/xmethods.py
python
SharedPtrSubscriptWorker._supports
(self, method_name)
return self._is_array
operator[] is only supported for shared_ptr<T[]>
operator[] is only supported for shared_ptr<T[]>
[ "operator", "[]", "is", "only", "supported", "for", "shared_ptr<T", "[]", ">" ]
def _supports(self, method_name): "operator[] is only supported for shared_ptr<T[]>" return self._is_array
[ "def", "_supports", "(", "self", ",", "method_name", ")", ":", "return", "self", ".", "_is_array" ]
https://github.com/eranif/codelite/blob/076eb332d6d2b7ea9a7654afa0461a01d91543aa/Runtime/gdb_printers/libstdcxx/v6/xmethods.py#L706-L708
OpenGenus/cosmos
1a94e8880068e51d571543be179c323936bd0936
code/artificial_intelligence/src/principal_component_analysis/pca.py
python
visualize_data
(data1, data2)
Create a 3D plot for data visualization
Create a 3D plot for data visualization
[ "Create", "a", "3D", "plot", "for", "data", "visualization" ]
def visualize_data(data1, data2): """ Create a 3D plot for data visualization """ fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot(111, projection="3d") plt.rcParams["legend.fontsize"] = 10 ax.plot( data1[0, :], data1[1, :], data1[2, :], "o", markersize=8, color="blue", alpha=0.5, label="class1", ) ax.plot( data2[0, :], data2[1, :], data2[2, :], "^", markersize=8, alpha=0.5, color="red", label="class2", ) ax.set_xlabel("x_values") ax.set_ylabel("y_values") ax.set_zlabel("z_values") plt.title("Samples for class 1 and class 2") ax.legend(loc="upper right") plt.show()
[ "def", "visualize_data", "(", "data1", ",", "data2", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "8", ",", "8", ")", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ",", "projection", "=", "\"3d\"", ")", "plt", ".", "rcParams", "[", "\"legend.fontsize\"", "]", "=", "10", "ax", ".", "plot", "(", "data1", "[", "0", ",", ":", "]", ",", "data1", "[", "1", ",", ":", "]", ",", "data1", "[", "2", ",", ":", "]", ",", "\"o\"", ",", "markersize", "=", "8", ",", "color", "=", "\"blue\"", ",", "alpha", "=", "0.5", ",", "label", "=", "\"class1\"", ",", ")", "ax", ".", "plot", "(", "data2", "[", "0", ",", ":", "]", ",", "data2", "[", "1", ",", ":", "]", ",", "data2", "[", "2", ",", ":", "]", ",", "\"^\"", ",", "markersize", "=", "8", ",", "alpha", "=", "0.5", ",", "color", "=", "\"red\"", ",", "label", "=", "\"class2\"", ",", ")", "ax", ".", "set_xlabel", "(", "\"x_values\"", ")", "ax", ".", "set_ylabel", "(", "\"y_values\"", ")", "ax", ".", "set_zlabel", "(", "\"z_values\"", ")", "plt", ".", "title", "(", "\"Samples for class 1 and class 2\"", ")", "ax", ".", "legend", "(", "loc", "=", "\"upper right\"", ")", "plt", ".", "show", "(", ")" ]
https://github.com/OpenGenus/cosmos/blob/1a94e8880068e51d571543be179c323936bd0936/code/artificial_intelligence/src/principal_component_analysis/pca.py#L23-L55
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/distutils/command/sdist.py
python
sdist.make_distribution
(self)
Create the source distribution(s). First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'.
Create the source distribution(s). First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'.
[ "Create", "the", "source", "distribution", "(", "s", ")", ".", "First", "we", "create", "the", "release", "tree", "with", "make_release_tree", "()", ";", "then", "we", "create", "all", "required", "archive", "files", "(", "according", "to", "self", ".", "formats", ")", "from", "the", "release", "tree", ".", "Finally", "we", "clean", "up", "by", "blowing", "away", "the", "release", "tree", "(", "unless", "self", ".", "keep_temp", "is", "true", ")", ".", "The", "list", "of", "archive", "files", "created", "is", "stored", "so", "it", "can", "be", "retrieved", "later", "by", "get_archive_files", "()", "." ]
def make_distribution(self): """Create the source distribution(s). First, we create the release tree with 'make_release_tree()'; then, we create all required archive files (according to 'self.formats') from the release tree. Finally, we clean up by blowing away the release tree (unless 'self.keep_temp' is true). The list of archive files created is stored so it can be retrieved later by 'get_archive_files()'. """ # Don't warn about missing meta-data here -- should be (and is!) # done elsewhere. base_dir = self.distribution.get_fullname() base_name = os.path.join(self.dist_dir, base_dir) self.make_release_tree(base_dir, self.filelist.files) archive_files = [] # remember names of files we create # tar archive must be created last to avoid overwrite and remove if 'tar' in self.formats: self.formats.append(self.formats.pop(self.formats.index('tar'))) for fmt in self.formats: file = self.make_archive(base_name, fmt, base_dir=base_dir, owner=self.owner, group=self.group) archive_files.append(file) self.distribution.dist_files.append(('sdist', '', file)) self.archive_files = archive_files if not self.keep_temp: dir_util.remove_tree(base_dir, dry_run=self.dry_run)
[ "def", "make_distribution", "(", "self", ")", ":", "# Don't warn about missing meta-data here -- should be (and is!)", "# done elsewhere.", "base_dir", "=", "self", ".", "distribution", ".", "get_fullname", "(", ")", "base_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dist_dir", ",", "base_dir", ")", "self", ".", "make_release_tree", "(", "base_dir", ",", "self", ".", "filelist", ".", "files", ")", "archive_files", "=", "[", "]", "# remember names of files we create", "# tar archive must be created last to avoid overwrite and remove", "if", "'tar'", "in", "self", ".", "formats", ":", "self", ".", "formats", ".", "append", "(", "self", ".", "formats", ".", "pop", "(", "self", ".", "formats", ".", "index", "(", "'tar'", ")", ")", ")", "for", "fmt", "in", "self", ".", "formats", ":", "file", "=", "self", ".", "make_archive", "(", "base_name", ",", "fmt", ",", "base_dir", "=", "base_dir", ",", "owner", "=", "self", ".", "owner", ",", "group", "=", "self", ".", "group", ")", "archive_files", ".", "append", "(", "file", ")", "self", ".", "distribution", ".", "dist_files", ".", "append", "(", "(", "'sdist'", ",", "''", ",", "file", ")", ")", "self", ".", "archive_files", "=", "archive_files", "if", "not", "self", ".", "keep_temp", ":", "dir_util", ".", "remove_tree", "(", "base_dir", ",", "dry_run", "=", "self", ".", "dry_run", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/distutils/command/sdist.py#L443-L471
intel/caffe
3f494b442ee3f9d17a07b09ecbd5fa2bbda00836
examples/faster-rcnn/lib/pycocotools/coco.py
python
COCO.loadCats
(self, ids=[])
Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects
Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects
[ "Load", "cats", "with", "the", "specified", "ids", ".", ":", "param", "ids", "(", "int", "array", ")", ":", "integer", "ids", "specifying", "cats", ":", "return", ":", "cats", "(", "object", "array", ")", ":", "loaded", "cat", "objects" ]
def loadCats(self, ids=[]): """ Load cats with the specified ids. :param ids (int array) : integer ids specifying cats :return: cats (object array) : loaded cat objects """ if type(ids) == list: return [self.cats[id] for id in ids] elif type(ids) == int: return [self.cats[ids]]
[ "def", "loadCats", "(", "self", ",", "ids", "=", "[", "]", ")", ":", "if", "type", "(", "ids", ")", "==", "list", ":", "return", "[", "self", ".", "cats", "[", "id", "]", "for", "id", "in", "ids", "]", "elif", "type", "(", "ids", ")", "==", "int", ":", "return", "[", "self", ".", "cats", "[", "ids", "]", "]" ]
https://github.com/intel/caffe/blob/3f494b442ee3f9d17a07b09ecbd5fa2bbda00836/examples/faster-rcnn/lib/pycocotools/coco.py#L213-L222
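A hedged usage sketch for loadCats: the annotation file name and category name below are assumptions, and pycocotools must be installed.

from pycocotools.coco import COCO

coco = COCO('instances_val2017.json')        # hypothetical annotation file
cat_ids = coco.getCatIds(catNms=['person'])  # look up category ids by name
cats = coco.loadCats(cat_ids)                # list input -> list of cat dicts
print(cats[0]['name'])
single = coco.loadCats(cat_ids[0])           # int input also yields a list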
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/callwrapper.py
python
_ArgManager.add_arg
(self, obj, ty)
return native.value
Unbox argument and emit code that handles any error
during unboxing.  Args are cleaned up in reverse order of the
parameter list, and cleanup begins as soon as unboxing
of any argument fails. E.g. failure on arg2 will result in control
flow going through:

    arg2.err -> arg1.err -> arg0.err -> arg.end (returns)
Unbox argument and emit code that handles any error during unboxing. Args are cleaned up in reverse order of the parameter list, and cleanup begins as soon as unboxing of any argument fails. E.g. failure on arg2 will result in control flow going through:
[ "Unbox", "argument", "and", "emit", "code", "that", "handles", "any", "error", "during", "unboxing", ".", "Args", "are", "cleaned", "up", "in", "reverse", "order", "of", "the", "parameter", "list", "and", "cleanup", "begins", "as", "soon", "as", "unboxing", "of", "any", "argument", "fails", ".", "E", ".", "g", ".", "failure", "on", "arg2", "will", "result", "in", "control", "flow", "going", "through", ":" ]
def add_arg(self, obj, ty): """ Unbox argument and emit code that handles any error during unboxing. Args are cleaned up in reverse order of the parameter list, and cleanup begins as soon as unboxing of any argument fails. E.g. failure on arg2 will result in control flow going through: arg2.err -> arg1.err -> arg0.err -> arg.end (returns) """ # Unbox argument native = self.api.to_native_value(ty, obj) # If an error occurred, go to the cleanup block for # the previous argument with cgutils.if_unlikely(self.builder, native.is_error): self.builder.branch(self.nextblk) # Define the cleanup function for the argument def cleanup_arg(): # Native value reflection self.api.reflect_native_value(ty, native.value, self.env_manager) # Native value cleanup if native.cleanup is not None: native.cleanup() # NRT cleanup # (happens after the native value cleanup as the latter # may need the native value) if self.context.enable_nrt: self.context.nrt.decref(self.builder, ty, native.value) self.cleanups.append(cleanup_arg) # Write the on-error cleanup block for this argument cleanupblk = self.builder.append_basic_block( "arg%d.err" % self.arg_count) with self.builder.goto_block(cleanupblk): cleanup_arg() # Go to next cleanup block self.builder.branch(self.nextblk) self.nextblk = cleanupblk self.arg_count += 1 return native.value
[ "def", "add_arg", "(", "self", ",", "obj", ",", "ty", ")", ":", "# Unbox argument", "native", "=", "self", ".", "api", ".", "to_native_value", "(", "ty", ",", "obj", ")", "# If an error occurred, go to the cleanup block for", "# the previous argument", "with", "cgutils", ".", "if_unlikely", "(", "self", ".", "builder", ",", "native", ".", "is_error", ")", ":", "self", ".", "builder", ".", "branch", "(", "self", ".", "nextblk", ")", "# Define the cleanup function for the argument", "def", "cleanup_arg", "(", ")", ":", "# Native value reflection", "self", ".", "api", ".", "reflect_native_value", "(", "ty", ",", "native", ".", "value", ",", "self", ".", "env_manager", ")", "# Native value cleanup", "if", "native", ".", "cleanup", "is", "not", "None", ":", "native", ".", "cleanup", "(", ")", "# NRT cleanup", "# (happens after the native value cleanup as the latter", "# may need the native value)", "if", "self", ".", "context", ".", "enable_nrt", ":", "self", ".", "context", ".", "nrt", ".", "decref", "(", "self", ".", "builder", ",", "ty", ",", "native", ".", "value", ")", "self", ".", "cleanups", ".", "append", "(", "cleanup_arg", ")", "# Write the on-error cleanup block for this argument", "cleanupblk", "=", "self", ".", "builder", ".", "append_basic_block", "(", "\"arg%d.err\"", "%", "self", ".", "arg_count", ")", "with", "self", ".", "builder", ".", "goto_block", "(", "cleanupblk", ")", ":", "cleanup_arg", "(", ")", "# Go to next cleanup block", "self", ".", "builder", ".", "branch", "(", "self", ".", "nextblk", ")", "self", ".", "nextblk", "=", "cleanupblk", "self", ".", "arg_count", "+=", "1", "return", "native", ".", "value" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/callwrapper.py#L22-L66
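The cleanup chain that add_arg builds in IR -- each successfully unboxed argument registers a cleanup, and a failure unwinds the cleanups of all earlier arguments in reverse order -- has a direct pure-Python analogue in contextlib.ExitStack. A self-contained sketch (the unbox stand-in is hypothetical):

from contextlib import ExitStack

def unbox(value):
    if value is None:                  # stand-in for an unboxing error
        raise ValueError('unbox failed')
    return value

def convert_args(raw_args):
    with ExitStack() as stack:
        natives = []
        for i, raw in enumerate(raw_args):
            native = unbox(raw)        # may raise -> earlier cleanups run
            stack.callback(print, 'cleanup arg%d' % i)
            natives.append(native)
        stack.pop_all()                # success: no cleanups fire
        return natives

convert_args([1, 2, 3])                # succeeds, prints nothing
try:
    convert_args([1, 2, None])         # prints: cleanup arg1, cleanup arg0
except ValueError:
    pass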
acbull/Unbiased_LambdaMart
7c39abe5caa18ca07df2d23c2db392916d92956c
Unbias_LightGBM/python-package/lightgbm/basic.py
python
Booster.feature_name
(self)
return [string_buffers[i].value.decode() for i in range_(num_feature)]
Get names of features.

Returns
-------
result : list
    List with names of features.
Get names of features.
[ "Get", "names", "of", "features", "." ]
def feature_name(self): """Get names of features. Returns ------- result : list List with names of features. """ num_feature = self.num_feature() # Get name of features tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetFeatureNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if num_feature != tmp_out_len.value: raise ValueError("Length of feature names doesn't equal with num_feature") return [string_buffers[i].value.decode() for i in range_(num_feature)]
[ "def", "feature_name", "(", "self", ")", ":", "num_feature", "=", "self", ".", "num_feature", "(", ")", "# Get name of features", "tmp_out_len", "=", "ctypes", ".", "c_int", "(", "0", ")", "string_buffers", "=", "[", "ctypes", ".", "create_string_buffer", "(", "255", ")", "for", "i", "in", "range_", "(", "num_feature", ")", "]", "ptr_string_buffers", "=", "(", "ctypes", ".", "c_char_p", "*", "num_feature", ")", "(", "*", "map", "(", "ctypes", ".", "addressof", ",", "string_buffers", ")", ")", "_safe_call", "(", "_LIB", ".", "LGBM_BoosterGetFeatureNames", "(", "self", ".", "handle", ",", "ctypes", ".", "byref", "(", "tmp_out_len", ")", ",", "ptr_string_buffers", ")", ")", "if", "num_feature", "!=", "tmp_out_len", ".", "value", ":", "raise", "ValueError", "(", "\"Length of feature names doesn't equal with num_feature\"", ")", "return", "[", "string_buffers", "[", "i", "]", ".", "value", ".", "decode", "(", ")", "for", "i", "in", "range_", "(", "num_feature", ")", "]" ]
https://github.com/acbull/Unbiased_LambdaMart/blob/7c39abe5caa18ca07df2d23c2db392916d92956c/Unbias_LightGBM/python-package/lightgbm/basic.py#L1844-L1863
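The interesting part of feature_name is the ctypes pattern: allocate N fixed-size string buffers, then build a c_char_p array from their addresses for the C side to write through. A standalone demo of the same pattern, with a Python loop standing in for the C call:

import ctypes

num = 3
buffers = [ctypes.create_string_buffer(255) for _ in range(num)]
ptrs = (ctypes.c_char_p * num)(*map(ctypes.addressof, buffers))

for i, buf in enumerate(buffers):      # a real C function would fill these
    buf.value = ('feature_%d' % i).encode()

print([buffers[i].value.decode() for i in range(num)])
# ['feature_0', 'feature_1', 'feature_2']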
infinit/memo
3a8394d0f647efe03ccb8bfe885a7279cb8be8a6
elle/drake/src/drake/__init__.py
python
ShellCommand.__init__
(self, sources, targets, command, pretty = None, cwd = None, workdir = None, environment = None, stdout = None)
Create a builder that runs command.

sources -- List of source nodes, or source node if there's only one.
targets -- List of target nodes, or target node if there's only one.
command -- The shell command to run.
pretty  -- Optional pretty printing.
Create a builder that runs command.
[ "Create", "a", "builder", "that", "runs", "command", "." ]
def __init__(self, sources, targets, command, pretty = None, cwd = None, workdir = None, environment = None, stdout = None): """Create a builder that runs command. sources -- List of source nodes, or source node if there's only one. targets -- List of target nodes, or target node if there's only one. command -- The shell command to run. pretty -- Optional pretty printing. """ if isinstance(stdout, Node) and stdout not in targets: targets.append(stdout) Builder.__init__(self, sources, targets) self.__command = command self.__pretty = pretty if cwd is not None: warnings.warn( 'drake.ShellCommand `cwd` argument is deprecated in favor of `workdir`', DeprecationWarning) self.__workdir = cwd else: self.__workdir = workdir self.__environment = environment self.__stdout = stdout
[ "def", "__init__", "(", "self", ",", "sources", ",", "targets", ",", "command", ",", "pretty", "=", "None", ",", "cwd", "=", "None", ",", "workdir", "=", "None", ",", "environment", "=", "None", ",", "stdout", "=", "None", ")", ":", "if", "isinstance", "(", "stdout", ",", "Node", ")", "and", "stdout", "not", "in", "targets", ":", "targets", ".", "append", "(", "stdout", ")", "Builder", ".", "__init__", "(", "self", ",", "sources", ",", "targets", ")", "self", ".", "__command", "=", "command", "self", ".", "__pretty", "=", "pretty", "if", "cwd", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'drake.ShellCommand `cwd` argument is deprecated in favor of `workdir`'", ",", "DeprecationWarning", ")", "self", ".", "__workdir", "=", "cwd", "else", ":", "self", ".", "__workdir", "=", "workdir", "self", ".", "__environment", "=", "environment", "self", ".", "__stdout", "=", "stdout" ]
https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L2479-L2507
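The cwd/workdir handling above is a standard keyword-deprecation shim: accept the old name, warn, and forward to the new one. A minimal sketch with hypothetical names:

import warnings

class Task:
    def __init__(self, command, cwd=None, workdir=None):
        if cwd is not None:
            warnings.warn('`cwd` is deprecated in favor of `workdir`',
                          DeprecationWarning, stacklevel=2)
            workdir = cwd              # forward the old spelling
        self.command = command
        self.workdir = workdir

t = Task('make', cwd='/tmp')   # emits a DeprecationWarning
print(t.workdir)               # /tmp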
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
Window.CacheBestSize
(*args, **kwargs)
return _core_.Window_CacheBestSize(*args, **kwargs)
CacheBestSize(self, Size size)

Cache the best size so it doesn't need to be calculated again, (at
least until some properties of the window change.)
CacheBestSize(self, Size size)
[ "CacheBestSize", "(", "self", "Size", "size", ")" ]
def CacheBestSize(*args, **kwargs): """ CacheBestSize(self, Size size) Cache the best size so it doesn't need to be calculated again, (at least until some properties of the window change.) """ return _core_.Window_CacheBestSize(*args, **kwargs)
[ "def", "CacheBestSize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Window_CacheBestSize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L9628-L9635
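The behaviour the docstring describes -- compute a best size once and reuse it until a relevant property changes -- is plain memoization with explicit invalidation. A sketch in pure Python (the widget and its measurement rule are hypothetical, not wx):

class Widget:
    def __init__(self, label):
        self._label = label
        self._best_size = None                 # the cached value

    @property
    def label(self):
        return self._label

    @label.setter
    def label(self, value):
        self._label = value
        self._best_size = None                 # invalidate on change

    def best_size(self):
        if self._best_size is None:            # recompute only when stale
            self._best_size = (8 * len(self._label) + 16, 24)
        return self._best_size

w = Widget('OK')
print(w.best_size())   # (32, 24), computed
w.label = 'Cancel'
print(w.best_size())   # (64, 24), recomputed after invalidation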
alibaba/weex_js_engine
2bdf4b6f020c1fc99c63f649718f6faf7e27fdde
jni/v8core/v8/build/gyp/pylib/gyp/input.py
python
DependencyGraphNode.LinkDependencies
(self, targets, dependencies=None, initial=True)
return dependencies
Returns a list of dependency targets that are linked into this target.

This function has a split personality, depending on the setting of
|initial|.  Outside callers should always leave |initial| at its default
setting.

When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
Returns a list of dependency targets that are linked into this target.
[ "Returns", "a", "list", "of", "dependency", "targets", "that", "are", "linked", "into", "this", "target", "." ]
def LinkDependencies(self, targets, dependencies=None, initial=True): """Returns a list of dependency targets that are linked into this target. This function has a split personality, depending on the setting of |initial|. Outside callers should always leave |initial| at its default setting. When adding a target to the list of dependencies, this function will recurse into itself with |initial| set to False, to collect dependencies that are linked into the linkable target for which the list is being built. """ if dependencies == None: dependencies = [] # Check for None, corresponding to the root node. if self.ref == None: return dependencies # It's kind of sucky that |targets| has to be passed into this function, # but that's presently the easiest way to access the target dicts so that # this function can find target types. if 'target_name' not in targets[self.ref]: raise GypError("Missing 'target_name' field in target.") if 'type' not in targets[self.ref]: raise GypError("Missing 'type' field in target %s" % targets[self.ref]['target_name']) target_type = targets[self.ref]['type'] is_linkable = target_type in linkable_types if initial and not is_linkable: # If this is the first target being examined and it's not linkable, # return an empty list of link dependencies, because the link # dependencies are intended to apply to the target itself (initial is # True) and this target won't be linked. return dependencies # Don't traverse 'none' targets if explicitly excluded. if (target_type == 'none' and not targets[self.ref].get('dependencies_traverse', True)): if self.ref not in dependencies: dependencies.append(self.ref) return dependencies # Executables and loadable modules are already fully and finally linked. # Nothing else can be a link dependency of them, there can only be # dependencies in the sense that a dependent target might run an # executable or load the loadable_module. if not initial and target_type in ('executable', 'loadable_module'): return dependencies # The target is linkable, add it to the list of link dependencies. if self.ref not in dependencies: dependencies.append(self.ref) if initial or not is_linkable: # If this is a subsequent target and it's linkable, don't look any # further for linkable dependencies, as they'll already be linked into # this target linkable. Always look at dependencies of the initial # target, and always look at dependencies of non-linkables. for dependency in self.dependencies: dependency.LinkDependencies(targets, dependencies, False) return dependencies
[ "def", "LinkDependencies", "(", "self", ",", "targets", ",", "dependencies", "=", "None", ",", "initial", "=", "True", ")", ":", "if", "dependencies", "==", "None", ":", "dependencies", "=", "[", "]", "# Check for None, corresponding to the root node.", "if", "self", ".", "ref", "==", "None", ":", "return", "dependencies", "# It's kind of sucky that |targets| has to be passed into this function,", "# but that's presently the easiest way to access the target dicts so that", "# this function can find target types.", "if", "'target_name'", "not", "in", "targets", "[", "self", ".", "ref", "]", ":", "raise", "GypError", "(", "\"Missing 'target_name' field in target.\"", ")", "if", "'type'", "not", "in", "targets", "[", "self", ".", "ref", "]", ":", "raise", "GypError", "(", "\"Missing 'type' field in target %s\"", "%", "targets", "[", "self", ".", "ref", "]", "[", "'target_name'", "]", ")", "target_type", "=", "targets", "[", "self", ".", "ref", "]", "[", "'type'", "]", "is_linkable", "=", "target_type", "in", "linkable_types", "if", "initial", "and", "not", "is_linkable", ":", "# If this is the first target being examined and it's not linkable,", "# return an empty list of link dependencies, because the link", "# dependencies are intended to apply to the target itself (initial is", "# True) and this target won't be linked.", "return", "dependencies", "# Don't traverse 'none' targets if explicitly excluded.", "if", "(", "target_type", "==", "'none'", "and", "not", "targets", "[", "self", ".", "ref", "]", ".", "get", "(", "'dependencies_traverse'", ",", "True", ")", ")", ":", "if", "self", ".", "ref", "not", "in", "dependencies", ":", "dependencies", ".", "append", "(", "self", ".", "ref", ")", "return", "dependencies", "# Executables and loadable modules are already fully and finally linked.", "# Nothing else can be a link dependency of them, there can only be", "# dependencies in the sense that a dependent target might run an", "# executable or load the loadable_module.", "if", "not", "initial", "and", "target_type", "in", "(", "'executable'", ",", "'loadable_module'", ")", ":", "return", "dependencies", "# The target is linkable, add it to the list of link dependencies.", "if", "self", ".", "ref", "not", "in", "dependencies", ":", "dependencies", ".", "append", "(", "self", ".", "ref", ")", "if", "initial", "or", "not", "is_linkable", ":", "# If this is a subsequent target and it's linkable, don't look any", "# further for linkable dependencies, as they'll already be linked into", "# this target linkable. Always look at dependencies of the initial", "# target, and always look at dependencies of non-linkables.", "for", "dependency", "in", "self", ".", "dependencies", ":", "dependency", ".", "LinkDependencies", "(", "targets", ",", "dependencies", ",", "False", ")", "return", "dependencies" ]
https://github.com/alibaba/weex_js_engine/blob/2bdf4b6f020c1fc99c63f649718f6faf7e27fdde/jni/v8core/v8/build/gyp/pylib/gyp/input.py#L1378-L1443
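The core rule in LinkDependencies is that traversal stops at a linkable dependency, because that target's own link dependencies are already folded into it. A toy re-creation on a plain dict graph (the target names, types, and linkable set are hypothetical simplifications of gyp's real rules):

LINKABLE = {'shared_library', 'static_library'}

targets = {
    'app':  {'type': 'executable',     'deps': ['liba', 'gen']},
    'liba': {'type': 'shared_library', 'deps': ['libb']},
    'libb': {'type': 'static_library', 'deps': []},
    'gen':  {'type': 'none',           'deps': []},
}

def link_deps(name, deps=None, initial=True):
    if deps is None:
        deps = []
    kind = targets[name]['type']
    if not initial:
        if kind == 'executable':
            return deps            # already fully linked; nothing propagates
        if kind in LINKABLE:
            if name not in deps:
                deps.append(name)
            return deps            # stop: its link deps are inside it
    for d in targets[name]['deps']:
        link_deps(d, deps, False)  # initial target or pass-through node
    return deps

print(link_deps('app'))  # ['liba'] -- libb stays hidden behind liba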
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/richtext.py
python
RichTextBuffer.EndBatchUndo
(*args, **kwargs)
return _richtext.RichTextBuffer_EndBatchUndo(*args, **kwargs)
EndBatchUndo(self) -> bool
EndBatchUndo(self) -> bool
[ "EndBatchUndo", "(", "self", ")", "-", ">", "bool" ]
def EndBatchUndo(*args, **kwargs): """EndBatchUndo(self) -> bool""" return _richtext.RichTextBuffer_EndBatchUndo(*args, **kwargs)
[ "def", "EndBatchUndo", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_richtext", ".", "RichTextBuffer_EndBatchUndo", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/richtext.py#L2273-L2275
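EndBatchUndo is the closing half of a Begin/End pair; keeping the pair balanced under exceptions is the usual job for a context manager. A hedged sketch -- BeginBatchUndo(cmdName) taking a command name matches the wx API as I recall it, but treat the exact signatures as assumptions:

from contextlib import contextmanager

@contextmanager
def batch_undo(buffer, name):
    buffer.BeginBatchUndo(name)      # all edits collapse into one undo step
    try:
        yield buffer
    finally:
        buffer.EndBatchUndo()        # always closes the batch, even on error

# hypothetical usage:
# with batch_undo(ctrl.GetBuffer(), 'Reformat'):
#     ...several text operations...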
NeoGeographyToolkit/StereoPipeline
eedf54a919fb5cce1ab0e280bb0df4050763aa11
src/asp/IceBridge/icebridge_common.py
python
parseDateTimeStrings
(dateString, timeString, useTimeFix, returnMinAndSecOnly)
Parse strings in the format 20110323_17433900.
Parse strings in the format 20110323_17433900.
[ "Parse", "strings", "in", "the", "format", "20110323_17433900", "." ]
def parseDateTimeStrings(dateString, timeString, useTimeFix, returnMinAndSecOnly): '''Parse strings in the format 20110323_17433900.''' MILLISECOND_TO_MICROSECOND = 10000 year = int(dateString[0:4]) month = int(dateString[4:6]) day = int(dateString[6:8]) hour = int(timeString[0:2]) minute = int(timeString[2:4]) second = int(timeString[4:6]) if returnMinAndSecOnly: return (minute, second) if useTimeFix: # Some files number the minutes and seconds from 1-60! minute = minute - 1 second = second - 1 usecond = 0 if len(timeString) > 6: usecond = int(timeString[6:8]) * MILLISECOND_TO_MICROSECOND try: result = datetime.datetime(year, month, day, hour, minute, second, usecond) return result except Exception as e: raise Exception('Caught exception processing dateString: ' + dateString +', timeString: ' + timeString +'\n with values: ' + str((year, month, day, hour, minute, second, usecond)) +'\n' + str(e))
[ "def", "parseDateTimeStrings", "(", "dateString", ",", "timeString", ",", "useTimeFix", ",", "returnMinAndSecOnly", ")", ":", "MILLISECOND_TO_MICROSECOND", "=", "10000", "year", "=", "int", "(", "dateString", "[", "0", ":", "4", "]", ")", "month", "=", "int", "(", "dateString", "[", "4", ":", "6", "]", ")", "day", "=", "int", "(", "dateString", "[", "6", ":", "8", "]", ")", "hour", "=", "int", "(", "timeString", "[", "0", ":", "2", "]", ")", "minute", "=", "int", "(", "timeString", "[", "2", ":", "4", "]", ")", "second", "=", "int", "(", "timeString", "[", "4", ":", "6", "]", ")", "if", "returnMinAndSecOnly", ":", "return", "(", "minute", ",", "second", ")", "if", "useTimeFix", ":", "# Some files number the minutes and seconds from 1-60!", "minute", "=", "minute", "-", "1", "second", "=", "second", "-", "1", "usecond", "=", "0", "if", "len", "(", "timeString", ")", ">", "6", ":", "usecond", "=", "int", "(", "timeString", "[", "6", ":", "8", "]", ")", "*", "MILLISECOND_TO_MICROSECOND", "try", ":", "result", "=", "datetime", ".", "datetime", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ",", "usecond", ")", "return", "result", "except", "Exception", "as", "e", ":", "raise", "Exception", "(", "'Caught exception processing dateString: '", "+", "dateString", "+", "', timeString: '", "+", "timeString", "+", "'\\n with values: '", "+", "str", "(", "(", "year", ",", "month", ",", "day", ",", "hour", ",", "minute", ",", "second", ",", "usecond", ")", ")", "+", "'\\n'", "+", "str", "(", "e", ")", ")" ]
https://github.com/NeoGeographyToolkit/StereoPipeline/blob/eedf54a919fb5cce1ab0e280bb0df4050763aa11/src/asp/IceBridge/icebridge_common.py#L1005-L1034
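A self-contained demo of the fixed-width slicing that parseDateTimeStrings performs, using the sample stamp from its docstring; the off-by-one time-fix branch is omitted here:

import datetime

date_s, time_s = '20110323_17433900'.split('_')
stamp = datetime.datetime(
    int(date_s[0:4]), int(date_s[4:6]), int(date_s[6:8]),   # year, month, day
    int(time_s[0:2]), int(time_s[2:4]), int(time_s[4:6]),   # hour, min, sec
    int(time_s[6:8]) * 10000)                               # hundredths -> microseconds
print(stamp)  # 2011-03-23 17:43:39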
blackberry/Boost
fc90c3fde129c62565c023f091eddc4a7ed9902b
libs/python/pyste/src/Pyste/ClassExporter.py
python
_VirtualWrapperGenerator.Declaration
(self, method, indent)
return decl
Returns a string with the declarations of the virtual wrapper and its default implementations. This string must be put inside the Wrapper body.
Returns a string with the declarations of the virtual wrapper and its default implementations. This string must be put inside the Wrapper body.
[ "Returns", "a", "string", "with", "the", "declarations", "of", "the", "virtual", "wrapper", "and", "its", "default", "implementations", ".", "This", "string", "must", "be", "put", "inside", "the", "Wrapper", "body", "." ]
def Declaration(self, method, indent): '''Returns a string with the declarations of the virtual wrapper and its default implementations. This string must be put inside the Wrapper body. ''' pyste = namespaces.pyste python = namespaces.python rename = self.info[method.name].rename or method.name result = method.result.FullName() return_str = 'return ' if result == 'void': return_str = '' params, param_names, param_types = _ParamsInfo(method) constantness = '' if method.const: constantness = ' const' # call_method callback decl = indent + '%s %s(%s)%s%s {\n' % (result, method.name, params, constantness, method.Exceptions()) param_names_str = ', '.join(param_names) if param_names_str: param_names_str = ', ' + param_names_str self_str = self.SELF decl += indent*2 + '%(return_str)s%(python)scall_method< %(result)s >' \ '(%(self_str)s, "%(rename)s"%(param_names_str)s);\n' % locals() decl += indent + '}\n' # default implementations (with overloading) def DefaultImpl(method, param_names): 'Return the body of a default implementation wrapper' indent2 = indent * 2 wrapper = self.info[method.name].wrapper if not wrapper: # return the default implementation of the class return indent2 + '%s%s(%s);\n' % \ (return_str, method.FullName(), ', '.join(param_names)) else: if wrapper.code: self.codeunit.Write('declaration-outside', wrapper.code) # return a call for the wrapper params = ', '.join(['this'] + param_names) return indent2 + '%s%s(%s);\n' % (return_str, wrapper.FullName(), params) if not method.abstract and method.visibility != Scope.private: minArgs = method.minArgs maxArgs = method.maxArgs impl_names = self.DefaultImplementationNames(method) for impl_name, argNum in zip(impl_names, range(minArgs, maxArgs+1)): params, param_names, param_types = _ParamsInfo(method, argNum) decl += '\n' decl += indent + '%s %s(%s)%s {\n' % (result, impl_name, params, constantness) decl += DefaultImpl(method, param_names) decl += indent + '}\n' return decl
[ "def", "Declaration", "(", "self", ",", "method", ",", "indent", ")", ":", "pyste", "=", "namespaces", ".", "pyste", "python", "=", "namespaces", ".", "python", "rename", "=", "self", ".", "info", "[", "method", ".", "name", "]", ".", "rename", "or", "method", ".", "name", "result", "=", "method", ".", "result", ".", "FullName", "(", ")", "return_str", "=", "'return '", "if", "result", "==", "'void'", ":", "return_str", "=", "''", "params", ",", "param_names", ",", "param_types", "=", "_ParamsInfo", "(", "method", ")", "constantness", "=", "''", "if", "method", ".", "const", ":", "constantness", "=", "' const'", "# call_method callback", "decl", "=", "indent", "+", "'%s %s(%s)%s%s {\\n'", "%", "(", "result", ",", "method", ".", "name", ",", "params", ",", "constantness", ",", "method", ".", "Exceptions", "(", ")", ")", "param_names_str", "=", "', '", ".", "join", "(", "param_names", ")", "if", "param_names_str", ":", "param_names_str", "=", "', '", "+", "param_names_str", "self_str", "=", "self", ".", "SELF", "decl", "+=", "indent", "*", "2", "+", "'%(return_str)s%(python)scall_method< %(result)s >'", "'(%(self_str)s, \"%(rename)s\"%(param_names_str)s);\\n'", "%", "locals", "(", ")", "decl", "+=", "indent", "+", "'}\\n'", "# default implementations (with overloading)", "def", "DefaultImpl", "(", "method", ",", "param_names", ")", ":", "'Return the body of a default implementation wrapper'", "indent2", "=", "indent", "*", "2", "wrapper", "=", "self", ".", "info", "[", "method", ".", "name", "]", ".", "wrapper", "if", "not", "wrapper", ":", "# return the default implementation of the class", "return", "indent2", "+", "'%s%s(%s);\\n'", "%", "(", "return_str", ",", "method", ".", "FullName", "(", ")", ",", "', '", ".", "join", "(", "param_names", ")", ")", "else", ":", "if", "wrapper", ".", "code", ":", "self", ".", "codeunit", ".", "Write", "(", "'declaration-outside'", ",", "wrapper", ".", "code", ")", "# return a call for the wrapper", "params", "=", "', '", ".", "join", "(", "[", "'this'", "]", "+", "param_names", ")", "return", "indent2", "+", "'%s%s(%s);\\n'", "%", "(", "return_str", ",", "wrapper", ".", "FullName", "(", ")", ",", "params", ")", "if", "not", "method", ".", "abstract", "and", "method", ".", "visibility", "!=", "Scope", ".", "private", ":", "minArgs", "=", "method", ".", "minArgs", "maxArgs", "=", "method", ".", "maxArgs", "impl_names", "=", "self", ".", "DefaultImplementationNames", "(", "method", ")", "for", "impl_name", ",", "argNum", "in", "zip", "(", "impl_names", ",", "range", "(", "minArgs", ",", "maxArgs", "+", "1", ")", ")", ":", "params", ",", "param_names", ",", "param_types", "=", "_ParamsInfo", "(", "method", ",", "argNum", ")", "decl", "+=", "'\\n'", "decl", "+=", "indent", "+", "'%s %s(%s)%s {\\n'", "%", "(", "result", ",", "impl_name", ",", "params", ",", "constantness", ")", "decl", "+=", "DefaultImpl", "(", "method", ",", "param_names", ")", "decl", "+=", "indent", "+", "'}\\n'", "return", "decl" ]
https://github.com/blackberry/Boost/blob/fc90c3fde129c62565c023f091eddc4a7ed9902b/libs/python/pyste/src/Pyste/ClassExporter.py#L700-L755
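Declaration is string-template code generation: it builds a C++ virtual-call forwarder from a method description. A stripped-down sketch of the same approach (the method dict and output shape are hypothetical, not Pyste's exact template):

def emit_wrapper(method, indent='    '):
    params = ', '.join('%s %s' % (t, n) for t, n in method['args'])
    names = ', '.join(n for _, n in method['args'])
    ret = '' if method['result'] == 'void' else 'return '
    return (indent + '%s %s(%s) {\n' % (method['result'], method['name'], params)
            + indent * 2 + '%scall_method< %s >(self, "%s", %s);\n'
              % (ret, method['result'], method['name'], names)
            + indent + '}\n')

print(emit_wrapper({'name': 'area', 'result': 'double',
                    'args': [('int', 'scale')]}))
#     double area(int scale) {
#         return call_method< double >(self, "area", scale);
#     }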
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/contrib/learn/python/learn/estimators/svm.py
python
SVM.predict_proba
(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=False)
return preds[linear._PROBABILITIES]
Runs inference to determine the class probability predictions.
Runs inference to determine the class probability predictions.
[ "Runs", "inference", "to", "determine", "the", "class", "probability", "predictions", "." ]
def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None, as_iterable=False): """Runs inference to determine the class probability predictions.""" preds = self._estimator.predict(x=x, input_fn=input_fn, batch_size=batch_size, outputs=[linear._PROBABILITIES], as_iterable=as_iterable) if as_iterable: return _as_iterable(preds, output=linear._PROBABILITIES) return preds[linear._PROBABILITIES]
[ "def", "predict_proba", "(", "self", ",", "x", "=", "None", ",", "input_fn", "=", "None", ",", "batch_size", "=", "None", ",", "outputs", "=", "None", ",", "as_iterable", "=", "False", ")", ":", "preds", "=", "self", ".", "_estimator", ".", "predict", "(", "x", "=", "x", ",", "input_fn", "=", "input_fn", ",", "batch_size", "=", "batch_size", ",", "outputs", "=", "[", "linear", ".", "_PROBABILITIES", "]", ",", "as_iterable", "=", "as_iterable", ")", "if", "as_iterable", ":", "return", "_as_iterable", "(", "preds", ",", "output", "=", "linear", ".", "_PROBABILITIES", ")", "return", "preds", "[", "linear", ".", "_PROBABILITIES", "]" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/contrib/learn/python/learn/estimators/svm.py#L215-L224
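predict_proba is a thin delegation: forward to the inner estimator, request a single named output, and unwrap it (lazily when as_iterable is set). A sketch with hypothetical names -- this is not the tf.contrib API:

PROBABILITIES = 'probabilities'

class ProbaWrapper:
    def __init__(self, estimator):
        self._estimator = estimator

    def predict_proba(self, x, as_iterable=False):
        preds = self._estimator.predict(x, outputs=[PROBABILITIES],
                                        as_iterable=as_iterable)
        if as_iterable:
            return (p[PROBABILITIES] for p in preds)   # lazy unwrap
        return preds[PROBABILITIES]

class StubEstimator:                    # stand-in for the real estimator
    def predict(self, x, outputs, as_iterable):
        rows = [{PROBABILITIES: [0.2, 0.8]} for _ in x]
        return iter(rows) if as_iterable else rows[0]

print(ProbaWrapper(StubEstimator()).predict_proba([0]))  # [0.2, 0.8]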
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/resmokelib/core/redirect.py
python
Pipe.wait
(self)
return self.proc.wait()
Wait for the process to terminate. Returns the error code.
Wait for the process to terminate. Returns the error code.
[ "Wait", "for", "the", "process", "to", "terminate", ".", "Returns", "the", "error", "code", "." ]
def wait(self): """Wait for the process to terminate. Returns the error code.""" return self.proc.wait()
[ "def", "wait", "(", "self", ")", ":", "return", "self", ".", "proc", ".", "wait", "(", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/resmokelib/core/redirect.py#L70-L73
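Pipe.wait simply forwards to subprocess.Popen.wait(), which blocks until the child exits and returns its exit code. A direct equivalent:

import subprocess
import sys

proc = subprocess.Popen([sys.executable, '-c', 'raise SystemExit(3)'])
print(proc.wait())  # 3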
Kitware/ParaView
f760af9124ff4634b23ebbeab95a4f56e0261955
Plugins/pvblot/blotish.py
python
plot
(*args)
Generates the current plot.
Generates the current plot.
[ "Generates", "the", "current", "plot", "." ]
def plot(*args): "Generates the current plot." if state.subProgram == "tplot": return _tplot_plot() if state.subProgram == "detour": return _detour_plot()
[ "def", "plot", "(", "*", "args", ")", ":", "if", "state", ".", "subProgram", "==", "\"tplot\"", ":", "return", "_tplot_plot", "(", ")", "if", "state", ".", "subProgram", "==", "\"detour\"", ":", "return", "_detour_plot", "(", ")" ]
https://github.com/Kitware/ParaView/blob/f760af9124ff4634b23ebbeab95a4f56e0261955/Plugins/pvblot/blotish.py#L832-L835