Schema (column, type, length range / classes):

nwo                stringlengths   5–86
sha                stringlengths   40–40
path               stringlengths   4–189
language           stringclasses   1 value
identifier         stringlengths   1–94
parameters         stringlengths   2–4.03k
argument_list      stringclasses   1 value
return_statement   stringlengths   0–11.5k
docstring          stringlengths   1–33.2k
docstring_summary  stringlengths   0–5.15k
docstring_tokens   list
function           stringlengths   34–151k
function_tokens    list
url                stringlengths   90–278
hakuna-m/wubiuefi
caec1af0a09c78fd5a345180ada1fe45e0c63493
src/pypack/altgraph/Dot.py
python
Dot.edge_style
(self, head, tail, **kwargs)
Modifies an edge style to the dot representation.
Modifies an edge style to the dot representation.
[ "Modifies", "an", "edge", "style", "to", "the", "dot", "representation", "." ]
def edge_style(self, head, tail, **kwargs):
    '''
    Modifies an edge style to the dot representation.
    '''
    try:
        if tail not in self.edges[head]:
            self.edges[head][tail] = {}
        self.edges[head][tail] = kwargs
    except KeyError:
        raise GraphError("invalid edge %s -> %s " % (head, tail))
[ "def", "edge_style", "(", "self", ",", "head", ",", "tail", ",", "*", "*", "kwargs", ")", ":", "try", ":", "if", "tail", "not", "in", "self", ".", "edges", "[", "head", "]", ":", "self", ".", "edges", "[", "head", "]", "[", "tail", "]", "=", "{", "}", "self", ".", "edges", "[", "head", "]", "[", "tail", "]", "=", "kwargs", "except", "KeyError", ":", "raise", "GraphError", "(", "\"invalid edge %s -> %s \"", "%", "(", "head", ",", "tail", ")", ")" ]
https://github.com/hakuna-m/wubiuefi/blob/caec1af0a09c78fd5a345180ada1fe45e0c63493/src/pypack/altgraph/Dot.py#L177-L186
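A minimal usage sketch for the edge_style API in the record above, assuming altgraph's Graph and Dot classes as shipped in the upstream package; the node ids and style attributes are illustrative:

from altgraph.Graph import Graph
from altgraph import Dot

graph = Graph()
graph.add_edge(1, 2)               # creates nodes 1 and 2 and a 1 -> 2 edge

dot = Dot.Dot(graph)
# kwargs become the edge's attribute dict in the generated dot source
dot.edge_style(1, 2, color="red", style="dashed")
dot.save_dot("example.dot")        # write the .dot file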
include-what-you-use/include-what-you-use
208fbfffa5d69364b9f78e427caa443441279283
fix_includes.py
python
_CalculateMoveSpans
(file_lines, forward_declare_spans)
Fills each input_line's move_span field.

A 'move span' is a range of lines (from file_lines) that includes an
#include or forward-declare, and all the comments preceding it. It is
the unit we would move if we decided to move (or delete) this #include
or forward-declare.

For lines of type _INCLUDE_RE or _FORWARD_DECLARE_RE, the move span is
set to the tuple [start_of_span, end_of_span). All other lines have the
move span kept at None.

Arguments:
  file_lines: an array of LineInfo objects, with .type fields filled in.
  forward_declare_spans: a set of line-number pairs
    [start_line, end_line), each representing a single namespace.
    In practice this comes from iwyu_record.seen_forward_declare_lines.
Fills each input_line's move_span field.
[ "Fills", "each", "input_line", "s", "move_span", "field", "." ]
def _CalculateMoveSpans(file_lines, forward_declare_spans):
  """Fills each input_line's move_span field.

  A 'move span' is a range of lines (from file_lines) that includes an
  #include or forward-declare, and all the comments preceding it. It is
  the unit we would move if we decided to move (or delete) this #include
  or forward-declare.

  For lines of type _INCLUDE_RE or _FORWARD_DECLARE_RE, the move span is
  set to the tuple [start_of_span, end_of_span). All other lines have the
  move span kept at None.

  Arguments:
    file_lines: an array of LineInfo objects, with .type fields filled in.
    forward_declare_spans: a set of line-number pairs
      [start_line, end_line), each representing a single namespace.
      In practice this comes from iwyu_record.seen_forward_declare_lines.
  """
  # First let's do #includes.
  for line_number in range(len(file_lines)):
    if file_lines[line_number].type == _INCLUDE_RE:
      span_begin = _LineNumberStartingPrecedingComments(file_lines,
                                                        line_number)
      for i in range(span_begin, line_number + 1):
        file_lines[i].move_span = (span_begin, line_number + 1)

  # Now forward-declares. These spans come as input to this function.
  for (span_begin, span_end) in forward_declare_spans:
    span_begin = _LineNumberStartingPrecedingComments(file_lines, span_begin)
    for i in range(span_begin, span_end):
      file_lines[i].move_span = (span_begin, span_end)
[ "def", "_CalculateMoveSpans", "(", "file_lines", ",", "forward_declare_spans", ")", ":", "# First let's do #includes.", "for", "line_number", "in", "range", "(", "len", "(", "file_lines", ")", ")", ":", "if", "file_lines", "[", "line_number", "]", ".", "type", "==", "_INCLUDE_RE", ":", "span_begin", "=", "_LineNumberStartingPrecedingComments", "(", "file_lines", ",", "line_number", ")", "for", "i", "in", "range", "(", "span_begin", ",", "line_number", "+", "1", ")", ":", "file_lines", "[", "i", "]", ".", "move_span", "=", "(", "span_begin", ",", "line_number", "+", "1", ")", "# Now forward-declares. These spans come as input to this function.", "for", "(", "span_begin", ",", "span_end", ")", "in", "forward_declare_spans", ":", "span_begin", "=", "_LineNumberStartingPrecedingComments", "(", "file_lines", ",", "span_begin", ")", "for", "i", "in", "range", "(", "span_begin", ",", "span_end", ")", ":", "file_lines", "[", "i", "]", ".", "move_span", "=", "(", "span_begin", ",", "span_end", ")" ]
https://github.com/include-what-you-use/include-what-you-use/blob/208fbfffa5d69364b9f78e427caa443441279283/fix_includes.py#L860-L889
google/zooshi
05390b98f79eed8ef26ec4ab2c3aea3790b1165c
scripts/build_assets.py
python
main
()
return builder.main( project_root=PROJECT_ROOT, assets_path=ASSETS_PATH, asset_meta=ASSET_META, asset_roots=ASSET_ROOTS, intermediate_path=INTERMEDIATE_TEXTURE_PATH, overlay_dirs=OVERLAY_DIRS, tga_files_to_convert=tga_files_to_convert, png_files_to_convert=png_files_to_convert, anim_files_to_convert=anim_files_to_convert, fbx_files_to_convert=fbx_files_to_convert, flatbuffers_conversion_data=lambda: FLATBUFFERS_CONVERSION_DATA, schema_output_path='flatbufferschemas')
Builds or cleans the assets needed for the game.

To build all assets, either call this script without any arguments, or
call it with the argument 'all'. To just convert the flatbuffer json
files, call it with 'flatbuffers'. Likewise, to convert the png files to
webp files, call it with 'webp'. To clean all converted files, call it
with 'clean'.

Returns:
  Returns 0 on success.
Builds or cleans the assets needed for the game.
[ "Builds", "or", "cleans", "the", "assets", "needed", "for", "the", "game", "." ]
def main():
  """Builds or cleans the assets needed for the game.

  To build all assets, either call this script without any arguments, or
  call it with the argument 'all'. To just convert the flatbuffer json
  files, call it with 'flatbuffers'. Likewise, to convert the png files to
  webp files, call it with 'webp'. To clean all converted files, call it
  with 'clean'.

  Returns:
    Returns 0 on success.
  """
  return builder.main(
      project_root=PROJECT_ROOT,
      assets_path=ASSETS_PATH,
      asset_meta=ASSET_META,
      asset_roots=ASSET_ROOTS,
      intermediate_path=INTERMEDIATE_TEXTURE_PATH,
      overlay_dirs=OVERLAY_DIRS,
      tga_files_to_convert=tga_files_to_convert,
      png_files_to_convert=png_files_to_convert,
      anim_files_to_convert=anim_files_to_convert,
      fbx_files_to_convert=fbx_files_to_convert,
      flatbuffers_conversion_data=lambda: FLATBUFFERS_CONVERSION_DATA,
      schema_output_path='flatbufferschemas')
[ "def", "main", "(", ")", ":", "return", "builder", ".", "main", "(", "project_root", "=", "PROJECT_ROOT", ",", "assets_path", "=", "ASSETS_PATH", ",", "asset_meta", "=", "ASSET_META", ",", "asset_roots", "=", "ASSET_ROOTS", ",", "intermediate_path", "=", "INTERMEDIATE_TEXTURE_PATH", ",", "overlay_dirs", "=", "OVERLAY_DIRS", ",", "tga_files_to_convert", "=", "tga_files_to_convert", ",", "png_files_to_convert", "=", "png_files_to_convert", ",", "anim_files_to_convert", "=", "anim_files_to_convert", ",", "fbx_files_to_convert", "=", "fbx_files_to_convert", ",", "flatbuffers_conversion_data", "=", "lambda", ":", "FLATBUFFERS_CONVERSION_DATA", ",", "schema_output_path", "=", "'flatbufferschemas'", ")" ]
https://github.com/google/zooshi/blob/05390b98f79eed8ef26ec4ab2c3aea3790b1165c/scripts/build_assets.py#L217-L241
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py
python
_sparse_apply_ftrl_v2_tbe
()
return
SparseApplyFtrlV2D TBE register
SparseApplyFtrlV2D TBE register
[ "SparseApplyFtrlV2D", "TBE", "register" ]
def _sparse_apply_ftrl_v2_tbe():
    """SparseApplyFtrlV2D TBE register"""
    return
[ "def", "_sparse_apply_ftrl_v2_tbe", "(", ")", ":", "return" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py#L48-L50
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/logging/handlers.py
python
BaseRotatingHandler.emit
(self, record)
Emit a record. Output the record to the file, catering for rollover as described in doRollover().
Emit a record.
[ "Emit", "a", "record", "." ]
def emit(self, record):
    """
    Emit a record.

    Output the record to the file, catering for rollover as described
    in doRollover().
    """
    try:
        if self.shouldRollover(record):
            self.doRollover()
        logging.FileHandler.emit(self, record)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
[ "def", "emit", "(", "self", ",", "record", ")", ":", "try", ":", "if", "self", ".", "shouldRollover", "(", "record", ")", ":", "self", ".", "doRollover", "(", ")", "logging", ".", "FileHandler", ".", "emit", "(", "self", ",", "record", ")", "except", "(", "KeyboardInterrupt", ",", "SystemExit", ")", ":", "raise", "except", ":", "self", ".", "handleError", "(", "record", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/logging/handlers.py#L68-L82
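The emit above is what lets the concrete rotating handlers act as drop-in replacements for FileHandler; a self-contained sketch using the stdlib's RotatingFileHandler (the file name and limits are arbitrary):

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger("demo")
# Roll over after ~1 KiB, keeping three old files (demo.log.1 .. .3).
handler = RotatingFileHandler("demo.log", maxBytes=1024, backupCount=3)
logger.addHandler(handler)

for i in range(200):
    # Each record passes through emit(), which calls shouldRollover() first.
    logger.warning("message %d", i)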
scylladb/dpdk
cc7e6ed22c0fc08e3ff37b3e68a61979d8214547
tools/dpdk_nic_bind.py
python
get_nic_details
()
This function populates the "devices" dictionary. The keys used are the pci addresses (domain:bus:slot.func). The values are themselves dictionaries - one for each NIC.
This function populates the "devices" dictionary. The keys used are the pci addresses (domain:bus:slot.func). The values are themselves dictionaries - one for each NIC.
[ "This", "function", "populates", "the", "devices", "dictionary", ".", "The", "keys", "used", "are", "the", "pci", "addresses", "(", "domain", ":", "bus", ":", "slot", ".", "func", ")", ".", "The", "values", "are", "themselves", "dictionaries", "-", "one", "for", "each", "NIC", "." ]
def get_nic_details():
    '''This function populates the "devices" dictionary. The keys used are
    the pci addresses (domain:bus:slot.func). The values are themselves
    dictionaries - one for each NIC.'''
    global devices
    global dpdk_drivers

    # clear any old data
    devices = {}
    # first loop through and read details for all devices
    # request machine readable format, with numeric IDs
    dev = {}
    dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
    for dev_line in dev_lines:
        if (len(dev_line) == 0):
            if dev["Class"] == ETHERNET_CLASS:
                # convert device and vendor ids to numbers, then add to global
                dev["Vendor"] = int(dev["Vendor"], 16)
                dev["Device"] = int(dev["Device"], 16)
                devices[dev["Slot"]] = dict(dev)  # use dict to make copy of dev
        else:
            name, value = dev_line.split("\t", 1)
            dev[name.rstrip(":")] = value

    # check what is the interface if any for an ssh connection if
    # any to this host, so we can mark it later.
    ssh_if = []
    route = check_output(["ip", "-o", "route"])
    # filter out all lines for 169.254 routes
    route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
                             route.splitlines()))
    rt_info = route.split()
    for i in xrange(len(rt_info) - 1):
        if rt_info[i] == "dev":
            ssh_if.append(rt_info[i+1])

    # based on the basic info, get extended text details
    for d in devices.keys():
        # get additional info and add it to existing data
        devices[d] = dict(devices[d].items() +
                          get_pci_device_details(d).items())

        for _if in ssh_if:
            if _if in devices[d]["Interface"].split(","):
                devices[d]["Ssh_if"] = True
                devices[d]["Active"] = "*Active*"
                break

        # add igb_uio to list of supporting modules if needed
        if "Module_str" in devices[d]:
            for driver in dpdk_drivers:
                if driver not in devices[d]["Module_str"]:
                    devices[d]["Module_str"] = (devices[d]["Module_str"] +
                                                ",%s" % driver)
        else:
            devices[d]["Module_str"] = ",".join(dpdk_drivers)

        # make sure the driver and module strings do not have any duplicates
        if has_driver(d):
            modules = devices[d]["Module_str"].split(",")
            if devices[d]["Driver_str"] in modules:
                modules.remove(devices[d]["Driver_str"])
                devices[d]["Module_str"] = ",".join(modules)
[ "def", "get_nic_details", "(", ")", ":", "global", "devices", "global", "dpdk_drivers", "# clear any old data", "devices", "=", "{", "}", "# first loop through and read details for all devices", "# request machine readable format, with numeric IDs", "dev", "=", "{", "}", "dev_lines", "=", "check_output", "(", "[", "\"lspci\"", ",", "\"-Dvmmn\"", "]", ")", ".", "splitlines", "(", ")", "for", "dev_line", "in", "dev_lines", ":", "if", "(", "len", "(", "dev_line", ")", "==", "0", ")", ":", "if", "dev", "[", "\"Class\"", "]", "==", "ETHERNET_CLASS", ":", "#convert device and vendor ids to numbers, then add to global", "dev", "[", "\"Vendor\"", "]", "=", "int", "(", "dev", "[", "\"Vendor\"", "]", ",", "16", ")", "dev", "[", "\"Device\"", "]", "=", "int", "(", "dev", "[", "\"Device\"", "]", ",", "16", ")", "devices", "[", "dev", "[", "\"Slot\"", "]", "]", "=", "dict", "(", "dev", ")", "# use dict to make copy of dev", "else", ":", "name", ",", "value", "=", "dev_line", ".", "split", "(", "\"\\t\"", ",", "1", ")", "dev", "[", "name", ".", "rstrip", "(", "\":\"", ")", "]", "=", "value", "# check what is the interface if any for an ssh connection if", "# any to this host, so we can mark it later.", "ssh_if", "=", "[", "]", "route", "=", "check_output", "(", "[", "\"ip\"", ",", "\"-o\"", ",", "\"route\"", "]", ")", "# filter out all lines for 169.254 routes", "route", "=", "\"\\n\"", ".", "join", "(", "filter", "(", "lambda", "ln", ":", "not", "ln", ".", "startswith", "(", "\"169.254\"", ")", ",", "route", ".", "splitlines", "(", ")", ")", ")", "rt_info", "=", "route", ".", "split", "(", ")", "for", "i", "in", "xrange", "(", "len", "(", "rt_info", ")", "-", "1", ")", ":", "if", "rt_info", "[", "i", "]", "==", "\"dev\"", ":", "ssh_if", ".", "append", "(", "rt_info", "[", "i", "+", "1", "]", ")", "# based on the basic info, get extended text details", "for", "d", "in", "devices", ".", "keys", "(", ")", ":", "# get additional info and add it to existing data", "devices", "[", "d", "]", "=", "dict", "(", "devices", "[", "d", "]", ".", "items", "(", ")", "+", "get_pci_device_details", "(", "d", ")", ".", "items", "(", ")", ")", "for", "_if", "in", "ssh_if", ":", "if", "_if", "in", "devices", "[", "d", "]", "[", "\"Interface\"", "]", ".", "split", "(", "\",\"", ")", ":", "devices", "[", "d", "]", "[", "\"Ssh_if\"", "]", "=", "True", "devices", "[", "d", "]", "[", "\"Active\"", "]", "=", "\"*Active*\"", "break", "# add igb_uio to list of supporting modules if needed", "if", "\"Module_str\"", "in", "devices", "[", "d", "]", ":", "for", "driver", "in", "dpdk_drivers", ":", "if", "driver", "not", "in", "devices", "[", "d", "]", "[", "\"Module_str\"", "]", ":", "devices", "[", "d", "]", "[", "\"Module_str\"", "]", "=", "devices", "[", "d", "]", "[", "\"Module_str\"", "]", "+", "\",%s\"", "%", "driver", "else", ":", "devices", "[", "d", "]", "[", "\"Module_str\"", "]", "=", "\",\"", ".", "join", "(", "dpdk_drivers", ")", "# make sure the driver and module strings do not have any duplicates", "if", "has_driver", "(", "d", ")", ":", "modules", "=", "devices", "[", "d", "]", "[", "\"Module_str\"", "]", ".", "split", "(", "\",\"", ")", "if", "devices", "[", "d", "]", "[", "\"Driver_str\"", "]", "in", "modules", ":", "modules", ".", "remove", "(", "devices", "[", "d", "]", "[", "\"Driver_str\"", "]", ")", "devices", "[", "d", "]", "[", "\"Module_str\"", "]", "=", "\",\"", ".", "join", "(", "modules", ")" ]
https://github.com/scylladb/dpdk/blob/cc7e6ed22c0fc08e3ff37b3e68a61979d8214547/tools/dpdk_nic_bind.py#L216-L277
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/framework/tensor_shape.py
python
Dimension.__le__
(self, other)
Returns True if `self` is known to be less than or equal to `other`.

Dimensions are compared as follows:

```python
(tf.Dimension(m) <= tf.Dimension(n)) == (m <= n)
(tf.Dimension(m) <= tf.Dimension(None)) == None
(tf.Dimension(None) <= tf.Dimension(n)) == None
(tf.Dimension(None) <= tf.Dimension(None)) == None
```

Args:
  other: Another Dimension.

Returns:
  The value of `self.value <= other.value` if both are known, otherwise
  None.
Returns True if `self` is known to be less than or equal to `other`.
[ "Returns", "True", "if", "self", "is", "known", "to", "be", "less", "than", "or", "equal", "to", "other", "." ]
def __le__(self, other):
  """Returns True if `self` is known to be less than or equal to `other`.

  Dimensions are compared as follows:

  ```python
  (tf.Dimension(m) <= tf.Dimension(n)) == (m <= n)
  (tf.Dimension(m) <= tf.Dimension(None)) == None
  (tf.Dimension(None) <= tf.Dimension(n)) == None
  (tf.Dimension(None) <= tf.Dimension(None)) == None
  ```

  Args:
    other: Another Dimension.

  Returns:
    The value of `self.value <= other.value` if both are known, otherwise
    None.
  """
  other = as_dimension(other)
  if self._value is None or other.value is None:
    return None
  else:
    return self._value <= other.value
[ "def", "__le__", "(", "self", ",", "other", ")", ":", "other", "=", "as_dimension", "(", "other", ")", "if", "self", ".", "_value", "is", "None", "or", "other", ".", "value", "is", "None", ":", "return", "None", "else", ":", "return", "self", ".", "_value", "<=", "other", ".", "value" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/framework/tensor_shape.py#L305-L328
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py
python
PrivateL1CacheHierarchy.__init__
(self, size: str, assoc: int)
:param size: The size of the private I/D caches in the hierarchy.
:param assoc: The associativity of each cache.
:param size: The size of the private I/D caches in the hierarchy. :param assoc: The associativity of each cache.
[ ":", "param", "size", ":", "The", "size", "of", "the", "private", "I", "/", "D", "caches", "in", "the", "hierarchy", ".", ":", "param", "assoc", ":", "The", "associativity", "of", "each", "cache", "." ]
def __init__(self, size: str, assoc: int) -> None:
    """
    :param size: The size of the private I/D caches in the hierarchy.
    :param assoc: The associativity of each cache.
    """
    super().__init__()

    self._size = size
    self._assoc = assoc
[ "def", "__init__", "(", "self", ",", "size", ":", "str", ",", "assoc", ":", "int", ")", "->", "None", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "_size", "=", "size", "self", ".", "_assoc", "=", "assoc" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/gem5/components/cachehierarchies/chi/private_l1_cache_hierarchy.py#L71-L79
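A hedged instantiation sketch; in gem5's standard library the hierarchy object is handed to a board component, which is outside this record, so only the constructor call below is taken from it and the sizes are illustrative:

from gem5.components.cachehierarchies.chi.private_l1_cache_hierarchy import (
    PrivateL1CacheHierarchy,
)

# Private 32 KiB, 8-way L1 I/D caches per core.
cache_hierarchy = PrivateL1CacheHierarchy(size="32KiB", assoc=8)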
hakuna-m/wubiuefi
caec1af0a09c78fd5a345180ada1fe45e0c63493
src/pypack/modulegraph/pkg_resources.py
python
WorkingSet.iter_entry_points
(self, group, name=None)
Yield entry point objects from `group` matching `name`

If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching both
`group` and `name` are yielded (in distribution order).
Yield entry point objects from `group` matching `name`
[ "Yield", "entry", "point", "objects", "from", "group", "matching", "name" ]
def iter_entry_points(self, group, name=None):
    """Yield entry point objects from `group` matching `name`

    If `name` is None, yields all entry points in `group` from all
    distributions in the working set, otherwise only ones matching
    both `group` and `name` are yielded (in distribution order).
    """
    for dist in self:
        entries = dist.get_entry_map(group)
        if name is None:
            for ep in entries.values():
                yield ep
        elif name in entries:
            yield entries[name]
[ "def", "iter_entry_points", "(", "self", ",", "group", ",", "name", "=", "None", ")", ":", "for", "dist", "in", "self", ":", "entries", "=", "dist", ".", "get_entry_map", "(", "group", ")", "if", "name", "is", "None", ":", "for", "ep", "in", "entries", ".", "values", "(", ")", ":", "yield", "ep", "elif", "name", "in", "entries", ":", "yield", "entries", "[", "name", "]" ]
https://github.com/hakuna-m/wubiuefi/blob/caec1af0a09c78fd5a345180ada1fe45e0c63493/src/pypack/modulegraph/pkg_resources.py#L350-L363
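A usage sketch with the stock pkg_resources module, whose module-level iter_entry_points delegates to the global working set; the 'console_scripts' group is just the most common example:

import pkg_resources

# Every console script advertised by installed distributions.
for ep in pkg_resources.iter_entry_points(group="console_scripts"):
    print(ep.name, "->", ep.module_name)

# A single named entry point; load() imports and returns the callable.
for ep in pkg_resources.iter_entry_points("console_scripts", name="pip"):
    main = ep.load()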
baidu/Familia
958febfd5fe7a61e46a35bfb084e71f806dde6a6
python/familia_wrapper.py
python
TopicalWordEmbeddingsWrapper.__init__
(self, model_dir, emb_file)
Initialize the TWE object.

Args:
    model_dir: path of the model directory
    emb_file: topical word embeddings model file
Initialize the TWE object. Args: model_dir: path of the model directory. emb_file: topical word embeddings model file.
[ "Initialize", "the", "TWE", "object", ".", "Args", ":", "model_dir", ":", "path", "of", "the", "model", "directory", "emb_file", ":", "topical", "word", "embeddings", "model", "file" ]
def __init__(self, model_dir, emb_file):
    """Initialize the TWE object.

    Args:
        model_dir: path of the model directory
        emb_file: topical word embeddings model file
    """
    self._twe = familia.init_twe(model_dir, emb_file)
[ "def", "__init__", "(", "self", ",", "model_dir", ",", "emb_file", ")", ":", "self", ".", "_twe", "=", "familia", ".", "init_twe", "(", "model_dir", ",", "emb_file", ")" ]
https://github.com/baidu/Familia/blob/958febfd5fe7a61e46a35bfb084e71f806dde6a6/python/familia_wrapper.py#L190-L196
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/eager/monitoring.py
python
CounterCell.increase_by
(self, value)
Atomically increments the value.

Args:
  value: non-negative value.
Atomically increments the value.
[ "Atomically", "increments", "the", "value", "." ]
def increase_by(self, value):
  """Atomically increments the value.

  Args:
    value: non-negative value.
  """
  pywrap_tensorflow.TFE_MonitoringCounterCellIncrementBy(self._cell, value)
[ "def", "increase_by", "(", "self", ",", "value", ")", ":", "pywrap_tensorflow", ".", "TFE_MonitoringCounterCellIncrementBy", "(", "self", ".", "_cell", ",", "value", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/eager/monitoring.py#L153-L159
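A sketch of the Counter API that hands out these cells in the same monitoring module; the metric name and label value are illustrative:

from tensorflow.python.eager import monitoring

# A counter with one label; get_cell(label) returns a CounterCell.
counter = monitoring.Counter("/demo/requests", "Requests served.", "status")
counter.get_cell("ok").increase_by(1)
print(counter.get_cell("ok").value())   # 1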
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Editor/Python/windows/Lib/site-packages/pip/_vendor/distlib/database.py
python
EggInfoDistribution.check_installed_files
(self)
return mismatches
Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value.
Checks that the hashes and sizes of the files in ``RECORD`` are matched by the files themselves. Returns a (possibly empty) list of mismatches. Each entry in the mismatch list will be a tuple consisting of the path, 'exists', 'size' or 'hash' according to what didn't match (existence is checked first, then size, then hash), the expected value and the actual value.
[ "Checks", "that", "the", "hashes", "and", "sizes", "of", "the", "files", "in", "RECORD", "are", "matched", "by", "the", "files", "themselves", ".", "Returns", "a", "(", "possibly", "empty", ")", "list", "of", "mismatches", ".", "Each", "entry", "in", "the", "mismatch", "list", "will", "be", "a", "tuple", "consisting", "of", "the", "path", "exists", "size", "or", "hash", "according", "to", "what", "didn", "t", "match", "(", "existence", "is", "checked", "first", "then", "size", "then", "hash", ")", "the", "expected", "value", "and", "the", "actual", "value", "." ]
def check_installed_files(self):
    """
    Checks that the hashes and sizes of the files in ``RECORD`` are
    matched by the files themselves. Returns a (possibly empty) list of
    mismatches. Each entry in the mismatch list will be a tuple consisting
    of the path, 'exists', 'size' or 'hash' according to what didn't match
    (existence is checked first, then size, then hash), the expected
    value and the actual value.
    """
    mismatches = []
    record_path = os.path.join(self.path, 'installed-files.txt')
    if os.path.exists(record_path):
        for path, _, _ in self.list_installed_files():
            if path == record_path:
                continue
            if not os.path.exists(path):
                mismatches.append((path, 'exists', True, False))
    return mismatches
[ "def", "check_installed_files", "(", "self", ")", ":", "mismatches", "=", "[", "]", "record_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "'installed-files.txt'", ")", "if", "os", ".", "path", ".", "exists", "(", "record_path", ")", ":", "for", "path", ",", "_", ",", "_", "in", "self", ".", "list_installed_files", "(", ")", ":", "if", "path", "==", "record_path", ":", "continue", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "mismatches", ".", "append", "(", "(", "path", ",", "'exists'", ",", "True", ",", "False", ")", ")", "return", "mismatches" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/_vendor/distlib/database.py#L949-L966
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Draft/draftguitools/gui_labels.py
python
Label.Activated
(self)
Execute when the command is called.
Execute when the command is called.
[ "Execute", "when", "the", "command", "is", "called", "." ]
def Activated(self):
    """Execute when the command is called."""
    super(Label, self).Activated(name="Label")
    self.ghost = None
    self.labeltype = utils.getParam("labeltype", "Custom")
    self.sel = Gui.Selection.getSelectionEx()
    if self.sel:
        self.sel = self.sel[0]
    self.ui.labelUi(title=translate("draft", self.featureName),
                    callback=self.setmode)
    self.ui.xValue.setFocus()
    self.ui.xValue.selectAll()
    self.ghost = trackers.lineTracker()
    self.call = self.view.addEventCallback("SoEvent", self.action)
    _msg(translate("draft", "Pick target point"))
    self.ui.isCopy.hide()
[ "def", "Activated", "(", "self", ")", ":", "super", "(", "Label", ",", "self", ")", ".", "Activated", "(", "name", "=", "\"Label\"", ")", "self", ".", "ghost", "=", "None", "self", ".", "labeltype", "=", "utils", ".", "getParam", "(", "\"labeltype\"", ",", "\"Custom\"", ")", "self", ".", "sel", "=", "Gui", ".", "Selection", ".", "getSelectionEx", "(", ")", "if", "self", ".", "sel", ":", "self", ".", "sel", "=", "self", ".", "sel", "[", "0", "]", "self", ".", "ui", ".", "labelUi", "(", "title", "=", "translate", "(", "\"draft\"", ",", "self", ".", "featureName", ")", ",", "callback", "=", "self", ".", "setmode", ")", "self", ".", "ui", ".", "xValue", ".", "setFocus", "(", ")", "self", ".", "ui", ".", "xValue", ".", "selectAll", "(", ")", "self", ".", "ghost", "=", "trackers", ".", "lineTracker", "(", ")", "self", ".", "call", "=", "self", ".", "view", ".", "addEventCallback", "(", "\"SoEvent\"", ",", "self", ".", "action", ")", "_msg", "(", "translate", "(", "\"draft\"", ",", "\"Pick target point\"", ")", ")", "self", ".", "ui", ".", "isCopy", ".", "hide", "(", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Draft/draftguitools/gui_labels.py#L67-L81
klzgrad/naiveproxy
ed2c513637c77b18721fe428d7ed395b4d284c83
src/build/android/gyp/util/resource_utils.py
python
RJavaBuildOptions._IsResourceFinal
(self, entry)
return entry.name not in self.resources_allowlist
Determines whether a resource should be final or not.

Args:
  entry: A _TextSymbolEntry instance.
Returns:
  True iff the corresponding entry should be final.
Determines whether a resource should be final or not.
[ "Determines", "whether", "a", "resource", "should", "be", "final", "or", "not", "." ]
def _IsResourceFinal(self, entry):
  """Determines whether a resource should be final or not.

  Args:
    entry: A _TextSymbolEntry instance.
  Returns:
    True iff the corresponding entry should be final.
  """
  if entry.resource_type == 'styleable' and entry.java_type != 'int[]':
    # A styleable constant may be exported as non-final after all.
    return not self.export_const_styleable
  if not self.has_constant_ids:
    # Every resource is non-final
    return False
  if not self.resources_allowlist:
    # No allowlist means all IDs are final.
    return True
  # Otherwise, only those in the allowlist are non-final.
  return entry.name not in self.resources_allowlist
[ "def", "_IsResourceFinal", "(", "self", ",", "entry", ")", ":", "if", "entry", ".", "resource_type", "==", "'styleable'", "and", "entry", ".", "java_type", "!=", "'int[]'", ":", "# A styleable constant may be exported as non-final after all.", "return", "not", "self", ".", "export_const_styleable", "if", "not", "self", ".", "has_constant_ids", ":", "# Every resource is non-final", "return", "False", "if", "not", "self", ".", "resources_allowlist", ":", "# No allowlist means all IDs are final.", "return", "True", "# Otherwise, only those in the allowlist are non-final.", "return", "entry", ".", "name", "not", "in", "self", ".", "resources_allowlist" ]
https://github.com/klzgrad/naiveproxy/blob/ed2c513637c77b18721fe428d7ed395b4d284c83/src/build/android/gyp/util/resource_utils.py#L511-L529
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/tools/import_pb_to_tensorboard.py
python
import_to_tensorboard
(model_dir, log_dir, tag_set)
View a SavedModel as a graph in Tensorboard.

Args:
  model_dir: The directory containing the SavedModel to import.
  log_dir: The location for the Tensorboard log to begin visualization from.
  tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
    separated by ','. If the tag-set contains multiple tags, all tags must
    be passed in.

Usage:
  Call this function with your SavedModel location and desired log directory.
  Launch Tensorboard by pointing it to the log directory. View your imported
  SavedModel as a graph.
View a SavedModel as a graph in Tensorboard.
[ "View", "a", "SavedModel", "as", "a", "graph", "in", "Tensorboard", "." ]
def import_to_tensorboard(model_dir, log_dir, tag_set):
  """View a SavedModel as a graph in Tensorboard.

  Args:
    model_dir: The directory containing the SavedModel to import.
    log_dir: The location for the Tensorboard log to begin visualization from.
    tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,
      separated by ','. If the tag-set contains multiple tags, all tags must
      be passed in.

  Usage:
    Call this function with your SavedModel location and desired log
    directory. Launch Tensorboard by pointing it to the log directory.
    View your imported SavedModel as a graph.
  """
  with session.Session(graph=ops.Graph()) as sess:
    input_graph_def = saved_model_utils.get_meta_graph_def(model_dir,
                                                           tag_set).graph_def
    importer.import_graph_def(input_graph_def)

    pb_visual_writer = summary.FileWriter(log_dir)
    pb_visual_writer.add_graph(sess.graph)
    print("Model Imported. Visualize by running: "
          "tensorboard --logdir={}".format(log_dir))
[ "def", "import_to_tensorboard", "(", "model_dir", ",", "log_dir", ",", "tag_set", ")", ":", "with", "session", ".", "Session", "(", "graph", "=", "ops", ".", "Graph", "(", ")", ")", "as", "sess", ":", "input_graph_def", "=", "saved_model_utils", ".", "get_meta_graph_def", "(", "model_dir", ",", "tag_set", ")", ".", "graph_def", "importer", ".", "import_graph_def", "(", "input_graph_def", ")", "pb_visual_writer", "=", "summary", ".", "FileWriter", "(", "log_dir", ")", "pb_visual_writer", ".", "add_graph", "(", "sess", ".", "graph", ")", "print", "(", "\"Model Imported. Visualize by running: \"", "\"tensorboard --logdir={}\"", ".", "format", "(", "log_dir", ")", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/tools/import_pb_to_tensorboard.py#L40-L61
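A hedged call sketch for the function above; the paths and tag set are placeholders:

# Placeholders for a real SavedModel directory and log directory.
import_to_tensorboard(model_dir="/tmp/my_saved_model",
                      log_dir="/tmp/tb_logs",
                      tag_set="serve")
# Then, from a shell, as the function's own hint suggests:
#   tensorboard --logdir=/tmp/tb_logs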
llvm/llvm-project
ffa6262cb4e2a335d26416fad39a581b4f98c5f4
llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py
python
KScriptGenerator.updateCalledFunctionList
(self, callee)
Maintains a list of functions that will actually be called
Maintains a list of functions that will actually be called
[ "Maintains", "a", "list", "of", "functions", "that", "will", "actually", "be", "called" ]
def updateCalledFunctionList(self, callee):
    """Maintains a list of functions that will actually be called"""
    # Update the total call count
    self.updateTotalCallCount(callee)
    # If this function is already in the list, don't do anything else
    if callee in self.calledFunctions:
        return
    # Add this function to the list of those that will be called.
    self.calledFunctions.append(callee)
    # If this function calls other functions, add them too
    if callee in self.calledFunctionTable:
        for subCallee in self.calledFunctionTable[callee]:
            self.updateCalledFunctionList(subCallee)
[ "def", "updateCalledFunctionList", "(", "self", ",", "callee", ")", ":", "# Update the total call count", "self", ".", "updateTotalCallCount", "(", "callee", ")", "# If this function is already in the list, don't do anything else", "if", "callee", "in", "self", ".", "calledFunctions", ":", "return", "# Add this function to the list of those that will be called.", "self", ".", "calledFunctions", ".", "append", "(", "callee", ")", "# If this function calls other functions, add them too", "if", "callee", "in", "self", ".", "calledFunctionTable", ":", "for", "subCallee", "in", "self", ".", "calledFunctionTable", "[", "callee", "]", ":", "self", ".", "updateCalledFunctionList", "(", "subCallee", ")" ]
https://github.com/llvm/llvm-project/blob/ffa6262cb4e2a335d26416fad39a581b4f98c5f4/llvm/examples/Kaleidoscope/MCJIT/lazy/genk-timing.py#L68-L80
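updateCalledFunctionList is a depth-first transitive closure over a call table; a standalone sketch of the same idea with hypothetical names:

def transitive_callees(call_table, root):
    """Collect every function reachable from root in call_table, a dict
    mapping a function name to the names it calls directly."""
    seen = []

    def visit(fn):
        if fn in seen:                  # already handled; also breaks cycles
            return
        seen.append(fn)
        for callee in call_table.get(fn, ()):
            visit(callee)

    visit(root)
    return seen

calls = {"main": ["f", "g"], "f": ["g", "h"], "g": [], "h": ["f"]}
print(transitive_callees(calls, "main"))   # ['main', 'f', 'g', 'h']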
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py3/numpy/polynomial/polyutils.py
python
_fromroots
(line_f, mul_f, roots)
Helper function used to implement the ``<type>fromroots`` functions.

Parameters
----------
line_f : function(float, float) -> ndarray
    The ``<type>line`` function, such as ``polyline``
mul_f : function(array_like, array_like) -> ndarray
    The ``<type>mul`` function, such as ``polymul``
roots
    See the ``<type>fromroots`` functions for more detail
Helper function used to implement the ``<type>fromroots`` functions.
[ "Helper", "function", "used", "to", "implement", "the", "<type", ">", "fromroots", "functions", "." ]
def _fromroots(line_f, mul_f, roots):
    """
    Helper function used to implement the ``<type>fromroots`` functions.

    Parameters
    ----------
    line_f : function(float, float) -> ndarray
        The ``<type>line`` function, such as ``polyline``
    mul_f : function(array_like, array_like) -> ndarray
        The ``<type>mul`` function, such as ``polymul``
    roots
        See the ``<type>fromroots`` functions for more detail
    """
    if len(roots) == 0:
        return np.ones(1)
    else:
        [roots] = as_series([roots], trim=False)
        roots.sort()
        p = [line_f(-r, 1) for r in roots]
        n = len(p)
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [mul_f(p[i], p[i+m]) for i in range(m)]
            if r:
                tmp[0] = mul_f(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]
[ "def", "_fromroots", "(", "line_f", ",", "mul_f", ",", "roots", ")", ":", "if", "len", "(", "roots", ")", "==", "0", ":", "return", "np", ".", "ones", "(", "1", ")", "else", ":", "[", "roots", "]", "=", "as_series", "(", "[", "roots", "]", ",", "trim", "=", "False", ")", "roots", ".", "sort", "(", ")", "p", "=", "[", "line_f", "(", "-", "r", ",", "1", ")", "for", "r", "in", "roots", "]", "n", "=", "len", "(", "p", ")", "while", "n", ">", "1", ":", "m", ",", "r", "=", "divmod", "(", "n", ",", "2", ")", "tmp", "=", "[", "mul_f", "(", "p", "[", "i", "]", ",", "p", "[", "i", "+", "m", "]", ")", "for", "i", "in", "range", "(", "m", ")", "]", "if", "r", ":", "tmp", "[", "0", "]", "=", "mul_f", "(", "tmp", "[", "0", "]", ",", "p", "[", "-", "1", "]", ")", "p", "=", "tmp", "n", "=", "m", "return", "p", "[", "0", "]" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py3/numpy/polynomial/polyutils.py#L456-L483
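The public wrappers pair this helper with a basis's line/mul functions; for the power basis that is numpy.polynomial.polynomial.polyfromroots, demonstrated below (coefficients are lowest degree first):

import numpy as np
from numpy.polynomial import polynomial as P

# (x + 1) * x * (x - 1) = x**3 - x  ->  coefficients [0, -1, 0, 1]
print(P.polyfromroots((-1, 0, 1)))   # [ 0. -1.  0.  1.]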
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/grid.py
python
GridTableBase.SetValueAsBool
(*args, **kwargs)
return _grid.GridTableBase_SetValueAsBool(*args, **kwargs)
SetValueAsBool(self, int row, int col, bool value)
SetValueAsBool(self, int row, int col, bool value)
[ "SetValueAsBool", "(", "self", "int", "row", "int", "col", "bool", "value", ")" ]
def SetValueAsBool(*args, **kwargs):
    """SetValueAsBool(self, int row, int col, bool value)"""
    return _grid.GridTableBase_SetValueAsBool(*args, **kwargs)
[ "def", "SetValueAsBool", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "GridTableBase_SetValueAsBool", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/grid.py#L854-L856
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/arraypad.py
python
_set_wrap_both
(padded, axis, width_pair)
return new_left_pad, new_right_pad
Pad `axis` of `arr` with wrapped values.

Parameters
----------
padded : ndarray
    Input array of arbitrary shape.
axis : int
    Axis along which to pad `arr`.
width_pair : (int, int)
    Pair of widths that mark the pad area on both sides in the given
    dimension.

Returns
-------
pad_amt : tuple of ints, length 2
    New index positions of padding to do along the `axis`. If these are
    both 0, padding is done in this dimension.
Pad `axis` of `arr` with wrapped values.
[ "Pad", "axis", "of", "arr", "with", "wrapped", "values", "." ]
def _set_wrap_both(padded, axis, width_pair):
    """
    Pad `axis` of `arr` with wrapped values.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these
        are both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    period = padded.shape[axis] - right_pad - left_pad

    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated
    # with these variables.
    new_left_pad = 0
    new_right_pad = 0

    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area
        right_slice = _slice_at_axis(
            slice(-right_pad - min(period, left_pad),
                  -right_pad if right_pad != 0 else None),
            axis
        )
        right_chunk = padded[right_slice]

        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk

    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area
        left_slice = _slice_at_axis(
            slice(left_pad, left_pad + min(period, right_pad),), axis)
        left_chunk = padded[left_slice]

        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis)
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk

    return new_left_pad, new_right_pad
[ "def", "_set_wrap_both", "(", "padded", ",", "axis", ",", "width_pair", ")", ":", "left_pad", ",", "right_pad", "=", "width_pair", "period", "=", "padded", ".", "shape", "[", "axis", "]", "-", "right_pad", "-", "left_pad", "# If the current dimension of `arr` doesn't contain enough valid values", "# (not part of the undefined pad area) we need to pad multiple times.", "# Each time the pad area shrinks on both sides which is communicated with", "# these variables.", "new_left_pad", "=", "0", "new_right_pad", "=", "0", "if", "left_pad", ">", "0", ":", "# Pad with wrapped values on left side", "# First slice chunk from right side of the non-pad area.", "# Use min(period, left_pad) to ensure that chunk is not larger than", "# pad area", "right_slice", "=", "_slice_at_axis", "(", "slice", "(", "-", "right_pad", "-", "min", "(", "period", ",", "left_pad", ")", ",", "-", "right_pad", "if", "right_pad", "!=", "0", "else", "None", ")", ",", "axis", ")", "right_chunk", "=", "padded", "[", "right_slice", "]", "if", "left_pad", ">", "period", ":", "# Chunk is smaller than pad area", "pad_area", "=", "_slice_at_axis", "(", "slice", "(", "left_pad", "-", "period", ",", "left_pad", ")", ",", "axis", ")", "new_left_pad", "=", "left_pad", "-", "period", "else", ":", "# Chunk matches pad area", "pad_area", "=", "_slice_at_axis", "(", "slice", "(", "None", ",", "left_pad", ")", ",", "axis", ")", "padded", "[", "pad_area", "]", "=", "right_chunk", "if", "right_pad", ">", "0", ":", "# Pad with wrapped values on right side", "# First slice chunk from left side of the non-pad area.", "# Use min(period, right_pad) to ensure that chunk is not larger than", "# pad area", "left_slice", "=", "_slice_at_axis", "(", "slice", "(", "left_pad", ",", "left_pad", "+", "min", "(", "period", ",", "right_pad", ")", ",", ")", ",", "axis", ")", "left_chunk", "=", "padded", "[", "left_slice", "]", "if", "right_pad", ">", "period", ":", "# Chunk is smaller than pad area", "pad_area", "=", "_slice_at_axis", "(", "slice", "(", "-", "right_pad", ",", "-", "right_pad", "+", "period", ")", ",", "axis", ")", "new_right_pad", "=", "right_pad", "-", "period", "else", ":", "# Chunk matches pad area", "pad_area", "=", "_slice_at_axis", "(", "slice", "(", "-", "right_pad", ",", "None", ")", ",", "axis", ")", "padded", "[", "pad_area", "]", "=", "left_chunk", "return", "new_left_pad", ",", "new_right_pad" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/lib/arraypad.py#L386-L456
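This helper backs numpy's public np.pad(..., mode='wrap'), which tiles the array periodically into the pad area; a quick check with the public API:

import numpy as np

a = np.arange(3)                   # [0 1 2]
print(np.pad(a, 2, mode="wrap"))   # [1 2 0 1 2 0 1]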
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/graph_editor/subgraph.py
python
SubGraphView.input_index
(self, t)
return subgraph_id
Find the input index corresponding to the given input tensor t.

Args:
  t: the input tensor of this subgraph view.
Returns:
  The index in the self.inputs list.
Raises:
  Error: if t is not an input tensor.
Find the input index corresponding to the given input tensor t.
[ "Find", "the", "input", "index", "corresponding", "to", "the", "given", "input", "tensor", "t", "." ]
def input_index(self, t):
  """Find the input index corresponding to the given input tensor t.

  Args:
    t: the input tensor of this subgraph view.
  Returns:
    The index in the self.inputs list.
  Raises:
    Error: if t is not an input tensor.
  """
  try:
    subgraph_id = self._input_ts.index(t)
  except:
    raise ValueError("Can't find {} in inputs of subgraph {}.".format(
        t.name, self.name))
  return subgraph_id
[ "def", "input_index", "(", "self", ",", "t", ")", ":", "try", ":", "subgraph_id", "=", "self", ".", "_input_ts", ".", "index", "(", "t", ")", "except", ":", "raise", "ValueError", "(", "\"Can't find {} in inputs of subgraph {}.\"", ".", "format", "(", "t", ".", "name", ",", "self", ".", "name", ")", ")", "return", "subgraph_id" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/graph_editor/subgraph.py#L553-L568
ricardoquesada/Spidermonkey
4a75ea2543408bd1b2c515aa95901523eeef7858
ipc/ipdl/ipdl/parser.py
python
p_TranslationUnit
(p)
TranslationUnit : Preamble NamespacedStuff
TranslationUnit : Preamble NamespacedStuff
[ "TranslationUnit", ":", "Preamble", "NamespacedStuff" ]
def p_TranslationUnit(p):
    """TranslationUnit : Preamble NamespacedStuff"""
    tu = Parser.current.tu
    tu.loc = Loc(tu.filename)
    for stmt in p[1]:
        if isinstance(stmt, CxxInclude):
            tu.addCxxInclude(stmt)
        elif isinstance(stmt, Include):
            tu.addInclude(stmt)
        elif isinstance(stmt, UsingStmt):
            tu.addUsingStmt(stmt)
        else:
            assert 0

    for thing in p[2]:
        if isinstance(thing, StructDecl):
            tu.addStructDecl(thing)
        elif isinstance(thing, UnionDecl):
            tu.addUnionDecl(thing)
        elif isinstance(thing, Protocol):
            if tu.protocol is not None:
                _error(thing.loc, "only one protocol definition per file")
            tu.protocol = thing
        else:
            assert(0)

    # The "canonical" namespace of the tu, what it's considered to be
    # in for the purposes of C++: |#include "foo/bar/TU.h"|
    if tu.protocol:
        assert tu.filetype == 'protocol'
        tu.namespaces = tu.protocol.namespaces
        tu.name = tu.protocol.name
    else:
        assert tu.filetype == 'header'
        # There's not really a canonical "thing" in headers. So
        # somewhat arbitrarily use the namespace of the last
        # interesting thing that was declared.
        for thing in reversed(tu.structsAndUnions):
            tu.namespaces = thing.namespaces
            break

    p[0] = tu
[ "def", "p_TranslationUnit", "(", "p", ")", ":", "tu", "=", "Parser", ".", "current", ".", "tu", "tu", ".", "loc", "=", "Loc", "(", "tu", ".", "filename", ")", "for", "stmt", "in", "p", "[", "1", "]", ":", "if", "isinstance", "(", "stmt", ",", "CxxInclude", ")", ":", "tu", ".", "addCxxInclude", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "Include", ")", ":", "tu", ".", "addInclude", "(", "stmt", ")", "elif", "isinstance", "(", "stmt", ",", "UsingStmt", ")", ":", "tu", ".", "addUsingStmt", "(", "stmt", ")", "else", ":", "assert", "0", "for", "thing", "in", "p", "[", "2", "]", ":", "if", "isinstance", "(", "thing", ",", "StructDecl", ")", ":", "tu", ".", "addStructDecl", "(", "thing", ")", "elif", "isinstance", "(", "thing", ",", "UnionDecl", ")", ":", "tu", ".", "addUnionDecl", "(", "thing", ")", "elif", "isinstance", "(", "thing", ",", "Protocol", ")", ":", "if", "tu", ".", "protocol", "is", "not", "None", ":", "_error", "(", "thing", ".", "loc", ",", "\"only one protocol definition per file\"", ")", "tu", ".", "protocol", "=", "thing", "else", ":", "assert", "(", "0", ")", "# The \"canonical\" namespace of the tu, what it's considered to be", "# in for the purposes of C++: |#include \"foo/bar/TU.h\"|", "if", "tu", ".", "protocol", ":", "assert", "tu", ".", "filetype", "==", "'protocol'", "tu", ".", "namespaces", "=", "tu", ".", "protocol", ".", "namespaces", "tu", ".", "name", "=", "tu", ".", "protocol", ".", "name", "else", ":", "assert", "tu", ".", "filetype", "==", "'header'", "# There's not really a canonical \"thing\" in headers. So", "# somewhat arbitrarily use the namespace of the last", "# interesting thing that was declared.", "for", "thing", "in", "reversed", "(", "tu", ".", "structsAndUnions", ")", ":", "tu", ".", "namespaces", "=", "thing", ".", "namespaces", "break", "p", "[", "0", "]", "=", "tu" ]
https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/ipc/ipdl/ipdl/parser.py#L188-L229
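p_TranslationUnit is a PLY yacc action: the docstring is the grammar production and p holds the matched symbols. A minimal self-contained PLY sketch of the same mechanism, using a toy grammar that is not the IPDL one:

import ply.lex as lex
import ply.yacc as yacc

tokens = ('NAME', 'SEMI')
t_NAME = r'[A-Za-z_][A-Za-z0-9_]*'
t_SEMI = r';'
t_ignore = ' \t\n'

def t_error(t):
    t.lexer.skip(1)

def p_Unit(p):
    """Unit : Unit NAME SEMI
            | NAME SEMI"""
    # Fold each parsed statement into the result, the way p_TranslationUnit
    # folds Preamble and NamespacedStuff into the translation unit object.
    if len(p) == 4:
        p[0] = p[1] + [p[2]]
    else:
        p[0] = [p[1]]

def p_error(p):
    raise SyntaxError("parse error at %r" % (p,))

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse("foo; bar;", lexer=lexer))   # ['foo', 'bar']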
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/batch_norm_benchmark.py
python
build_graph
(device, input_shape, axes, num_layers, mode, scale, train)
Build a graph containing a sequence of batch normalizations.

Args:
  device: string, the device to run on.
  input_shape: shape of the input tensor.
  axes: axes that are to be normalized across.
  num_layers: number of batch normalization layers in the graph.
  mode: "op", "py" or "slow" depending on the implementation.
  scale: scale after normalization.
  train: if true, also run backprop.

Returns:
  An array of tensors to run()
Build a graph containing a sequence of batch normalizations.
[ "Build", "a", "graph", "containing", "a", "sequence", "of", "batch", "normalizations", "." ]
def build_graph(device, input_shape, axes, num_layers, mode, scale, train):
  """Build a graph containing a sequence of batch normalizations.

  Args:
    device: string, the device to run on.
    input_shape: shape of the input tensor.
    axes: axes that are to be normalized across.
    num_layers: number of batch normalization layers in the graph.
    mode: "op", "py" or "slow" depending on the implementation.
    scale: scale after normalization.
    train: if true, also run backprop.

  Returns:
    An array of tensors to run()
  """
  moment_shape = []
  keep_dims = mode == "py" or mode == "slow"
  if keep_dims:
    for axis in range(len(input_shape)):
      if axis in axes:
        moment_shape.append(1)
      else:
        moment_shape.append(input_shape[axis])
  else:
    for axis in range(len(input_shape)):
      if axis not in axes:
        moment_shape.append(input_shape[axis])
  with ops.device("/%s:0" % device):
    tensor = variables.Variable(random_ops.truncated_normal(input_shape))
    for _ in range(num_layers):
      if train:
        mean, variance = nn_impl.moments(tensor, axes, keep_dims=keep_dims)
      else:
        mean = array_ops.zeros(moment_shape)
        variance = array_ops.ones(moment_shape)
      beta = variables.Variable(array_ops.zeros(moment_shape))
      gamma = variables.Variable(constant_op.constant(1.0, shape=moment_shape))
      if mode == "py":
        tensor = batch_norm_py(tensor, mean, variance, beta, gamma, scale)
      elif mode == "op":
        tensor = batch_norm_op(tensor, mean, variance, beta, gamma, scale)
      elif mode == "slow":
        tensor = batch_norm_slow(tensor, mean, variance, beta, gamma, scale)
    if train:
      return gradients_impl.gradients([tensor], variables.trainable_variables())
    else:
      return [tensor]
[ "def", "build_graph", "(", "device", ",", "input_shape", ",", "axes", ",", "num_layers", ",", "mode", ",", "scale", ",", "train", ")", ":", "moment_shape", "=", "[", "]", "keep_dims", "=", "mode", "==", "\"py\"", "or", "mode", "==", "\"slow\"", "if", "keep_dims", ":", "for", "axis", "in", "range", "(", "len", "(", "input_shape", ")", ")", ":", "if", "axis", "in", "axes", ":", "moment_shape", ".", "append", "(", "1", ")", "else", ":", "moment_shape", ".", "append", "(", "input_shape", "[", "axis", "]", ")", "else", ":", "for", "axis", "in", "range", "(", "len", "(", "input_shape", ")", ")", ":", "if", "axis", "not", "in", "axes", ":", "moment_shape", ".", "append", "(", "input_shape", "[", "axis", "]", ")", "with", "ops", ".", "device", "(", "\"/%s:0\"", "%", "device", ")", ":", "tensor", "=", "variables", ".", "Variable", "(", "random_ops", ".", "truncated_normal", "(", "input_shape", ")", ")", "for", "_", "in", "range", "(", "num_layers", ")", ":", "if", "train", ":", "mean", ",", "variance", "=", "nn_impl", ".", "moments", "(", "tensor", ",", "axes", ",", "keep_dims", "=", "keep_dims", ")", "else", ":", "mean", "=", "array_ops", ".", "zeros", "(", "moment_shape", ")", "variance", "=", "array_ops", ".", "ones", "(", "moment_shape", ")", "beta", "=", "variables", ".", "Variable", "(", "array_ops", ".", "zeros", "(", "moment_shape", ")", ")", "gamma", "=", "variables", ".", "Variable", "(", "constant_op", ".", "constant", "(", "1.0", ",", "shape", "=", "moment_shape", ")", ")", "if", "mode", "==", "\"py\"", ":", "tensor", "=", "batch_norm_py", "(", "tensor", ",", "mean", ",", "variance", ",", "beta", ",", "gamma", ",", "scale", ")", "elif", "mode", "==", "\"op\"", ":", "tensor", "=", "batch_norm_op", "(", "tensor", ",", "mean", ",", "variance", ",", "beta", ",", "gamma", ",", "scale", ")", "elif", "mode", "==", "\"slow\"", ":", "tensor", "=", "batch_norm_slow", "(", "tensor", ",", "mean", ",", "variance", ",", "beta", ",", "gamma", ",", "scale", ")", "if", "train", ":", "return", "gradients_impl", ".", "gradients", "(", "[", "tensor", "]", ",", "variables", ".", "trainable_variables", "(", ")", ")", "else", ":", "return", "[", "tensor", "]" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/batch_norm_benchmark.py#L68-L114
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/saved_model/builder_impl.py
python
_add_asset_to_metagraph
(meta_graph_def, asset_filename, asset_tensor)
Builds an asset proto and adds it to the meta graph def.

Args:
  meta_graph_def: The meta graph def to which the asset will be added.
  asset_filename: The filename of the asset to be added.
  asset_tensor: The asset tensor used to populate the tensor info of the
    asset proto.
Builds an asset proto and adds it to the meta graph def.
[ "Builds", "an", "asset", "proto", "and", "adds", "it", "to", "the", "meta", "graph", "def", "." ]
def _add_asset_to_metagraph(meta_graph_def, asset_filename, asset_tensor):
  """Builds an asset proto and adds it to the meta graph def.

  Args:
    meta_graph_def: The meta graph def to which the asset will be added.
    asset_filename: The filename of the asset to be added.
    asset_tensor: The asset tensor used to populate the tensor info of the
      asset proto.
  """
  asset_proto = meta_graph_def.asset_file_def.add()
  asset_proto.filename = asset_filename
  asset_proto.tensor_info.name = asset_tensor.name
[ "def", "_add_asset_to_metagraph", "(", "meta_graph_def", ",", "asset_filename", ",", "asset_tensor", ")", ":", "asset_proto", "=", "meta_graph_def", ".", "asset_file_def", ".", "add", "(", ")", "asset_proto", ".", "filename", "=", "asset_filename", "asset_proto", ".", "tensor_info", ".", "name", "=", "asset_tensor", ".", "name" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/saved_model/builder_impl.py#L742-L753
ceph/ceph
959663007321a369c83218414a29bd9dbc8bda3a
qa/tasks/cephfs/filesystem.py
python
Filesystem.wait_for_state
(self, goal_state, reject=None, timeout=None, mds_id=None, rank=None)
Block until the MDS reaches a particular state, or a failure condition
is met. When there are multiple MDSs, succeed when exactly one MDS is
in the goal state, or fail when any MDS is in the reject state.

:param goal_state: Return once the MDS is in this state
:param reject: Fail if the MDS enters this state before the goal state
:param timeout: Fail if this many seconds pass before reaching goal
:return: number of seconds waited, rounded down to integer
Block until the MDS reaches a particular state, or a failure condition is met.
[ "Block", "until", "the", "MDS", "reaches", "a", "particular", "state", "or", "a", "failure", "condition", "is", "met", "." ]
def wait_for_state(self, goal_state, reject=None, timeout=None, mds_id=None, rank=None):
    """
    Block until the MDS reaches a particular state, or a failure condition
    is met. When there are multiple MDSs, succeed when exactly one MDS is
    in the goal state, or fail when any MDS is in the reject state.

    :param goal_state: Return once the MDS is in this state
    :param reject: Fail if the MDS enters this state before the goal state
    :param timeout: Fail if this many seconds pass before reaching goal
    :return: number of seconds waited, rounded down to integer
    """
    started_at = time.time()
    while True:
        status = self.status()
        if rank is not None:
            try:
                mds_info = status.get_rank(self.id, rank)
                current_state = mds_info['state'] if mds_info else None
                log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
            except:
                mdsmap = self.get_mds_map(status=status)
                if rank in mdsmap['failed']:
                    log.debug("Waiting for rank {0} to come back.".format(rank))
                    current_state = None
                else:
                    raise
        elif mds_id is not None:
            # mds_info is None if no daemon with this ID exists in the map
            mds_info = status.get_mds(mds_id)
            current_state = mds_info['state'] if mds_info else None
            log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
        else:
            # In general, look for a single MDS
            states = [m['state'] for m in status.get_ranks(self.id)]
            if [s for s in states if s == goal_state] == [goal_state]:
                current_state = goal_state
            elif reject in states:
                current_state = reject
            else:
                current_state = None
            log.debug("mapped states {0} to {1}".format(states, current_state))

        elapsed = time.time() - started_at
        if current_state == goal_state:
            log.debug("reached state '{0}' in {1}s".format(current_state, elapsed))
            return elapsed
        elif reject is not None and current_state == reject:
            raise RuntimeError("MDS in reject state {0}".format(current_state))
        elif timeout is not None and elapsed > timeout:
            log.error("MDS status at timeout: {0}".format(status.get_fsmap(self.id)))
            raise RuntimeError(
                "Reached timeout after {0} seconds waiting for state {1}, while in state {2}".format(
                    elapsed, goal_state, current_state
                ))
        else:
            time.sleep(1)
[ "def", "wait_for_state", "(", "self", ",", "goal_state", ",", "reject", "=", "None", ",", "timeout", "=", "None", ",", "mds_id", "=", "None", ",", "rank", "=", "None", ")", ":", "started_at", "=", "time", ".", "time", "(", ")", "while", "True", ":", "status", "=", "self", ".", "status", "(", ")", "if", "rank", "is", "not", "None", ":", "try", ":", "mds_info", "=", "status", ".", "get_rank", "(", "self", ".", "id", ",", "rank", ")", "current_state", "=", "mds_info", "[", "'state'", "]", "if", "mds_info", "else", "None", "log", ".", "debug", "(", "\"Looked up MDS state for mds.{0}: {1}\"", ".", "format", "(", "rank", ",", "current_state", ")", ")", "except", ":", "mdsmap", "=", "self", ".", "get_mds_map", "(", "status", "=", "status", ")", "if", "rank", "in", "mdsmap", "[", "'failed'", "]", ":", "log", ".", "debug", "(", "\"Waiting for rank {0} to come back.\"", ".", "format", "(", "rank", ")", ")", "current_state", "=", "None", "else", ":", "raise", "elif", "mds_id", "is", "not", "None", ":", "# mds_info is None if no daemon with this ID exists in the map", "mds_info", "=", "status", ".", "get_mds", "(", "mds_id", ")", "current_state", "=", "mds_info", "[", "'state'", "]", "if", "mds_info", "else", "None", "log", ".", "debug", "(", "\"Looked up MDS state for {0}: {1}\"", ".", "format", "(", "mds_id", ",", "current_state", ")", ")", "else", ":", "# In general, look for a single MDS", "states", "=", "[", "m", "[", "'state'", "]", "for", "m", "in", "status", ".", "get_ranks", "(", "self", ".", "id", ")", "]", "if", "[", "s", "for", "s", "in", "states", "if", "s", "==", "goal_state", "]", "==", "[", "goal_state", "]", ":", "current_state", "=", "goal_state", "elif", "reject", "in", "states", ":", "current_state", "=", "reject", "else", ":", "current_state", "=", "None", "log", ".", "debug", "(", "\"mapped states {0} to {1}\"", ".", "format", "(", "states", ",", "current_state", ")", ")", "elapsed", "=", "time", ".", "time", "(", ")", "-", "started_at", "if", "current_state", "==", "goal_state", ":", "log", ".", "debug", "(", "\"reached state '{0}' in {1}s\"", ".", "format", "(", "current_state", ",", "elapsed", ")", ")", "return", "elapsed", "elif", "reject", "is", "not", "None", "and", "current_state", "==", "reject", ":", "raise", "RuntimeError", "(", "\"MDS in reject state {0}\"", ".", "format", "(", "current_state", ")", ")", "elif", "timeout", "is", "not", "None", "and", "elapsed", ">", "timeout", ":", "log", ".", "error", "(", "\"MDS status at timeout: {0}\"", ".", "format", "(", "status", ".", "get_fsmap", "(", "self", ".", "id", ")", ")", ")", "raise", "RuntimeError", "(", "\"Reached timeout after {0} seconds waiting for state {1}, while in state {2}\"", ".", "format", "(", "elapsed", ",", "goal_state", ",", "current_state", ")", ")", "else", ":", "time", ".", "sleep", "(", "1", ")" ]
https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/qa/tasks/cephfs/filesystem.py#L1199-L1257
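The wait_for_state record above is a poll-until-state loop: query cluster status once per second, return the elapsed time on the goal state, and raise early on a reject state or timeout. A minimal standalone sketch of the same pattern; wait_until and get_state are hypothetical stand-ins for the entry's self.status() plumbing:

import time

def wait_until(get_state, goal, reject=None, timeout=None, interval=1.0):
    # Poll get_state() until it returns `goal`; fail fast on `reject`
    # or once `timeout` seconds have elapsed, mirroring wait_for_state.
    started_at = time.time()
    while True:
        current = get_state()
        elapsed = time.time() - started_at
        if current == goal:
            return elapsed
        if reject is not None and current == reject:
            raise RuntimeError("reached reject state {0}".format(current))
        if timeout is not None and elapsed > timeout:
            raise RuntimeError("timed out after {0:.1f}s in state {1}".format(elapsed, current))
        time.sleep(interval)

# Example: succeeds on the third poll.
states = iter(["creating", "creating", "active"])
print(wait_until(lambda: next(states), "active", timeout=10))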
deepmind/open_spiel
4ca53bea32bb2875c7385d215424048ae92f78c8
open_spiel/python/pytorch/policy_gradient.py
python
PolicyGradient._add_transition
(self, time_step)
Adds intra-episode transition to the `_episode_data` buffer. Adds the transition from `self._prev_time_step` to `time_step`. Args: time_step: an instance of rl_environment.TimeStep.
Adds intra-episode transition to the `_episode_data` buffer.
[ "Adds", "intra", "-", "episode", "transition", "to", "the", "_episode_data", "buffer", "." ]
def _add_transition(self, time_step): """Adds intra-episode transition to the `_episode_data` buffer. Adds the transition from `self._prev_time_step` to `time_step`. Args: time_step: an instance of rl_environment.TimeStep. """ assert self._prev_time_step is not None legal_actions = ( self._prev_time_step.observations["legal_actions"][self.player_id]) legal_actions_mask = np.zeros(self._num_actions) legal_actions_mask[legal_actions] = 1.0 transition = Transition( info_state=( self._prev_time_step.observations["info_state"][self.player_id][:]), action=self._prev_action, reward=time_step.rewards[self.player_id], discount=time_step.discounts[self.player_id], legal_actions_mask=legal_actions_mask) self._episode_data.append(transition)
[ "def", "_add_transition", "(", "self", ",", "time_step", ")", ":", "assert", "self", ".", "_prev_time_step", "is", "not", "None", "legal_actions", "=", "(", "self", ".", "_prev_time_step", ".", "observations", "[", "\"legal_actions\"", "]", "[", "self", ".", "player_id", "]", ")", "legal_actions_mask", "=", "np", ".", "zeros", "(", "self", ".", "_num_actions", ")", "legal_actions_mask", "[", "legal_actions", "]", "=", "1.0", "transition", "=", "Transition", "(", "info_state", "=", "(", "self", ".", "_prev_time_step", ".", "observations", "[", "\"info_state\"", "]", "[", "self", ".", "player_id", "]", "[", ":", "]", ")", ",", "action", "=", "self", ".", "_prev_action", ",", "reward", "=", "time_step", ".", "rewards", "[", "self", ".", "player_id", "]", ",", "discount", "=", "time_step", ".", "discounts", "[", "self", ".", "player_id", "]", ",", "legal_actions_mask", "=", "legal_actions_mask", ")", "self", ".", "_episode_data", ".", "append", "(", "transition", ")" ]
https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/pytorch/policy_gradient.py#L383-L404
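The transition above stores legality as a dense 0/1 mask over the action space. A small NumPy sketch of that indexing trick, with made-up sizes:

import numpy as np

num_actions = 5
legal_actions = [0, 2, 4]                 # e.g. from observations["legal_actions"]
legal_actions_mask = np.zeros(num_actions)
legal_actions_mask[legal_actions] = 1.0   # fancy indexing, as in _add_transition
print(legal_actions_mask)                 # [1. 0. 1. 0. 1.]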
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_windows.py
python
ColourData.GetChooseFull
(*args, **kwargs)
return _windows_.ColourData_GetChooseFull(*args, **kwargs)
GetChooseFull(self) -> bool Under Windows, determines whether the Windows colour dialog will display the full dialog with custom colour selection controls. Has no meaning under other platforms. The default value is true.
GetChooseFull(self) -> bool
[ "GetChooseFull", "(", "self", ")", "-", ">", "bool" ]
def GetChooseFull(*args, **kwargs): """ GetChooseFull(self) -> bool Under Windows, determines whether the Windows colour dialog will display the full dialog with custom colour selection controls. Has no meaning under other platforms. The default value is true. """ return _windows_.ColourData_GetChooseFull(*args, **kwargs)
[ "def", "GetChooseFull", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_windows_", ".", "ColourData_GetChooseFull", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_windows.py#L2931-L2939
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/layers/python/layers/feature_column_ops.py
python
_create_embedding_lookup
(column, columns_to_tensors, embedding_lookup_arguments, num_outputs, trainable, weight_collections)
Creates variables and returns predictions for linear weights in a model. Args: column: the column we're working on. columns_to_tensors: a map from column name to tensors. embedding_lookup_arguments: arguments for embedding lookup. num_outputs: how many outputs. trainable: whether the variable we create is trainable. weight_collections: weights will be placed here. Returns: variables: the created embeddings. predictions: the computed predictions.
Creates variables and returns predictions for linear weights in a model.
[ "Creates", "variables", "and", "returns", "predictions", "for", "linear", "weights", "in", "a", "model", "." ]
def _create_embedding_lookup(column, columns_to_tensors, embedding_lookup_arguments, num_outputs, trainable, weight_collections): """Creates variables and returns predictions for linear weights in a model. Args: column: the column we're working on. columns_to_tensors: a map from column name to tensors. embedding_lookup_arguments: arguments for embedding lookup. num_outputs: how many outputs. trainable: whether the variable we create is trainable. weight_collections: weights will be placed here. Returns: variables: the created embeddings. predictions: the computed predictions. """ with variable_scope.variable_scope( None, default_name=column.name, values=columns_to_tensors.values()): variable = contrib_variables.model_variable( name='weights', shape=[embedding_lookup_arguments.vocab_size, num_outputs], dtype=dtypes.float32, initializer=embedding_lookup_arguments.initializer, trainable=trainable, collections=weight_collections) if fc._is_variable(variable): # pylint: disable=protected-access variable = [variable] else: variable = variable._get_variable_list() # pylint: disable=protected-access predictions = embedding_ops.safe_embedding_lookup_sparse( variable, embedding_lookup_arguments.input_tensor, sparse_weights=embedding_lookup_arguments.weight_tensor, combiner=embedding_lookup_arguments.combiner, name=column.name + '_weights') return variable, predictions
[ "def", "_create_embedding_lookup", "(", "column", ",", "columns_to_tensors", ",", "embedding_lookup_arguments", ",", "num_outputs", ",", "trainable", ",", "weight_collections", ")", ":", "with", "variable_scope", ".", "variable_scope", "(", "None", ",", "default_name", "=", "column", ".", "name", ",", "values", "=", "columns_to_tensors", ".", "values", "(", ")", ")", ":", "variable", "=", "contrib_variables", ".", "model_variable", "(", "name", "=", "'weights'", ",", "shape", "=", "[", "embedding_lookup_arguments", ".", "vocab_size", ",", "num_outputs", "]", ",", "dtype", "=", "dtypes", ".", "float32", ",", "initializer", "=", "embedding_lookup_arguments", ".", "initializer", ",", "trainable", "=", "trainable", ",", "collections", "=", "weight_collections", ")", "if", "fc", ".", "_is_variable", "(", "variable", ")", ":", "# pylint: disable=protected-access", "variable", "=", "[", "variable", "]", "else", ":", "variable", "=", "variable", ".", "_get_variable_list", "(", ")", "# pylint: disable=protected-access", "predictions", "=", "embedding_ops", ".", "safe_embedding_lookup_sparse", "(", "variable", ",", "embedding_lookup_arguments", ".", "input_tensor", ",", "sparse_weights", "=", "embedding_lookup_arguments", ".", "weight_tensor", ",", "combiner", "=", "embedding_lookup_arguments", ".", "combiner", ",", "name", "=", "column", ".", "name", "+", "'_weights'", ")", "return", "variable", ",", "predictions" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/layers/python/layers/feature_column_ops.py#L261-L300
nasa/meshNetwork
ff4bd66e0ca6bd424fd8897a97252bb3925d8b3c
python/mesh/generic/serialComm.py
python
SerialComm.readBytes
(self, bufferFlag=False)
Reads raw bytes from radio
Reads raw bytes from radio
[ "Reads", "raw", "bytes", "from", "radio" ]
def readBytes(self, bufferFlag=False): """Reads raw bytes from radio""" self.radio.readBytes(bufferFlag)
[ "def", "readBytes", "(", "self", ",", "bufferFlag", "=", "False", ")", ":", "self", ".", "radio", ".", "readBytes", "(", "bufferFlag", ")" ]
https://github.com/nasa/meshNetwork/blob/ff4bd66e0ca6bd424fd8897a97252bb3925d8b3c/python/mesh/generic/serialComm.py#L77-L79
tinyobjloader/tinyobjloader
8322e00ae685ea623ab6ac5a6cebcfa2d22fbf93
deps/cpplint.py
python
CheckForBadCharacters
(filename, lines, error)
Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found.
Logs an error for each line containing bad characters.
[ "Logs", "an", "error", "for", "each", "line", "containing", "bad", "characters", "." ]
def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
[ "def", "CheckForBadCharacters", "(", "filename", ",", "lines", ",", "error", ")", ":", "for", "linenum", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "u'\\ufffd'", "in", "line", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/utf8'", ",", "5", ",", "'Line contains invalid UTF-8 (or Unicode replacement character).'", ")", "if", "'\\0'", "in", "line", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/nul'", ",", "5", ",", "'Line contains NUL byte.'", ")" ]
https://github.com/tinyobjloader/tinyobjloader/blob/8322e00ae685ea623ab6ac5a6cebcfa2d22fbf93/deps/cpplint.py#L1800-L1822
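The check above is easy to exercise outside cpplint; a self-contained sketch applying the same two conditions (U+FFFD replacement characters and NUL bytes), returning results instead of calling an error() callback:

def find_bad_characters(lines):
    # Mirrors CheckForBadCharacters: collect (line number, reason) pairs.
    problems = []
    for linenum, line in enumerate(lines):
        if u'\ufffd' in line:
            problems.append((linenum, 'invalid UTF-8 or replacement character'))
        if '\0' in line:
            problems.append((linenum, 'NUL byte'))
    return problems

print(find_bad_characters([u'fine', u'damaged \ufffd text', 'embedded\0nul']))
# [(1, 'invalid UTF-8 or replacement character'), (2, 'NUL byte')]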
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/grid.py
python
Grid.CalcColLabelsExposed
(*args, **kwargs)
return _grid.Grid_CalcColLabelsExposed(*args, **kwargs)
CalcColLabelsExposed(self, Region reg) -> wxArrayInt
CalcColLabelsExposed(self, Region reg) -> wxArrayInt
[ "CalcColLabelsExposed", "(", "self", "Region", "reg", ")", "-", ">", "wxArrayInt" ]
def CalcColLabelsExposed(*args, **kwargs): """CalcColLabelsExposed(self, Region reg) -> wxArrayInt""" return _grid.Grid_CalcColLabelsExposed(*args, **kwargs)
[ "def", "CalcColLabelsExposed", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "Grid_CalcColLabelsExposed", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/grid.py#L1243-L1245
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/pyclbr.py
python
readmodule_ex
(module, path=None)
return _readmodule(module, path or [])
Return a dictionary with all functions and classes in module. Search for module in PATH + sys.path. If possible, include imported superclasses. Do this by reading source, without importing (and executing) it.
Return a dictionary with all functions and classes in module.
[ "Return", "a", "dictionary", "with", "all", "functions", "and", "classes", "in", "module", "." ]
def readmodule_ex(module, path=None): """Return a dictionary with all functions and classes in module. Search for module in PATH + sys.path. If possible, include imported superclasses. Do this by reading source, without importing (and executing) it. """ return _readmodule(module, path or [])
[ "def", "readmodule_ex", "(", "module", ",", "path", "=", "None", ")", ":", "return", "_readmodule", "(", "module", ",", "path", "or", "[", "]", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/pyclbr.py#L109-L116
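pyclbr is in the standard library, so the entry above can be tried directly; it parses the module's source without importing or executing it (scanning json here is just an example):

import pyclbr

tree = pyclbr.readmodule_ex('json')   # reads json/__init__.py, no import
for name, obj in sorted(tree.items()):
    print(name, type(obj).__name__, obj.lineno)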
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/grid.py
python
GridTableBase._setOORInfo
(*args, **kwargs)
return _grid.GridTableBase__setOORInfo(*args, **kwargs)
_setOORInfo(self, PyObject _self)
_setOORInfo(self, PyObject _self)
[ "_setOORInfo", "(", "self", "PyObject", "_self", ")" ]
def _setOORInfo(*args, **kwargs): """_setOORInfo(self, PyObject _self)""" return _grid.GridTableBase__setOORInfo(*args, **kwargs)
[ "def", "_setOORInfo", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_grid", ".", "GridTableBase__setOORInfo", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/grid.py#L770-L772
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/special/basic.py
python
jnyn_zeros
(n, nt)
return specfun.jyzo(abs(n), nt)
Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. Parameters ---------- n : int Order of the Bessel functions nt : int Number (<=1200) of zeros to compute See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
[ "Compute", "nt", "zeros", "of", "Bessel", "functions", "Jn", "(", "x", ")", "Jn", "(", "x", ")", "Yn", "(", "x", ")", "and", "Yn", "(", "x", ")", "." ]
def jnyn_zeros(n, nt): """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x). Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. Parameters ---------- n : int Order of the Bessel functions nt : int Number (<=1200) of zeros to compute See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays. References ---------- .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special Functions", John Wiley and Sons, 1996, chapter 5. https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html """ if not (isscalar(nt) and isscalar(n)): raise ValueError("Arguments must be scalars.") if (floor(n) != n) or (floor(nt) != nt): raise ValueError("Arguments must be integers.") if (nt <= 0): raise ValueError("nt > 0") return specfun.jyzo(abs(n), nt)
[ "def", "jnyn_zeros", "(", "n", ",", "nt", ")", ":", "if", "not", "(", "isscalar", "(", "nt", ")", "and", "isscalar", "(", "n", ")", ")", ":", "raise", "ValueError", "(", "\"Arguments must be scalars.\"", ")", "if", "(", "floor", "(", "n", ")", "!=", "n", ")", "or", "(", "floor", "(", "nt", ")", "!=", "nt", ")", ":", "raise", "ValueError", "(", "\"Arguments must be integers.\"", ")", "if", "(", "nt", "<=", "0", ")", ":", "raise", "ValueError", "(", "\"nt > 0\"", ")", "return", "specfun", ".", "jyzo", "(", "abs", "(", "n", ")", ",", "nt", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/special/basic.py#L199-L227
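A quick check of the entry above against known values (SciPy assumed installed); the first zeros of J1 sit near 3.8317, 7.0156, 10.1735:

from scipy.special import jnyn_zeros

j1, j1p, y1, y1p = jnyn_zeros(1, 3)   # four arrays: zeros of Jn, Jn', Yn, Yn'
print(j1)                             # approximately [ 3.8317  7.0156 10.1735]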
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/_header_value_parser.py
python
get_dtext
(value)
return ptext, value
r""" dtext = <printable ascii except \ [ ]> / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair We allow anything except the excluded characters, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Quoted pairs are converted to their unquoted values, so what is returned is a ptext token, in this case a ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is added to the returned token's defect list.
r""" dtext = <printable ascii except \ [ ]> / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair
[ "r", "dtext", "=", "<printable", "ascii", "except", "\\", "[", "]", ">", "/", "obs", "-", "dtext", "obs", "-", "dtext", "=", "obs", "-", "NO", "-", "WS", "-", "CTL", "/", "quoted", "-", "pair" ]
def get_dtext(value): r""" dtext = <printable ascii except \ [ ]> / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair We allow anything except the excluded characters, but if we find any ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is added to the token's defects list. Quoted pairs are converted to their unquoted values, so what is returned is a ptext token, in this case a ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is added to the returned token's defect list. """ ptext, value, had_qp = _get_ptext_to_endchars(value, '[]') ptext = ValueTerminal(ptext, 'ptext') if had_qp: ptext.defects.append(errors.ObsoleteHeaderDefect( "quoted printable found in domain-literal")) _validate_xtext(ptext) return ptext, value
[ "def", "get_dtext", "(", "value", ")", ":", "ptext", ",", "value", ",", "had_qp", "=", "_get_ptext_to_endchars", "(", "value", ",", "'[]'", ")", "ptext", "=", "ValueTerminal", "(", "ptext", ",", "'ptext'", ")", "if", "had_qp", ":", "ptext", ".", "defects", ".", "append", "(", "errors", ".", "ObsoleteHeaderDefect", "(", "\"quoted printable found in domain-literal\"", ")", ")", "_validate_xtext", "(", "ptext", ")", "return", "ptext", ",", "value" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/_header_value_parser.py#L1523-L1541
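get_dtext lives in the private email._header_value_parser module, so the sketch below is illustrative only; it shows parsing stopping at the ']' that closes a domain-literal:

from email._header_value_parser import get_dtext

ptext, rest = get_dtext('192.168.0.1]trailing')
print(ptext, '|', rest)   # 192.168.0.1 | ]trailing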
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
ppapi/generators/idl_parser.py
python
IDLParser.p_integer
(self, p)
integer : HEX | INT | OCT
integer : HEX | INT | OCT
[ "integer", ":", "HEX", "|", "INT", "|", "OCT" ]
def p_integer(self, p): """integer : HEX | INT | OCT""" p[0] = p[1] if self.parse_debug: DumpReduction('integer', p)
[ "def", "p_integer", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "if", "self", ".", "parse_debug", ":", "DumpReduction", "(", "'integer'", ",", "p", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/ppapi/generators/idl_parser.py#L450-L455
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Source/ThirdParty/CEF3/cef_source/tools/make_distrib.py
python
combine_libs
(build_dir, libs, dest_lib)
Combine multiple static libraries into a single static library.
Combine multiple static libraries into a single static library.
[ "Combine", "multiple", "static", "libraries", "into", "a", "single", "static", "library", "." ]
def combine_libs(build_dir, libs, dest_lib): """ Combine multiple static libraries into a single static library. """ cmdline = 'msvs_env.bat win%s python combine_libs.py -o "%s"' % (platform_arch, dest_lib) for lib in libs: lib_path = os.path.join(build_dir, lib) for path in get_files(lib_path): # Expand wildcards in |lib_path|. if not path_exists(path): raise Exception('File not found: ' + path) cmdline = cmdline + ' "%s"' % path run(cmdline, os.path.join(cef_dir, 'tools'))
[ "def", "combine_libs", "(", "build_dir", ",", "libs", ",", "dest_lib", ")", ":", "cmdline", "=", "'msvs_env.bat win%s python combine_libs.py -o \"%s\"'", "%", "(", "platform_arch", ",", "dest_lib", ")", "for", "lib", "in", "libs", ":", "lib_path", "=", "os", ".", "path", ".", "join", "(", "build_dir", ",", "lib", ")", "for", "path", "in", "get_files", "(", "lib_path", ")", ":", "# Expand wildcards in |lib_path|.", "if", "not", "path_exists", "(", "path", ")", ":", "raise", "Exception", "(", "'File not found: '", "+", "path", ")", "cmdline", "=", "cmdline", "+", "' \"%s\"'", "%", "path", "run", "(", "cmdline", ",", "os", ".", "path", ".", "join", "(", "cef_dir", ",", "'tools'", ")", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/cef_source/tools/make_distrib.py#L242-L251
bulletphysics/bullet3
f0f2a952e146f016096db6f85cf0c44ed75b0b9a
examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env.py
python
MinitaurTrottingEnv._get_observation
(self)
return self._observation
Get observations of this environment. It includes the base roll, pitch, roll dot and pitch dot which may contain noises, bias, and latency. Also includes the desired/observed motor angles if the relevant flags are set. Returns: The observation list.
Get observations of this environment.
[ "Get", "observations", "of", "this", "environment", "." ]
def _get_observation(self): """Get observations of this environment. It includes the base roll, pitch, roll dot and pitch dot which may contain noises, bias, and latency. Also includes the desired/observed motor angles if the relevant flags are set. Returns: The observation list. """ observation = [] roll, pitch, _ = self.minitaur.GetBaseRollPitchYaw() roll_rate, pitch_rate, _ = self.minitaur.GetBaseRollPitchYawRate() observation.extend([roll, pitch, roll_rate, pitch_rate]) if self._use_signal_in_observation: observation.extend(self._transform_action_to_motor_command([0] * 8)) if self._use_angle_in_observation: observation.extend(self.minitaur.GetMotorAngles().tolist()) self._observation = np.array(observation) return self._observation
[ "def", "_get_observation", "(", "self", ")", ":", "observation", "=", "[", "]", "roll", ",", "pitch", ",", "_", "=", "self", ".", "minitaur", ".", "GetBaseRollPitchYaw", "(", ")", "roll_rate", ",", "pitch_rate", ",", "_", "=", "self", ".", "minitaur", ".", "GetBaseRollPitchYawRate", "(", ")", "observation", ".", "extend", "(", "[", "roll", ",", "pitch", ",", "roll_rate", ",", "pitch_rate", "]", ")", "if", "self", ".", "_use_signal_in_observation", ":", "observation", ".", "extend", "(", "self", ".", "_transform_action_to_motor_command", "(", "[", "0", "]", "*", "8", ")", ")", "if", "self", ".", "_use_angle_in_observation", ":", "observation", ".", "extend", "(", "self", ".", "minitaur", ".", "GetMotorAngles", "(", ")", ".", "tolist", "(", ")", ")", "self", ".", "_observation", "=", "np", ".", "array", "(", "observation", ")", "return", "self", ".", "_observation" ]
https://github.com/bulletphysics/bullet3/blob/f0f2a952e146f016096db6f85cf0c44ed75b0b9a/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_trotting_env.py#L254-L273
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/python3/src/Lib/smtplib.py
python
SMTP.getreply
(self)
return errcode, errmsg
Get a reply from the server. Returns a tuple consisting of: - server response code (e.g. '250', or such, if all goes well) Note: returns -1 if it can't read response code. - server response string corresponding to response code (multiline responses are converted to a single, multiline string). Raises SMTPServerDisconnected if end-of-file is reached.
Get a reply from the server.
[ "Get", "a", "reply", "from", "the", "server", "." ]
def getreply(self): """Get a reply from the server. Returns a tuple consisting of: - server response code (e.g. '250', or such, if all goes well) Note: returns -1 if it can't read response code. - server response string corresponding to response code (multiline responses are converted to a single, multiline string). Raises SMTPServerDisconnected if end-of-file is reached. """ resp = [] if self.file is None: self.file = self.sock.makefile('rb') while 1: try: line = self.file.readline(_MAXLINE + 1) except OSError as e: self.close() raise SMTPServerDisconnected("Connection unexpectedly closed: " + str(e)) if not line: self.close() raise SMTPServerDisconnected("Connection unexpectedly closed") if self.debuglevel > 0: self._print_debug('reply:', repr(line)) if len(line) > _MAXLINE: self.close() raise SMTPResponseException(500, "Line too long.") resp.append(line[4:].strip(b' \t\r\n')) code = line[:3] # Check that the error code is syntactically correct. # Don't attempt to read a continuation line if it is broken. try: errcode = int(code) except ValueError: errcode = -1 break # Check if multiline response. if line[3:4] != b"-": break errmsg = b"\n".join(resp) if self.debuglevel > 0: self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg)) return errcode, errmsg
[ "def", "getreply", "(", "self", ")", ":", "resp", "=", "[", "]", "if", "self", ".", "file", "is", "None", ":", "self", ".", "file", "=", "self", ".", "sock", ".", "makefile", "(", "'rb'", ")", "while", "1", ":", "try", ":", "line", "=", "self", ".", "file", ".", "readline", "(", "_MAXLINE", "+", "1", ")", "except", "OSError", "as", "e", ":", "self", ".", "close", "(", ")", "raise", "SMTPServerDisconnected", "(", "\"Connection unexpectedly closed: \"", "+", "str", "(", "e", ")", ")", "if", "not", "line", ":", "self", ".", "close", "(", ")", "raise", "SMTPServerDisconnected", "(", "\"Connection unexpectedly closed\"", ")", "if", "self", ".", "debuglevel", ">", "0", ":", "self", ".", "_print_debug", "(", "'reply:'", ",", "repr", "(", "line", ")", ")", "if", "len", "(", "line", ")", ">", "_MAXLINE", ":", "self", ".", "close", "(", ")", "raise", "SMTPResponseException", "(", "500", ",", "\"Line too long.\"", ")", "resp", ".", "append", "(", "line", "[", "4", ":", "]", ".", "strip", "(", "b' \\t\\r\\n'", ")", ")", "code", "=", "line", "[", ":", "3", "]", "# Check that the error code is syntactically correct.", "# Don't attempt to read a continuation line if it is broken.", "try", ":", "errcode", "=", "int", "(", "code", ")", "except", "ValueError", ":", "errcode", "=", "-", "1", "break", "# Check if multiline response.", "if", "line", "[", "3", ":", "4", "]", "!=", "b\"-\"", ":", "break", "errmsg", "=", "b\"\\n\"", ".", "join", "(", "resp", ")", "if", "self", ".", "debuglevel", ">", "0", ":", "self", ".", "_print_debug", "(", "'reply: retcode (%s); Msg: %a'", "%", "(", "errcode", ",", "errmsg", ")", ")", "return", "errcode", ",", "errmsg" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/smtplib.py#L380-L427
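The framing rule in getreply is byte 4 of each reply line: 'NNN-' marks a continuation, 'NNN ' the final line. A standalone sketch of just that rule, applied to a canned multiline reply:

def parse_reply(raw):
    # Same multiline convention as SMTP.getreply: accumulate the text after
    # the 4-byte prefix; stop when the separator byte is not b'-'.
    resp = []
    for line in raw.splitlines(keepends=True):
        resp.append(line[4:].strip(b' \t\r\n'))
        if line[3:4] != b'-':
            return int(line[:3]), b'\n'.join(resp)

print(parse_reply(b'250-mail.example.com\r\n250 SIZE 35882577\r\n'))
# (250, b'mail.example.com\nSIZE 35882577')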
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py3/IPython/utils/text.py
python
columnize
(items, row_first=False, separator=' ', displaywidth=80, spread=False)
return '\n'.join(map(sjoin, fmatrix))+'\n'
Transform a list of strings into a single string with columns. Parameters ---------- items : sequence of strings The strings to process. row_first : (default False) Whether to compute columns for a row-first matrix instead of column-first (default). separator : str, optional [default is two spaces] The string that separates columns. displaywidth : int, optional [default is 80] Width of the display in number of characters. Returns ------- The formatted string.
Transform a list of strings into a single string with columns.
[ "Transform", "a", "list", "of", "strings", "into", "a", "single", "string", "with", "columns", "." ]
def columnize(items, row_first=False, separator=' ', displaywidth=80, spread=False): """ Transform a list of strings into a single string with columns. Parameters ---------- items : sequence of strings The strings to process. row_first : (default False) Whether to compute columns for a row-first matrix instead of column-first (default). separator : str, optional [default is two spaces] The string that separates columns. displaywidth : int, optional [default is 80] Width of the display in number of characters. Returns ------- The formatted string. """ if not items: return '\n' matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth) if spread: separator = separator.ljust(int(info['optimal_separator_width'])) fmatrix = [filter(None, x) for x in matrix] sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])]) return '\n'.join(map(sjoin, fmatrix))+'\n'
[ "def", "columnize", "(", "items", ",", "row_first", "=", "False", ",", "separator", "=", "' '", ",", "displaywidth", "=", "80", ",", "spread", "=", "False", ")", ":", "if", "not", "items", ":", "return", "'\\n'", "matrix", ",", "info", "=", "compute_item_matrix", "(", "items", ",", "row_first", "=", "row_first", ",", "separator_size", "=", "len", "(", "separator", ")", ",", "displaywidth", "=", "displaywidth", ")", "if", "spread", ":", "separator", "=", "separator", ".", "ljust", "(", "int", "(", "info", "[", "'optimal_separator_width'", "]", ")", ")", "fmatrix", "=", "[", "filter", "(", "None", ",", "x", ")", "for", "x", "in", "matrix", "]", "sjoin", "=", "lambda", "x", ":", "separator", ".", "join", "(", "[", "y", ".", "ljust", "(", "w", ",", "' '", ")", "for", "y", ",", "w", "in", "zip", "(", "x", ",", "info", "[", "'column_widths'", "]", ")", "]", ")", "return", "'\\n'", ".", "join", "(", "map", "(", "sjoin", ",", "fmatrix", ")", ")", "+", "'\\n'" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/utils/text.py#L701-L730
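A usage sketch for the entry above (IPython assumed importable); default layout is column-first:

from IPython.utils.text import columnize

items = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta']
print(columnize(items, displaywidth=30), end='')
# alpha  gamma  epsilon        <- approximate layout, column-first
# beta   delta  zeta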
microsoft/AirSim
8057725712c0cd46979135396381784075ffc0f3
PythonClient/airsim/client.py
python
VehicleClient.getGpsData
(self, gps_name = '', vehicle_name = '')
return GpsData.from_msgpack(self.client.call('getGpsData', gps_name, vehicle_name))
Args: gps_name (str, optional): Name of GPS to get data from, specified in settings.json vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to Returns: GpsData:
Args: gps_name (str, optional): Name of GPS to get data from, specified in settings.json vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to
[ "Args", ":", "gps_name", "(", "str", "optional", ")", ":", "Name", "of", "GPS", "to", "get", "data", "from", "specified", "in", "settings", ".", "json", "vehicle_name", "(", "str", "optional", ")", ":", "Name", "of", "vehicle", "to", "which", "the", "sensor", "corresponds", "to" ]
def getGpsData(self, gps_name = '', vehicle_name = ''): """ Args: gps_name (str, optional): Name of GPS to get data from, specified in settings.json vehicle_name (str, optional): Name of vehicle to which the sensor corresponds to Returns: GpsData: """ return GpsData.from_msgpack(self.client.call('getGpsData', gps_name, vehicle_name))
[ "def", "getGpsData", "(", "self", ",", "gps_name", "=", "''", ",", "vehicle_name", "=", "''", ")", ":", "return", "GpsData", ".", "from_msgpack", "(", "self", ".", "client", ".", "call", "(", "'getGpsData'", ",", "gps_name", ",", "vehicle_name", ")", ")" ]
https://github.com/microsoft/AirSim/blob/8057725712c0cd46979135396381784075ffc0f3/PythonClient/airsim/client.py#L872-L881
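Calling the entry above requires a running AirSim instance, so this is a sketch only, using the default sensor and vehicle names:

import airsim

client = airsim.MultirotorClient()   # connects to a running simulator
client.confirmConnection()
gps = client.getGpsData()            # default gps_name/vehicle_name
print(gps.gnss.geo_point)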
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py
python
Canvas.type
(self, tagOrId)
return self.tk.call(self._w, 'type', tagOrId) or None
Return the type of the item TAGORID.
Return the type of the item TAGORID.
[ "Return", "the", "type", "of", "the", "item", "TAGORID", "." ]
def type(self, tagOrId): """Return the type of the item TAGORID.""" return self.tk.call(self._w, 'type', tagOrId) or None
[ "def", "type", "(", "self", ",", "tagOrId", ")", ":", "return", "self", ".", "tk", ".", "call", "(", "self", ".", "_w", ",", "'type'", ",", "tagOrId", ")", "or", "None" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L2630-L2632
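A runnable check of the entry above (Tk needs a display):

import tkinter as tk

root = tk.Tk()
canvas = tk.Canvas(root)
rect = canvas.create_rectangle(10, 10, 60, 40)
print(canvas.type(rect))   # rectangle
root.destroy()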
s9xie/hed
94fb22f10cbfec8d84fbc0642b224022014b6bd6
python/caffe/pycaffe.py
python
_Net_blobs
(self)
return OrderedDict(zip(self._blob_names, self._blobs))
An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name
An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name
[ "An", "OrderedDict", "(", "bottom", "to", "top", "i", ".", "e", ".", "input", "to", "output", ")", "of", "network", "blobs", "indexed", "by", "name" ]
def _Net_blobs(self): """ An OrderedDict (bottom to top, i.e., input to output) of network blobs indexed by name """ return OrderedDict(zip(self._blob_names, self._blobs))
[ "def", "_Net_blobs", "(", "self", ")", ":", "return", "OrderedDict", "(", "zip", "(", "self", ".", "_blob_names", ",", "self", ".", "_blobs", ")", ")" ]
https://github.com/s9xie/hed/blob/94fb22f10cbfec8d84fbc0642b224022014b6bd6/python/caffe/pycaffe.py#L22-L27
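The blobs property above is typically used to inspect layer shapes. A sketch assuming pycaffe is built; the two model file names are hypothetical placeholders:

import caffe

# 'deploy.prototxt' and 'weights.caffemodel' are placeholder paths.
net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
for name, blob in net.blobs.items():   # OrderedDict, input to output
    print(name, blob.data.shape)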
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/nn/layer/basic.py
python
Dense.__init__
(self, in_channels, out_channels, weight_init='normal', bias_init='zeros', has_bias=True, activation=None)
Initialize Dense.
Initialize Dense.
[ "Initialize", "Dense", "." ]
def __init__(self, in_channels, out_channels, weight_init='normal', bias_init='zeros', has_bias=True, activation=None): """Initialize Dense.""" super(Dense, self).__init__() self.in_channels = Validator.check_positive_int(in_channels, "in_channels", self.cls_name) self.out_channels = Validator.check_positive_int(out_channels, "out_channels", self.cls_name) self.has_bias = Validator.check_bool(has_bias, "has_bias", self.cls_name) self.reshape = P.Reshape() self.shape_op = P.Shape() if isinstance(weight_init, Tensor): if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \ weight_init.shape[1] != in_channels: raise ValueError(f"For '{self.cls_name}', weight init shape error. The ndim of 'weight_init' should " f"be equal to 2, and the first dim should be equal to 'out_channels', and the " f"second dim should be equal to 'in_channels'. But got 'weight_init': {weight_init}, " f"'out_channels': {out_channels}, 'in_channels': {in_channels}.") self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight") self.bias = None if self.has_bias: if isinstance(bias_init, Tensor): if bias_init.ndim != 1 or bias_init.shape[0] != out_channels: raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' should " f"be equal to 1, and the first dim should be equal to 'out_channels'. But got " f"'bias_init': {bias_init}, 'out_channels': {out_channels}.") self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias") self.bias_add = P.BiasAdd() self.matmul = P.MatMul(transpose_b=True) self.activation = get_activation(activation) if isinstance(activation, str) else activation if activation is not None and not isinstance(self.activation, (Cell, Primitive)): raise TypeError(f"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got " f"{type(activation).__name__}.") self.activation_flag = self.activation is not None
[ "def", "__init__", "(", "self", ",", "in_channels", ",", "out_channels", ",", "weight_init", "=", "'normal'", ",", "bias_init", "=", "'zeros'", ",", "has_bias", "=", "True", ",", "activation", "=", "None", ")", ":", "super", "(", "Dense", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "in_channels", "=", "Validator", ".", "check_positive_int", "(", "in_channels", ",", "\"in_channels\"", ",", "self", ".", "cls_name", ")", "self", ".", "out_channels", "=", "Validator", ".", "check_positive_int", "(", "out_channels", ",", "\"out_channels\"", ",", "self", ".", "cls_name", ")", "self", ".", "has_bias", "=", "Validator", ".", "check_bool", "(", "has_bias", ",", "\"has_bias\"", ",", "self", ".", "cls_name", ")", "self", ".", "reshape", "=", "P", ".", "Reshape", "(", ")", "self", ".", "shape_op", "=", "P", ".", "Shape", "(", ")", "if", "isinstance", "(", "weight_init", ",", "Tensor", ")", ":", "if", "weight_init", ".", "ndim", "!=", "2", "or", "weight_init", ".", "shape", "[", "0", "]", "!=", "out_channels", "or", "weight_init", ".", "shape", "[", "1", "]", "!=", "in_channels", ":", "raise", "ValueError", "(", "f\"For '{self.cls_name}', weight init shape error. The ndim of 'weight_init' should \"", "f\"be equal to 2, and the first dim should be equal to 'out_channels', and the \"", "f\"second dim should be equal to 'in_channels'. But got 'weight_init': {weight_init}, \"", "f\"'out_channels': {out_channels}, 'in_channels': {in_channels}.\"", ")", "self", ".", "weight", "=", "Parameter", "(", "initializer", "(", "weight_init", ",", "[", "out_channels", ",", "in_channels", "]", ")", ",", "name", "=", "\"weight\"", ")", "self", ".", "bias", "=", "None", "if", "self", ".", "has_bias", ":", "if", "isinstance", "(", "bias_init", ",", "Tensor", ")", ":", "if", "bias_init", ".", "ndim", "!=", "1", "or", "bias_init", ".", "shape", "[", "0", "]", "!=", "out_channels", ":", "raise", "ValueError", "(", "f\"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' should \"", "f\"be equal to 1, and the first dim should be equal to 'out_channels'. But got \"", "f\"'bias_init': {bias_init}, 'out_channels': {out_channels}.\"", ")", "self", ".", "bias", "=", "Parameter", "(", "initializer", "(", "bias_init", ",", "[", "out_channels", "]", ")", ",", "name", "=", "\"bias\"", ")", "self", ".", "bias_add", "=", "P", ".", "BiasAdd", "(", ")", "self", ".", "matmul", "=", "P", ".", "MatMul", "(", "transpose_b", "=", "True", ")", "self", ".", "activation", "=", "get_activation", "(", "activation", ")", "if", "isinstance", "(", "activation", ",", "str", ")", "else", "activation", "if", "activation", "is", "not", "None", "and", "not", "isinstance", "(", "self", ".", "activation", ",", "(", "Cell", ",", "Primitive", ")", ")", ":", "raise", "TypeError", "(", "f\"For '{self.cls_name}', the 'activation' must be str or Cell or Primitive, but got \"", "f\"{type(activation).__name__}.\"", ")", "self", ".", "activation_flag", "=", "self", ".", "activation", "is", "not", "None" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/nn/layer/basic.py#L278-L317
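A usage sketch for the entry above (MindSpore assumed installed):

import numpy as np
import mindspore as ms
from mindspore import nn, Tensor

net = nn.Dense(3, 4, activation='relu')   # in_channels=3, out_channels=4
x = Tensor(np.ones((2, 3)), ms.float32)
print(net(x).shape)                       # (2, 4)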
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/python/ops/image_ops.py
python
grayscale_to_rgb
(images, name=None)
Converts one or more images from Grayscale to RGB. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 3, containing the RGB value of the pixels. Args: images: The Grayscale tensor to convert. Last dimension must be size 1. name: A name for the operation (optional). Returns: The converted grayscale image(s).
Converts one or more images from Grayscale to RGB.
[ "Converts", "one", "or", "more", "images", "from", "Grayscale", "to", "RGB", "." ]
def grayscale_to_rgb(images, name=None): """Converts one or more images from Grayscale to RGB. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 3, containing the RGB value of the pixels. Args: images: The Grayscale tensor to convert. Last dimension must be size 1. name: A name for the operation (optional). Returns: The converted grayscale image(s). """ with ops.op_scope([images], name, 'grayscale_to_rgb') as name: images = ops.convert_to_tensor(images, name='images') rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0) shape_list = ( [array_ops.ones(rank_1, dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)]) multiples = array_ops.concat(0, shape_list) rgb = array_ops.tile(images, multiples, name=name) rgb.set_shape(images.get_shape()[:-1].concatenate([3])) return rgb
[ "def", "grayscale_to_rgb", "(", "images", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "op_scope", "(", "[", "images", "]", ",", "name", ",", "'grayscale_to_rgb'", ")", "as", "name", ":", "images", "=", "ops", ".", "convert_to_tensor", "(", "images", ",", "name", "=", "'images'", ")", "rank_1", "=", "array_ops", ".", "expand_dims", "(", "array_ops", ".", "rank", "(", "images", ")", "-", "1", ",", "0", ")", "shape_list", "=", "(", "[", "array_ops", ".", "ones", "(", "rank_1", ",", "dtype", "=", "dtypes", ".", "int32", ")", "]", "+", "[", "array_ops", ".", "expand_dims", "(", "3", ",", "0", ")", "]", ")", "multiples", "=", "array_ops", ".", "concat", "(", "0", ",", "shape_list", ")", "rgb", "=", "array_ops", ".", "tile", "(", "images", ",", "multiples", ",", "name", "=", "name", ")", "rgb", ".", "set_shape", "(", "images", ".", "get_shape", "(", ")", "[", ":", "-", "1", "]", ".", "concatenate", "(", "[", "3", "]", ")", ")", "return", "rgb" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/ops/image_ops.py#L1158-L1180
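The op above just tiles the size-1 channel dimension three times; the same effect in plain NumPy, for intuition:

import numpy as np

gray = np.random.rand(2, 4, 4, 1)   # batch of single-channel images, NHWC
rgb = np.tile(gray, [1, 1, 1, 3])   # repeat last dim, as grayscale_to_rgb does
print(rgb.shape)                    # (2, 4, 4, 3)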
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/wallet.py
python
Wallet.timestamp
(self, timestamp)
Sets the timestamp of this Wallet. :param timestamp: The timestamp of this Wallet. # noqa: E501 :type: datetime
Sets the timestamp of this Wallet.
[ "Sets", "the", "timestamp", "of", "this", "Wallet", "." ]
def timestamp(self, timestamp): """Sets the timestamp of this Wallet. :param timestamp: The timestamp of this Wallet. # noqa: E501 :type: datetime """ self._timestamp = timestamp
[ "def", "timestamp", "(", "self", ",", "timestamp", ")", ":", "self", ".", "_timestamp", "=", "timestamp" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/wallet.py#L624-L632
nucleic/atom
9f0cb2a8101dd63c354a98ebc7489b2c616dc82a
docs/source/examples/example_doc_generator.py
python
main
()
Generate documentation for all atom examples.
Generate documentation for all atom examples.
[ "Generate", "documentation", "for", "all", "atom", "examples", "." ]
def main(): """Generate documentation for all atom examples.""" docs_path = os.path.dirname(__file__) base_path = "../../../examples" base_path = os.path.realpath(os.path.join(docs_path, base_path)) # Find all the files in the examples directory with a .py extension # that contain the pragma '<< autodoc-me >>', and generate .rst files for # them. for dirname, dirnames, filenames in os.walk(base_path): files = [os.path.join(dirname, f) for f in filenames if f.endswith(".py")] for fname in files: generate_example_doc(docs_path, fname)
[ "def", "main", "(", ")", ":", "docs_path", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "base_path", "=", "\"../../../examples\"", "base_path", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "join", "(", "docs_path", ",", "base_path", ")", ")", "# Find all the files in the examples directory with a .enaml extension", "# that contain the pragma '<< autodoc-me >>', and generate .rst files for", "# them.", "for", "dirname", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "base_path", ")", ":", "files", "=", "[", "os", ".", "path", ".", "join", "(", "dirname", ",", "f", ")", "for", "f", "in", "filenames", "if", "f", ".", "endswith", "(", "\".py\"", ")", "]", "for", "fname", "in", "files", ":", "generate_example_doc", "(", "docs_path", ",", "fname", ")" ]
https://github.com/nucleic/atom/blob/9f0cb2a8101dd63c354a98ebc7489b2c616dc82a/docs/source/examples/example_doc_generator.py#L109-L121
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/saved_model/utils_impl.py
python
build_tensor_info
(tensor)
return tensor_info
Utility function to build TensorInfo proto. Args: tensor: Tensor or SparseTensor whose name, dtype and shape are used to build the TensorInfo. For SparseTensors, the names of the three constituent Tensors are used. Returns: A TensorInfo protocol buffer constructed based on the supplied argument.
Utility function to build TensorInfo proto.
[ "Utility", "function", "to", "build", "TensorInfo", "proto", "." ]
def build_tensor_info(tensor): """Utility function to build TensorInfo proto. Args: tensor: Tensor or SparseTensor whose name, dtype and shape are used to build the TensorInfo. For SparseTensors, the names of the three constituent Tensors are used. Returns: A TensorInfo protocol buffer constructed based on the supplied argument. """ tensor_info = meta_graph_pb2.TensorInfo( dtype=dtypes.as_dtype(tensor.dtype).as_datatype_enum, tensor_shape=tensor.get_shape().as_proto()) if isinstance(tensor, sparse_tensor.SparseTensor): tensor_info.coo_sparse.values_tensor_name = tensor.values.name tensor_info.coo_sparse.indices_tensor_name = tensor.indices.name tensor_info.coo_sparse.dense_shape_tensor_name = tensor.dense_shape.name else: tensor_info.name = tensor.name return tensor_info
[ "def", "build_tensor_info", "(", "tensor", ")", ":", "tensor_info", "=", "meta_graph_pb2", ".", "TensorInfo", "(", "dtype", "=", "dtypes", ".", "as_dtype", "(", "tensor", ".", "dtype", ")", ".", "as_datatype_enum", ",", "tensor_shape", "=", "tensor", ".", "get_shape", "(", ")", ".", "as_proto", "(", ")", ")", "if", "isinstance", "(", "tensor", ",", "sparse_tensor", ".", "SparseTensor", ")", ":", "tensor_info", ".", "coo_sparse", ".", "values_tensor_name", "=", "tensor", ".", "values", ".", "name", "tensor_info", ".", "coo_sparse", ".", "indices_tensor_name", "=", "tensor", ".", "indices", ".", "name", "tensor_info", ".", "coo_sparse", ".", "dense_shape_tensor_name", "=", "tensor", ".", "dense_shape", ".", "name", "else", ":", "tensor_info", ".", "name", "=", "tensor", ".", "name", "return", "tensor_info" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/saved_model/utils_impl.py#L30-L50
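A TF1-style usage sketch for the entry above (graph mode; in modern TensorFlow this API lives under tf.compat.v1 and is deprecated):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
info = tf.saved_model.utils.build_tensor_info(x)
print(info.name)    # x:0
print(info.dtype)   # 1, the enum value for DT_FLOAT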
weolar/miniblink49
1c4678db0594a4abde23d3ebbcc7cd13c3170777
third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/results.py
python
Numbers.pc_str_width
(cls)
return width
How many characters wide can pc_covered_str be?
How many characters wide can pc_covered_str be?
[ "How", "many", "characters", "wide", "can", "pc_covered_str", "be?" ]
def pc_str_width(cls): """How many characters wide can pc_covered_str be?""" width = 3 # "100" if cls._precision > 0: width += 1 + cls._precision return width
[ "def", "pc_str_width", "(", "cls", ")", ":", "width", "=", "3", "# \"100\"", "if", "cls", ".", "_precision", ">", "0", ":", "width", "+=", "1", "+", "cls", ".", "_precision", "return", "width" ]
https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/results.py#L224-L229
intel/caffe
3f494b442ee3f9d17a07b09ecbd5fa2bbda00836
scripts/cpp_lint.py
python
_NestingState.Update
(self, filename, clean_lines, linenum, error)
Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Update nesting state with current line.
[ "Update", "nesting", "state", "with", "current", "line", "." ]
def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Update pp_stack first self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; # # Templates with class arguments may confuse the parser, for example: # template <class T # class Comparator = less<T>, # class Vector = vector<T> > # class HeapQueue { # # Because this parser has no nesting state about templates, by the # time it saw "class Comparator", it may think that it's a new class. # Nested templates have a similar problem: # template < # typename ExportedType, # typename TupleType, # template <typename, typename> class ImplTemplate> # # To avoid these cases, we ignore classes that are followed by '=' or '>' class_decl_match = Match( r'\s*(template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): self.stack.append(_ClassInfo( class_decl_match.group(4), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(5) # If we have not yet seen the opening brace for the innermost block, # run checks here. if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespaces. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and Match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: parent = 'class ' + classinfo.name slots = '' if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, '%s%s: should be indented +1 space inside %s' % ( access_match.group(2), slots, parent)) # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break token = matched.group(1) if token == '{': # If namespace or class hasn't seen a opening brace yet, mark # namespace/class head as complete. Push a new block onto the # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True else: self.stack.append(_BlockInfo(True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM elif token == ';' or token == ')': # If we haven't seen an opening brace yet, but we already saw # a semicolon, this is probably a forward declaration. Pop # the stack for these. # # Similarly, if we haven't seen an opening brace yet, but we # already saw a closing parenthesis, then these are probably # function arguments with extra "class" or "struct" keywords. # Also pop these stack for these. if not self.SeenOpenBrace(): self.stack.pop() else: # token == '}' # Perform end of block checks and pop the stack. if self.stack: self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) self.stack.pop() line = matched.group(2)
[ "def", "Update", "(", "self", ",", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Update pp_stack first", "self", ".", "UpdatePreprocessor", "(", "line", ")", "# Count parentheses. This is to avoid adding struct arguments to", "# the nesting stack.", "if", "self", ".", "stack", ":", "inner_block", "=", "self", ".", "stack", "[", "-", "1", "]", "depth_change", "=", "line", ".", "count", "(", "'('", ")", "-", "line", ".", "count", "(", "')'", ")", "inner_block", ".", "open_parentheses", "+=", "depth_change", "# Also check if we are starting or ending an inline assembly block.", "if", "inner_block", ".", "inline_asm", "in", "(", "_NO_ASM", ",", "_END_ASM", ")", ":", "if", "(", "depth_change", "!=", "0", "and", "inner_block", ".", "open_parentheses", "==", "1", "and", "_MATCH_ASM", ".", "match", "(", "line", ")", ")", ":", "# Enter assembly block", "inner_block", ".", "inline_asm", "=", "_INSIDE_ASM", "else", ":", "# Not entering assembly block. If previous line was _END_ASM,", "# we will now shift to _NO_ASM state.", "inner_block", ".", "inline_asm", "=", "_NO_ASM", "elif", "(", "inner_block", ".", "inline_asm", "==", "_INSIDE_ASM", "and", "inner_block", ".", "open_parentheses", "==", "0", ")", ":", "# Exit assembly block", "inner_block", ".", "inline_asm", "=", "_END_ASM", "# Consume namespace declaration at the beginning of the line. Do", "# this in a loop so that we catch same line declarations like this:", "# namespace proto2 { namespace bridge { class MessageSet; } }", "while", "True", ":", "# Match start of namespace. The \"\\b\\s*\" below catches namespace", "# declarations even if it weren't followed by a whitespace, this", "# is so that we don't confuse our namespace checker. The", "# missing spaces will be flagged by CheckSpacing.", "namespace_decl_match", "=", "Match", "(", "r'^\\s*namespace\\b\\s*([:\\w]+)?(.*)$'", ",", "line", ")", "if", "not", "namespace_decl_match", ":", "break", "new_namespace", "=", "_NamespaceInfo", "(", "namespace_decl_match", ".", "group", "(", "1", ")", ",", "linenum", ")", "self", ".", "stack", ".", "append", "(", "new_namespace", ")", "line", "=", "namespace_decl_match", ".", "group", "(", "2", ")", "if", "line", ".", "find", "(", "'{'", ")", "!=", "-", "1", ":", "new_namespace", ".", "seen_open_brace", "=", "True", "line", "=", "line", "[", "line", ".", "find", "(", "'{'", ")", "+", "1", ":", "]", "# Look for a class declaration in whatever is left of the line", "# after parsing namespaces. 
The regexp accounts for decorated classes", "# such as in:", "# class LOCKABLE API Object {", "# };", "#", "# Templates with class arguments may confuse the parser, for example:", "# template <class T", "# class Comparator = less<T>,", "# class Vector = vector<T> >", "# class HeapQueue {", "#", "# Because this parser has no nesting state about templates, by the", "# time it saw \"class Comparator\", it may think that it's a new class.", "# Nested templates have a similar problem:", "# template <", "# typename ExportedType,", "# typename TupleType,", "# template <typename, typename> class ImplTemplate>", "#", "# To avoid these cases, we ignore classes that are followed by '=' or '>'", "class_decl_match", "=", "Match", "(", "r'\\s*(template\\s*<[\\w\\s<>,:]*>\\s*)?'", "r'(class|struct)\\s+([A-Z_]+\\s+)*(\\w+(?:::\\w+)*)'", "r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\\s*>)*)$'", ",", "line", ")", "if", "(", "class_decl_match", "and", "(", "not", "self", ".", "stack", "or", "self", ".", "stack", "[", "-", "1", "]", ".", "open_parentheses", "==", "0", ")", ")", ":", "self", ".", "stack", ".", "append", "(", "_ClassInfo", "(", "class_decl_match", ".", "group", "(", "4", ")", ",", "class_decl_match", ".", "group", "(", "2", ")", ",", "clean_lines", ",", "linenum", ")", ")", "line", "=", "class_decl_match", ".", "group", "(", "5", ")", "# If we have not yet seen the opening brace for the innermost block,", "# run checks here.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "CheckBegin", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "# Update access control if we are inside a class/struct", "if", "self", ".", "stack", "and", "isinstance", "(", "self", ".", "stack", "[", "-", "1", "]", ",", "_ClassInfo", ")", ":", "classinfo", "=", "self", ".", "stack", "[", "-", "1", "]", "access_match", "=", "Match", "(", "r'^(.*)\\b(public|private|protected|signals)(\\s+(?:slots\\s*)?)?'", "r':(?:[^:]|$)'", ",", "line", ")", "if", "access_match", ":", "classinfo", ".", "access", "=", "access_match", ".", "group", "(", "2", ")", "# Check that access keywords are indented +1 space. Skip this", "# check if the keywords are not preceded by whitespaces.", "indent", "=", "access_match", ".", "group", "(", "1", ")", "if", "(", "len", "(", "indent", ")", "!=", "classinfo", ".", "class_indent", "+", "1", "and", "Match", "(", "r'^\\s*$'", ",", "indent", ")", ")", ":", "if", "classinfo", ".", "is_struct", ":", "parent", "=", "'struct '", "+", "classinfo", ".", "name", "else", ":", "parent", "=", "'class '", "+", "classinfo", ".", "name", "slots", "=", "''", "if", "access_match", ".", "group", "(", "3", ")", ":", "slots", "=", "access_match", ".", "group", "(", "3", ")", "error", "(", "filename", ",", "linenum", ",", "'whitespace/indent'", ",", "3", ",", "'%s%s: should be indented +1 space inside %s'", "%", "(", "access_match", ".", "group", "(", "2", ")", ",", "slots", ",", "parent", ")", ")", "# Consume braces or semicolons from what's left of the line", "while", "True", ":", "# Match first brace, semicolon, or closed parenthesis.", "matched", "=", "Match", "(", "r'^[^{;)}]*([{;)}])(.*)$'", ",", "line", ")", "if", "not", "matched", ":", "break", "token", "=", "matched", ".", "group", "(", "1", ")", "if", "token", "==", "'{'", ":", "# If namespace or class hasn't seen a opening brace yet, mark", "# namespace/class head as complete. 
Push a new block onto the", "# stack otherwise.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "seen_open_brace", "=", "True", "else", ":", "self", ".", "stack", ".", "append", "(", "_BlockInfo", "(", "True", ")", ")", "if", "_MATCH_ASM", ".", "match", "(", "line", ")", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "inline_asm", "=", "_BLOCK_ASM", "elif", "token", "==", "';'", "or", "token", "==", "')'", ":", "# If we haven't seen an opening brace yet, but we already saw", "# a semicolon, this is probably a forward declaration. Pop", "# the stack for these.", "#", "# Similarly, if we haven't seen an opening brace yet, but we", "# already saw a closing parenthesis, then these are probably", "# function arguments with extra \"class\" or \"struct\" keywords.", "# Also pop these stack for these.", "if", "not", "self", ".", "SeenOpenBrace", "(", ")", ":", "self", ".", "stack", ".", "pop", "(", ")", "else", ":", "# token == '}'", "# Perform end of block checks and pop the stack.", "if", "self", ".", "stack", ":", "self", ".", "stack", "[", "-", "1", "]", ".", "CheckEnd", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", "self", ".", "stack", ".", "pop", "(", ")", "line", "=", "matched", ".", "group", "(", "2", ")" ]
https://github.com/intel/caffe/blob/3f494b442ee3f9d17a07b09ecbd5fa2bbda00836/scripts/cpp_lint.py#L2008-L2162
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
current/tools/gyp/pylib/gyp/xcode_emulation.py
python
XcodeSettings.GetExecutableName
(self)
Returns the executable name of the bundle represented by this target. E.g. Chromium.
Returns the executable name of the bundle represented by this target. E.g. Chromium.
[ "Returns", "the", "executable", "name", "of", "the", "bundle", "represented", "by", "this", "target", ".", "E", ".", "g", ".", "Chromium", "." ]
def GetExecutableName(self): """Returns the executable name of the bundle represented by this target. E.g. Chromium.""" if self._IsBundle(): return self.spec.get('product_name', self.spec['target_name']) else: return self._GetStandaloneBinaryPath()
[ "def", "GetExecutableName", "(", "self", ")", ":", "if", "self", ".", "_IsBundle", "(", ")", ":", "return", "self", ".", "spec", ".", "get", "(", "'product_name'", ",", "self", ".", "spec", "[", "'target_name'", "]", ")", "else", ":", "return", "self", ".", "_GetStandaloneBinaryPath", "(", ")" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/tools/gyp/pylib/gyp/xcode_emulation.py#L471-L477
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/calendar.py
python
Calendar.itermonthdays
(self, year, month)
Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0.
Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0.
[ "Like", "itermonthdates", "()", "but", "will", "yield", "day", "numbers", ".", "For", "days", "outside", "the", "specified", "month", "the", "day", "number", "is", "0", "." ]
def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ for date in self.itermonthdates(year, month): if date.month != month: yield 0 else: yield date.day
[ "def", "itermonthdays", "(", "self", ",", "year", ",", "month", ")", ":", "for", "date", "in", "self", ".", "itermonthdates", "(", "year", ",", "month", ")", ":", "if", "date", ".", "month", "!=", "month", ":", "yield", "0", "else", ":", "yield", "date", ".", "day" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/calendar.py#L183-L192
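A quick sanity check of the zero-padding behaviour described above, using only the standard library: with the default Monday-first Calendar, February 2024 starts on a Thursday, so the first three slots are padding.

import calendar

cal = calendar.Calendar()              # firstweekday=0, i.e. Monday-first
days = list(cal.itermonthdays(2024, 2))
assert days[:5] == [0, 0, 0, 1, 2]     # Mon-Wed pad with 0, Thu is the 1st
assert max(days) == 29                 # leap year; trailing slots also pad with 0
assert len(days) == 35                 # whole weeks are yielded (five of them)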
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py
python
_handle_ns
(packageName, path_item)
return subpath
Ensure that named package includes a subpath of path_item (if needed)
Ensure that named package includes a subpath of path_item (if needed)
[ "Ensure", "that", "named", "package", "includes", "a", "subpath", "of", "path_item", "(", "if", "needed", ")" ]
def _handle_ns(packageName, path_item): """Ensure that named package includes a subpath of path_item (if needed)""" importer = get_importer(path_item) if importer is None: return None # use find_spec (PEP 451) and fall-back to find_module (PEP 302) try: loader = importer.find_spec(packageName).loader except AttributeError: # capture warnings due to #1111 with warnings.catch_warnings(): warnings.simplefilter("ignore") loader = importer.find_module(packageName) if loader is None: return None module = sys.modules.get(packageName) if module is None: module = sys.modules[packageName] = types.ModuleType(packageName) module.__path__ = [] _set_parent_ns(packageName) elif not hasattr(module, '__path__'): raise TypeError("Not a package:", packageName) handler = _find_adapter(_namespace_handlers, importer) subpath = handler(importer, path_item, packageName, module) if subpath is not None: path = module.__path__ path.append(subpath) loader.load_module(packageName) _rebuild_mod_path(path, packageName, module) return subpath
[ "def", "_handle_ns", "(", "packageName", ",", "path_item", ")", ":", "importer", "=", "get_importer", "(", "path_item", ")", "if", "importer", "is", "None", ":", "return", "None", "# use find_spec (PEP 451) and fall-back to find_module (PEP 302)", "try", ":", "loader", "=", "importer", ".", "find_spec", "(", "packageName", ")", ".", "loader", "except", "AttributeError", ":", "# capture warnings due to #1111", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "loader", "=", "importer", ".", "find_module", "(", "packageName", ")", "if", "loader", "is", "None", ":", "return", "None", "module", "=", "sys", ".", "modules", ".", "get", "(", "packageName", ")", "if", "module", "is", "None", ":", "module", "=", "sys", ".", "modules", "[", "packageName", "]", "=", "types", ".", "ModuleType", "(", "packageName", ")", "module", ".", "__path__", "=", "[", "]", "_set_parent_ns", "(", "packageName", ")", "elif", "not", "hasattr", "(", "module", ",", "'__path__'", ")", ":", "raise", "TypeError", "(", "\"Not a package:\"", ",", "packageName", ")", "handler", "=", "_find_adapter", "(", "_namespace_handlers", ",", "importer", ")", "subpath", "=", "handler", "(", "importer", ",", "path_item", ",", "packageName", ",", "module", ")", "if", "subpath", "is", "not", "None", ":", "path", "=", "module", ".", "__path__", "path", ".", "append", "(", "subpath", ")", "loader", ".", "load_module", "(", "packageName", ")", "_rebuild_mod_path", "(", "path", ",", "packageName", ",", "module", ")", "return", "subpath" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py#L2193-L2225
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/indexes/datetimelike.py
python
DatetimeTimedeltaMixin._set_freq
(self, freq)
Set the _freq attribute on our underlying DatetimeArray. Parameters ---------- freq : DateOffset, None, or "infer"
Set the _freq attribute on our underlying DatetimeArray.
[ "Set", "the", "_freq", "attribute", "on", "our", "underlying", "DatetimeArray", "." ]
def _set_freq(self, freq): """ Set the _freq attribute on our underlying DatetimeArray. Parameters ---------- freq : DateOffset, None, or "infer" """ # GH#29843 if freq is None: # Always valid pass elif len(self) == 0 and isinstance(freq, DateOffset): # Always valid. In the TimedeltaIndex case, we assume this # is a Tick offset. pass else: # As an internal method, we can ensure this assertion always holds assert freq == "infer" freq = to_offset(self.inferred_freq) self._data._freq = freq
[ "def", "_set_freq", "(", "self", ",", "freq", ")", ":", "# GH#29843", "if", "freq", "is", "None", ":", "# Always valid", "pass", "elif", "len", "(", "self", ")", "==", "0", "and", "isinstance", "(", "freq", ",", "DateOffset", ")", ":", "# Always valid. In the TimedeltaIndex case, we assume this", "# is a Tick offset.", "pass", "else", ":", "# As an internal method, we can ensure this assertion always holds", "assert", "freq", "==", "\"infer\"", "freq", "=", "to_offset", "(", "self", ".", "inferred_freq", ")", "self", ".", "_data", ".", "_freq", "=", "freq" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pandas/core/indexes/datetimelike.py#L575-L596
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/python/ops/array_ops.py
python
_OneHotShape
(op)
return [tensor_shape.TensorShape(new_shape)]
Shape function for the OneHot op. It closely follows the code in the .cc implementation. Args: op: A OneHot Operation. Returns: A single-element list containing the shape of the output. Raises: ValueError: if axis < -1.
Shape function for the OneHot op.
[ "Shape", "function", "for", "the", "OneHot", "op", "." ]
def _OneHotShape(op): """Shape function for the OneHot op. It closely follows the code in the .cc implementation. Args: op: A OneHot Operation. Returns: A single-element list containing the shape of the output. Raises: ValueError: if axis < -1. """ indices_shape = op.inputs[0].get_shape() indices_dims = indices_shape.ndims depth = tensor_util.constant_value(op.inputs[1]) axis = op.get_attr("axis") if axis < -1: raise ValueError("axis must be >= -1") new_shape = None if indices_dims is not None: new_shape = indices_shape.as_list() new_shape.insert(axis % (indices_dims + 1), depth) return [tensor_shape.TensorShape(new_shape)]
[ "def", "_OneHotShape", "(", "op", ")", ":", "indices_shape", "=", "op", ".", "inputs", "[", "0", "]", ".", "get_shape", "(", ")", "indices_dims", "=", "indices_shape", ".", "ndims", "depth", "=", "tensor_util", ".", "constant_value", "(", "op", ".", "inputs", "[", "1", "]", ")", "axis", "=", "op", ".", "get_attr", "(", "\"axis\"", ")", "if", "axis", "<", "-", "1", ":", "raise", "ValueError", "(", "\"axis must be >= -1\"", ")", "new_shape", "=", "None", "if", "indices_dims", "is", "not", "None", ":", "new_shape", "=", "indices_shape", ".", "as_list", "(", ")", "new_shape", ".", "insert", "(", "axis", "%", "(", "indices_dims", "+", "1", ")", ",", "depth", ")", "return", "[", "tensor_shape", ".", "TensorShape", "(", "new_shape", ")", "]" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/ops/array_ops.py#L2721-L2748
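The one subtle step in _OneHotShape is the insertion index: Python's modulo maps a negative axis to a position counted from the end. A standalone sketch of that arithmetic, with hypothetical shapes and no TensorFlow dependency:

indices_shape = [3, 4]        # static shape of the indices input
depth, axis = 5, -1           # one-hot depth; axis=-1 means "append last"
new_shape = list(indices_shape)
new_shape.insert(axis % (len(indices_shape) + 1), depth)   # -1 % 3 == 2
assert new_shape == [3, 4, 5]
# axis=0 would instead give [5, 3, 4]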
Kitware/kwiver
7ed70308905698b6e88d27ae3dc028c9b016ca0a
python/kwiver/sprokit/processes/pytorch/utils/parse_gpu_list.py
python
get_device
(gpu_list=None)
return torch.device('cuda:{}'.format(gpu_list[0])), True
Get a Pytorch device corresponding to one of the GPU indices listed in gpu_list. If gpu_list is empty, get the device corresponding to the CPU instead. If gpu_list is None (the default), enumerate the available GPU indices and pick one as though the list had been passed directly, except that in the case of there being no GPUs, an IndexError will be thrown. The return value is a pair of the device and a boolean that is true if the returned device is a GPU device. Note that we currently return the first listed device.
Get a Pytorch device corresponding to one of the GPU indices listed in gpu_list. If gpu_list is empty, get the device corresponding to the CPU instead. If gpu_list is None (the default), enumerate the available GPU indices and pick one as though the list had been passed directly, except that in the case of there being no GPUs, an IndexError will be thrown.
[ "Get", "a", "Pytorch", "device", "corresponding", "to", "one", "of", "the", "GPU", "indices", "listed", "in", "gpu_list", ".", "If", "gpu_list", "is", "empty", "get", "the", "device", "corresponding", "to", "the", "CPU", "instead", ".", "If", "gpu_list", "is", "None", "(", "the", "default", ")", "enumerate", "the", "available", "GPU", "indices", "and", "pick", "one", "as", "though", "the", "list", "had", "been", "passed", "directly", "except", "that", "in", "the", "case", "of", "there", "being", "no", "GPUs", "an", "IndexError", "will", "be", "thrown", "." ]
def get_device(gpu_list=None): """Get a Pytorch device corresponding to one of the GPU indices listed in gpu_list. If gpu_list is empty, get the device corresponding to the CPU instead. If gpu_list is None (the default), enumerate the available GPU indices and pick one as though the list had been passed directly, except that in the case of there being no GPUs, an IndexError will be thrown. The return value is a pair of the device and a boolean that is true if the returned device is a GPU device. Note that we currently return the first listed device. """ if gpu_list is None: gpu_list = list(range(torch.cuda.device_count())) elif not gpu_list: return torch.device('cpu'), False return torch.device('cuda:{}'.format(gpu_list[0])), True
[ "def", "get_device", "(", "gpu_list", "=", "None", ")", ":", "if", "gpu_list", "is", "None", ":", "gpu_list", "=", "list", "(", "range", "(", "torch", ".", "cuda", ".", "device_count", "(", ")", ")", ")", "elif", "not", "gpu_list", ":", "return", "torch", ".", "device", "(", "'cpu'", ")", ",", "False", "return", "torch", ".", "device", "(", "'cuda:{}'", ".", "format", "(", "gpu_list", "[", "0", "]", ")", ")", ",", "True" ]
https://github.com/Kitware/kwiver/blob/7ed70308905698b6e88d27ae3dc028c9b016ca0a/python/kwiver/sprokit/processes/pytorch/utils/parse_gpu_list.py#L61-L79
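A usage sketch of the get_device above; torch.device construction is lazy, so the GPU branch can be exercised even on a CPU-only host (assuming torch is importable):

import torch

device, is_gpu = get_device([])        # explicit empty list: CPU fallback
assert str(device) == 'cpu' and is_gpu is False

device, is_gpu = get_device([1, 0])    # first listed index wins
assert str(device) == 'cuda:1' and is_gpu is True

device, is_gpu = get_device()          # None: enumerate GPUs; raises
                                       # IndexError when none are present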
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/build/waf-1.7.13/lmbrwaflib/third_party.py
python
evaluate_node_alias_map
(lib_root, lib_name)
return evaluated_name_alias_map
Given a root of a lib config, perform an evaluation of all the aliases and produce a map of its value :param lib_root: The lib root :return: The map of aliased values
Given a root of a lib config, perform an evaluation of all the aliases and produce a map of its value :param lib_root: The lib root :return: The map of aliased values
[ "Given", "a", "root", "of", "a", "lib", "config", "perform", "an", "evaluation", "of", "all", "the", "aliases", "and", "produce", "a", "map", "of", "its", "value", ":", "param", "lib_root", ":", "The", "lib", "root", ":", "return", ":", "The", "map", "of", "aliased", "values" ]
def evaluate_node_alias_map(lib_root, lib_name): """ Given a root of a lib config, perform an evaluation of all the aliases and produce a map of its value :param lib_root: The lib root :return: The map of aliased values """ def _process_alias_value(name_alias_map, name_alias_value, visit_stack): alias_match = ALIAS_SEARCH_PATTERN.search(name_alias_value) if alias_match is not None: # This contains an aliased value alias_key = alias_match.group(2) # If the value is another alias if alias_key not in name_alias_map: # If the alias is not in the map, this is an error raise RuntimeError('Invalid alias value {} for 3rd party library {}'.format(name_alias_value, lib_name)) if alias_key in visit_stack: # If the alias is in the stack, this is a recursive error raise RuntimeError('Recursive alias value error {} for 3rd party library {}'.format(name_alias_value, lib_name)) visit_stack.append(alias_key) aliased_values = [] aliased_names = name_alias_map[alias_key] if isinstance(name_alias_map[alias_key], list) else [name_alias_map[alias_key]] for alias_value in aliased_names: pre_expansion_aliases = _process_alias_value(name_alias_map, alias_value, visit_stack) for pre_expansion_alias in pre_expansion_aliases: aliased_values.append(ALIAS_SEARCH_PATTERN.sub(pre_expansion_alias, name_alias_value)) visit_stack.pop() return aliased_values else: return [name_alias_value] # Scan the optional 'aliases' node off of the parent to look for aliases to build the map evaluated_name_alias_map = {} if 'aliases' not in list(lib_root.keys()): return evaluated_name_alias_map alias_node = lib_root['aliases'] # The first pass is to collect the pre-evaluated alias names for node_key in alias_node: node_alias_value = alias_node[node_key] if isinstance(node_alias_value, str): node_alias_value = [node_alias_value] evaluated_name_alias_map[node_key] = node_alias_value # Second pass, go through each list (possible recursively) and expand any aliased values in the list stack = [] for aliased_node_key in evaluated_name_alias_map: aliased_node_values = evaluated_name_alias_map[aliased_node_key] stack.append(aliased_node_key) values = [] for aliased_node_value in aliased_node_values: values += _process_alias_value(evaluated_name_alias_map, aliased_node_value, stack) evaluated_name_alias_map[aliased_node_key] = values stack.pop() return evaluated_name_alias_map
[ "def", "evaluate_node_alias_map", "(", "lib_root", ",", "lib_name", ")", ":", "def", "_process_alias_value", "(", "name_alias_map", ",", "name_alias_value", ",", "visit_stack", ")", ":", "alias_match", "=", "ALIAS_SEARCH_PATTERN", ".", "search", "(", "name_alias_value", ")", "if", "alias_match", "is", "not", "None", ":", "# This contains an aliased value", "alias_key", "=", "alias_match", ".", "group", "(", "2", ")", "# If the value is another alias", "if", "alias_key", "not", "in", "name_alias_map", ":", "# If the alias is not in the map, this is an error", "raise", "RuntimeError", "(", "'Invalid alias value {} for 3rd party library {}'", ".", "format", "(", "name_alias_value", ",", "lib_name", ")", ")", "if", "alias_key", "in", "visit_stack", ":", "# If the alias is in the stack, this is a recursive error", "raise", "RuntimeError", "(", "'Recursive alias value error {} for 3rd party library {}'", ".", "format", "(", "name_alias_value", ",", "lib_name", ")", ")", "visit_stack", ".", "append", "(", "alias_key", ")", "aliased_values", "=", "[", "]", "aliased_names", "=", "name_alias_map", "[", "alias_key", "]", "if", "isinstance", "(", "name_alias_map", "[", "alias_key", "]", ",", "list", ")", "else", "[", "name_alias_map", "[", "alias_key", "]", "]", "for", "alias_value", "in", "aliased_names", ":", "pre_expansion_aliases", "=", "_process_alias_value", "(", "name_alias_map", ",", "alias_value", ",", "visit_stack", ")", "for", "pre_expansion_alias", "in", "pre_expansion_aliases", ":", "aliased_values", ".", "append", "(", "ALIAS_SEARCH_PATTERN", ".", "sub", "(", "pre_expansion_alias", ",", "name_alias_value", ")", ")", "visit_stack", ".", "pop", "(", ")", "return", "aliased_values", "else", ":", "return", "[", "name_alias_value", "]", "# Scan the optional 'aliases' node off of the parent to look for aliases to build the map", "evaluated_name_alias_map", "=", "{", "}", "if", "'aliases'", "not", "in", "list", "(", "lib_root", ".", "keys", "(", ")", ")", ":", "return", "evaluated_name_alias_map", "alias_node", "=", "lib_root", "[", "'aliases'", "]", "# The first pass is to collect the pre-evaluated alias names", "for", "node_key", "in", "alias_node", ":", "node_alias_value", "=", "alias_node", "[", "node_key", "]", "if", "isinstance", "(", "node_alias_value", ",", "str", ")", ":", "node_alias_value", "=", "[", "node_alias_value", "]", "evaluated_name_alias_map", "[", "node_key", "]", "=", "node_alias_value", "# Second pass, go through each list (possible recursively) and expand any aliased values in the list", "stack", "=", "[", "]", "for", "aliased_node_key", "in", "evaluated_name_alias_map", ":", "aliased_node_values", "=", "evaluated_name_alias_map", "[", "aliased_node_key", "]", "stack", ".", "append", "(", "aliased_node_key", ")", "values", "=", "[", "]", "for", "aliased_node_value", "in", "aliased_node_values", ":", "values", "+=", "_process_alias_value", "(", "evaluated_name_alias_map", ",", "aliased_node_value", ",", "stack", ")", "evaluated_name_alias_map", "[", "aliased_node_key", "]", "=", "values", "stack", ".", "pop", "(", ")", "return", "evaluated_name_alias_map" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/build/waf-1.7.13/lmbrwaflib/third_party.py#L112-L172
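A worked example of the expansion, as comments only, since ALIAS_SEARCH_PATTERN is not part of the record; assume here that it matches '@name@' and exposes 'name' as group(2):

# lib_root['aliases'] = {
#     'arch':    'x64',
#     'libdir':  'lib/@arch@',                    # -> ['lib/x64']
#     'bindirs': ['bin/@arch@', '@libdir@/bin'],  # -> ['bin/x64', 'lib/x64/bin']
# }
# An unknown key such as '@nope@' raises the 'Invalid alias value' RuntimeError;
# a self-reference such as 'loop': '@loop@' is caught by the visit stack and
# raises the 'Recursive alias value error' RuntimeError.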
lballabio/quantlib-old
136336947ed4fea9ecc1da6edad188700e821739
gensrc/gensrc/rules/rule.py
python
Wrap.serialize
(self, serializer)
Load/unload class state to/from serializer object.
Load/unload class state to/from serializer object.
[ "Load", "/", "unload", "class", "state", "to", "/", "from", "serializer", "object", "." ]
def serialize(self, serializer): """Load/unload class state to/from serializer object.""" serializer.serializeValue(self) serializer.serializeAttribute(self, common.CODE_ID)
[ "def", "serialize", "(", "self", ",", "serializer", ")", ":", "serializer", ".", "serializeValue", "(", "self", ")", "serializer", ".", "serializeAttribute", "(", "self", ",", "common", ".", "CODE_ID", ")" ]
https://github.com/lballabio/quantlib-old/blob/136336947ed4fea9ecc1da6edad188700e821739/gensrc/gensrc/rules/rule.py#L140-L143
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/dashboard/dashboard/edit_config_handler.py
python
_ChangeTestPatterns
(old_patterns, new_patterns)
return _RemoveOverlapping(added_test_paths, removed_test_paths)
Updates tests that are different between old_patterns and new_patterns. The two arguments both represent sets of test paths (i.e. sets of data series). Any tests that are different between these two sets need to be updated. Some properties of TestMetadata entities are updated when they are put in the |_pre_put_hook| method of TestMetadata, so any TestMetadata entity that might need to be updated should be re-put. Args: old_patterns: An iterable of test path pattern strings. new_patterns: Another iterable of test path pattern strings. Returns: A pair (added_test_paths, removed_test_paths), which are, respectively, the test paths that are in the new set but not the old, and those that are in the old set but not the new.
Updates tests that are different between old_patterns and new_patterns.
[ "Updates", "tests", "that", "are", "different", "between", "old_patterns", "and", "new_patterns", "." ]
def _ChangeTestPatterns(old_patterns, new_patterns): """Updates tests that are different between old_patterns and new_patterns. The two arguments both represent sets of test paths (i.e. sets of data series). Any tests that are different between these two sets need to be updated. Some properties of TestMetadata entities are updated when they are put in the |_pre_put_hook| method of TestMetadata, so any TestMetadata entity that might need to be updated should be re-put. Args: old_patterns: An iterable of test path pattern strings. new_patterns: Another iterable of test path pattern strings. Returns: A pair (added_test_paths, removed_test_paths), which are, respectively, the test paths that are in the new set but not the old, and those that are in the old set but not the new. """ added_patterns, removed_patterns = _ComputeDeltas(old_patterns, new_patterns) added_test_paths = _AllTestPathsMatchingPatterns(added_patterns) removed_test_paths = _AllTestPathsMatchingPatterns(removed_patterns) _AddTestsToPutToTaskQueue(added_test_paths + removed_test_paths) return _RemoveOverlapping(added_test_paths, removed_test_paths)
[ "def", "_ChangeTestPatterns", "(", "old_patterns", ",", "new_patterns", ")", ":", "added_patterns", ",", "removed_patterns", "=", "_ComputeDeltas", "(", "old_patterns", ",", "new_patterns", ")", "added_test_paths", "=", "_AllTestPathsMatchingPatterns", "(", "added_patterns", ")", "removed_test_paths", "=", "_AllTestPathsMatchingPatterns", "(", "removed_patterns", ")", "_AddTestsToPutToTaskQueue", "(", "added_test_paths", "+", "removed_test_paths", ")", "return", "_RemoveOverlapping", "(", "added_test_paths", ",", "removed_test_paths", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/dashboard/dashboard/edit_config_handler.py#L165-L189
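_ComputeDeltas and _RemoveOverlapping are not included in the record, but the intended set arithmetic is plain to sketch with hypothetical patterns:

old = {'ChromiumPerf/*/sunspider', 'ChromiumPerf/*/octane'}
new = {'ChromiumPerf/*/sunspider', 'ChromiumPerf/*/speedometer'}
added = new - old      # {'ChromiumPerf/*/speedometer'}  -> tests to re-put
removed = old - new    # {'ChromiumPerf/*/octane'}       -> tests to re-put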
idaholab/moose
9eeebc65e098b4c30f8205fb41591fd5b61eb6ff
python/MooseDocs/base/renderers.py
python
Renderer.getRoot
(self)
Return the rendered content root node. Called by the Translator prior to beginning rendering.
Return the rendered content root node.
[ "Return", "the", "rendered", "content", "root", "node", "." ]
def getRoot(self): """ Return the rendered content root node. Called by the Translator prior to beginning rendering. """ raise NotImplementedError()
[ "def", "getRoot", "(", "self", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/idaholab/moose/blob/9eeebc65e098b4c30f8205fb41591fd5b61eb6ff/python/MooseDocs/base/renderers.py#L59-L65
NVIDIA/DALI
bf16cc86ba8f091b145f91962f21fe1b6aff243d
dali/python/nvidia/dali/math.py
python
clamp
(value, lo, hi)
return _arithm_op("clamp", value, lo, hi)
Produces a tensor of values from ``value`` clamped to the range ``[lo, hi]``. :rtype: TensorList of the type that is calculated based on the type promotion rules.
Produces a tensor of values from ``value`` clamped to the range ``[lo, hi]``.
[ "Produces", "a", "tensor", "of", "values", "from", "value", "clamped", "to", "the", "range", "[", "lo", "hi", "]", "." ]
def clamp(value, lo, hi): """Produces a tensor of values from ``value`` clamped to the range ``[lo, hi]``. :rtype: TensorList of the type that is calculated based on the type promotion rules. """ return _arithm_op("clamp", value, lo, hi)
[ "def", "clamp", "(", "value", ",", "lo", ",", "hi", ")", ":", "return", "_arithm_op", "(", "\"clamp\"", ",", "value", ",", "lo", ",", "hi", ")" ]
https://github.com/NVIDIA/DALI/blob/bf16cc86ba8f091b145f91962f21fe1b6aff243d/dali/python/nvidia/dali/math.py#L245-L250
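Elementwise, clamp matches the usual definition; a scalar reference sketch (semantics only, not the DALI implementation):

def clamp_scalar(v, lo, hi):
    # pointwise clamp: lo if v < lo, hi if v > hi, else v
    return min(max(v, lo), hi)

assert clamp_scalar(-3, 0, 5) == 0
assert clamp_scalar(2, 0, 5) == 2
assert clamp_scalar(7, 0, 5) == 5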
libornovax/master_thesis_code
6eca474ed3cae673afde010caef338cf7349f839
caffe/python/caffe/pycaffe.py
python
_Net_forward_all
(self, blobs=None, **kwargs)
return all_outs
Run net forward in batches. Parameters ---------- blobs : list of blobs to extract as in forward() kwargs : Keys are input blob names and values are blob ndarrays. Refer to forward(). Returns ------- all_outs : {blob name: list of blobs} dict.
Run net forward in batches.
[ "Run", "net", "forward", "in", "batches", "." ]
def _Net_forward_all(self, blobs=None, **kwargs): """ Run net forward in batches. Parameters ---------- blobs : list of blobs to extract as in forward() kwargs : Keys are input blob names and values are blob ndarrays. Refer to forward(). Returns ------- all_outs : {blob name: list of blobs} dict. """ # Collect outputs from batches all_outs = {out: [] for out in set(self.outputs + (blobs or []))} for batch in self._batch(kwargs): outs = self.forward(blobs=blobs, **batch) for out, out_blob in six.iteritems(outs): all_outs[out].extend(out_blob.copy()) # Package in ndarray. for out in all_outs: all_outs[out] = np.asarray(all_outs[out]) # Discard padding. pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs))) if pad: for out in all_outs: all_outs[out] = all_outs[out][:-pad] return all_outs
[ "def", "_Net_forward_all", "(", "self", ",", "blobs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Collect outputs from batches", "all_outs", "=", "{", "out", ":", "[", "]", "for", "out", "in", "set", "(", "self", ".", "outputs", "+", "(", "blobs", "or", "[", "]", ")", ")", "}", "for", "batch", "in", "self", ".", "_batch", "(", "kwargs", ")", ":", "outs", "=", "self", ".", "forward", "(", "blobs", "=", "blobs", ",", "*", "*", "batch", ")", "for", "out", ",", "out_blob", "in", "six", ".", "iteritems", "(", "outs", ")", ":", "all_outs", "[", "out", "]", ".", "extend", "(", "out_blob", ".", "copy", "(", ")", ")", "# Package in ndarray.", "for", "out", "in", "all_outs", ":", "all_outs", "[", "out", "]", "=", "np", ".", "asarray", "(", "all_outs", "[", "out", "]", ")", "# Discard padding.", "pad", "=", "len", "(", "six", ".", "next", "(", "six", ".", "itervalues", "(", "all_outs", ")", ")", ")", "-", "len", "(", "six", ".", "next", "(", "six", ".", "itervalues", "(", "kwargs", ")", ")", ")", "if", "pad", ":", "for", "out", "in", "all_outs", ":", "all_outs", "[", "out", "]", "=", "all_outs", "[", "out", "]", "[", ":", "-", "pad", "]", "return", "all_outs" ]
https://github.com/libornovax/master_thesis_code/blob/6eca474ed3cae673afde010caef338cf7349f839/caffe/python/caffe/pycaffe.py#L175-L203
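The padding bookkeeping at the end of _Net_forward_all is worth a worked example (assuming _batch, which is not shown here, pads the final batch up to the net's batch size):

# net batch size 10, caller supplies 23 inputs:
#   batches processed: 10 + 10 + 10 (last one padded) -> 30 rows collected
#   pad = 30 - 23 = 7, so all_outs[out] = all_outs[out][:-7]
# leaving exactly one output row per supplied input.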
ApolloAuto/apollo-platform
86d9dc6743b496ead18d597748ebabd34a513289
ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/matrixlib/defmatrix.py
python
matrix.all
(self, axis=None, out=None)
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
Test whether all matrix elements along a given axis evaluate to True. Parameters ---------- See `numpy.all` for complete descriptions See Also -------- numpy.all Notes ----- This is the same as `ndarray.all`, but it returns a `matrix` object. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> y = x[0]; y matrix([[0, 1, 2, 3]]) >>> (x == y) matrix([[ True, True, True, True], [False, False, False, False], [False, False, False, False]], dtype=bool) >>> (x == y).all() False >>> (x == y).all(0) matrix([[False, False, False, False]], dtype=bool) >>> (x == y).all(1) matrix([[ True], [False], [False]], dtype=bool)
Test whether all matrix elements along a given axis evaluate to True.
[ "Test", "whether", "all", "matrix", "elements", "along", "a", "given", "axis", "evaluate", "to", "True", "." ]
def all(self, axis=None, out=None): """ Test whether all matrix elements along a given axis evaluate to True. Parameters ---------- See `numpy.all` for complete descriptions See Also -------- numpy.all Notes ----- This is the same as `ndarray.all`, but it returns a `matrix` object. Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> y = x[0]; y matrix([[0, 1, 2, 3]]) >>> (x == y) matrix([[ True, True, True, True], [False, False, False, False], [False, False, False, False]], dtype=bool) >>> (x == y).all() False >>> (x == y).all(0) matrix([[False, False, False, False]], dtype=bool) >>> (x == y).all(1) matrix([[ True], [False], [False]], dtype=bool) """ return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
[ "def", "all", "(", "self", ",", "axis", "=", "None", ",", "out", "=", "None", ")", ":", "return", "N", ".", "ndarray", ".", "all", "(", "self", ",", "axis", ",", "out", ",", "keepdims", "=", "True", ")", ".", "_collapse", "(", "axis", ")" ]
https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/matrixlib/defmatrix.py#L615-L653
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
samples/doodle/superdoodle.py
python
ControlPanel.OnSetThickness
(self, event)
Use the event ID to set the thickness in the doodle.
Use the event ID to set the thickness in the doodle.
[ "Use", "the", "event", "ID", "to", "set", "the", "thickness", "in", "the", "doodle", "." ]
def OnSetThickness(self, event): """ Use the event ID to set the thickness in the doodle. """ thickness = event.GetId() if thickness != self.doodle.thickness: # untoggle the old thickness button self.thknsBtns[self.doodle.thickness].SetToggle(False) # set the new colour self.doodle.SetThickness(thickness)
[ "def", "OnSetThickness", "(", "self", ",", "event", ")", ":", "thickness", "=", "event", ".", "GetId", "(", ")", "if", "thickness", "!=", "self", ".", "doodle", ".", "thickness", ":", "# untoggle the old thickness button", "self", ".", "thknsBtns", "[", "self", ".", "doodle", ".", "thickness", "]", ".", "SetToggle", "(", "False", ")", "# set the new colour", "self", ".", "doodle", ".", "SetThickness", "(", "thickness", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/samples/doodle/superdoodle.py#L283-L292
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
buildconfig/python_export_maker.py
python
get_namespace
(headerfile)
return namespace
Returns the Mantid namespace that the header resides in
Returns the Mantid namespace that the header resides in
[ "Returns", "the", "Mantid", "namespace", "that", "the", "header", "resides", "in" ]
def get_namespace(headerfile): """ Returns the Mantid namespace that the header resides in """ matches = re.match(r".*inc(/|\\)Mantid(\w+)(/|\\).*\.h", headerfile) if matches: namespace = matches.group(2) else: raise RuntimeError("Unknown header path style. Cannot extract Mantid namespace name.") return namespace
[ "def", "get_namespace", "(", "headerfile", ")", ":", "matches", "=", "re", ".", "match", "(", "r\".*inc(/|\\\\)Mantid(\\w+)(/|\\\\).*\\.h\"", ",", "headerfile", ")", "if", "matches", ":", "namespace", "=", "matches", ".", "group", "(", "2", ")", "else", ":", "raise", "RuntimeError", "(", "\"Unknown header path style. Cannot extract Mantid namespace name.\"", ")", "return", "namespace" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/buildconfig/python_export_maker.py#L41-L50
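The match keys off the 'inc/Mantid<Namespace>/' path convention; a quick check with a hypothetical header path, covering both separator styles the (/|\\) alternations allow:

assert get_namespace('Framework/Kernel/inc/MantidKernel/Logger.h') == 'Kernel'
assert get_namespace(r'Framework\Kernel\inc\MantidKernel\Logger.h') == 'Kernel'
# anything else, e.g. 'src/Foo.h', raises the RuntimeError above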
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/math/autodiff/kinematics_ad.py
python
KinematicsBuilder.world_angular_velocity
(self,link)
return self.link_angular_velocities[link]
Returns an autodiff expression for the world angular velocity of the given link. Expression evaluates to a 9-D so3_ad array.
Returns an autodiff expression for the world angular velocity of the given link.
[ "Returns", "an", "autodiff", "expression", "for", "the", "world", "angular", "velocity", "of", "the", "given", "link", "." ]
def world_angular_velocity(self,link): """Returns an autodiff expression for the world angular velocity of the given link. Expression evaluates to a 9-D so3_ad array. """ link = self._link_index(link) return self.link_angular_velocities[link]
[ "def", "world_angular_velocity", "(", "self", ",", "link", ")", ":", "link", "=", "self", ".", "_link_index", "(", "link", ")", "return", "self", ".", "link_angular_velocities", "[", "link", "]" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/math/autodiff/kinematics_ad.py#L551-L558
y123456yz/reading-and-annotate-mongodb-3.6
93280293672ca7586dc24af18132aa61e4ed7fcf
mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/packaging/msi.py
python
build_wxsfile_default_gui
(root)
This function adds a default GUI to the wxs file
This function adds a default GUI to the wxs file
[ "This", "function", "adds", "a", "default", "GUI", "to", "the", "wxs", "file" ]
def build_wxsfile_default_gui(root): """ This function adds a default GUI to the wxs file """ factory = Document() Product = root.getElementsByTagName('Product')[0] UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_Mondo' Product.childNodes.append(UIRef) UIRef = factory.createElement('UIRef') UIRef.attributes['Id'] = 'WixUI_ErrorProgressText' Product.childNodes.append(UIRef)
[ "def", "build_wxsfile_default_gui", "(", "root", ")", ":", "factory", "=", "Document", "(", ")", "Product", "=", "root", ".", "getElementsByTagName", "(", "'Product'", ")", "[", "0", "]", "UIRef", "=", "factory", ".", "createElement", "(", "'UIRef'", ")", "UIRef", ".", "attributes", "[", "'Id'", "]", "=", "'WixUI_Mondo'", "Product", ".", "childNodes", ".", "append", "(", "UIRef", ")", "UIRef", "=", "factory", ".", "createElement", "(", "'UIRef'", ")", "UIRef", ".", "attributes", "[", "'Id'", "]", "=", "'WixUI_ErrorProgressText'", "Product", ".", "childNodes", ".", "append", "(", "UIRef", ")" ]
https://github.com/y123456yz/reading-and-annotate-mongodb-3.6/blob/93280293672ca7586dc24af18132aa61e4ed7fcf/mongo/src/third_party/scons-2.5.0/scons-local-2.5.0/SCons/Tool/packaging/msi.py#L417-L429
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py
python
_singlefileMailbox.__len__
(self)
return len(self._toc)
Return a count of messages in the mailbox.
Return a count of messages in the mailbox.
[ "Return", "a", "count", "of", "messages", "in", "the", "mailbox", "." ]
def __len__(self): """Return a count of messages in the mailbox.""" self._lookup() return len(self._toc)
[ "def", "__len__", "(", "self", ")", ":", "self", ".", "_lookup", "(", ")", "return", "len", "(", "self", ".", "_toc", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py#L617-L620
gem5/gem5
141cc37c2d4b93959d4c249b8f7e6a8b2ef75338
src/python/gem5/components/processors/complex_generator_core.py
python
ComplexGeneratorCore._create_linear_traffic
( self, duration: str, rate: str, block_size: int, min_addr: int, max_addr: int, rd_perc: int, data_limit: int, )
This function yields (creates) a linear traffic based on the input params. Then it will yield (create) an exit traffic (exit traffic is used to exit the simulation). :param duration: The number of ticks for the generator core to generate traffic. :param rate: The rate at which the synthetic data is read/written. :param block_size: The number of bytes to be read/written with each request. :param min_addr: The lower bound of the address range the generator will read/write from/to. :param max_addr: The upper bound of the address range the generator will read/write from/to. :param rd_perc: The percentage of read requests among all the generated requests. The write percentage would be equal to 100 - rd_perc. :param data_limit: The amount of data in bytes to read/write by the generator before stopping generation.
This function yields (creates) a linear traffic based on the input params. Then it will yield (create) an exit traffic (exit traffic is used to exit the simulation).
[ "This", "function", "yields", "(", "creates", ")", "a", "linear", "traffic", "based", "on", "the", "input", "params", ".", "Then", "it", "will", "yield", "(", "create", ")", "an", "exit", "traffic", "(", "exit", "traffic", "is", "used", "to", "exit", "the", "simulation", ")", "." ]
def _create_linear_traffic( self, duration: str, rate: str, block_size: int, min_addr: int, max_addr: int, rd_perc: int, data_limit: int, ) -> None: """ This function yields (creates) a linear traffic based on the input params. Then it will yield (create) an exit traffic (exit traffic is used to exit the simulation). :param duration: The number of ticks for the generator core to generate traffic. :param rate: The rate at which the synthetic data is read/written. :param block_size: The number of bytes to be read/written with each request. :param min_addr: The lower bound of the address range the generator will read/write from/to. :param max_addr: The upper bound of the address range the generator will read/write from/to. :param rd_perc: The percentage of read requests among all the generated requests. The write percentage would be equal to 100 - rd_perc. :param data_limit: The amount of data in bytes to read/write by the generator before stopping generation. """ duration = fromSeconds(toLatency(duration)) rate = toMemoryBandwidth(rate) period = fromSeconds(block_size / rate) min_period = period max_period = period yield self.generator.createLinear( duration, min_addr, max_addr, block_size, min_period, max_period, rd_perc, data_limit, ) yield self.generator.createExit(0)
[ "def", "_create_linear_traffic", "(", "self", ",", "duration", ":", "str", ",", "rate", ":", "str", ",", "block_size", ":", "int", ",", "min_addr", ":", "int", ",", "max_addr", ":", "int", ",", "rd_perc", ":", "int", ",", "data_limit", ":", "int", ",", ")", "->", "None", ":", "duration", "=", "fromSeconds", "(", "toLatency", "(", "duration", ")", ")", "rate", "=", "toMemoryBandwidth", "(", "rate", ")", "period", "=", "fromSeconds", "(", "block_size", "/", "rate", ")", "min_period", "=", "period", "max_period", "=", "period", "yield", "self", ".", "generator", ".", "createLinear", "(", "duration", ",", "min_addr", ",", "max_addr", ",", "block_size", ",", "min_period", ",", "max_period", ",", "rd_perc", ",", "data_limit", ",", ")", "yield", "self", ".", "generator", ".", "createExit", "(", "0", ")" ]
https://github.com/gem5/gem5/blob/141cc37c2d4b93959d4c249b8f7e6a8b2ef75338/src/python/gem5/components/processors/complex_generator_core.py#L242-L286
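The period arithmetic in the middle of _create_linear_traffic is simple once the units line up (a reading of the code, with gem5's exact unit conversions taken on trust): period is the simulated time between successive block_size requests.

# e.g. block_size = 64 bytes at rate = 32e9 bytes/s (decimal GB/s assumed):
#   period = fromSeconds(64 / 32e9) = fromSeconds(2e-9)  -> 2 ns in ticks
# min_period == max_period, so requests are issued at a fixed pace.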
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/learn/python/learn/estimators/head.py
python
_assert_class_id
(class_id, num_classes=None)
Average label value for class `class_id`.
Average label value for class `class_id`.
[ "Average", "label", "value", "for", "class", "class_id", "." ]
def _assert_class_id(class_id, num_classes=None): """Average label value for class `class_id`.""" if (class_id is None) or (class_id < 0): raise ValueError("Invalid class_id %s." % class_id) if num_classes is not None: if num_classes < 2: raise ValueError("Invalid num_classes %s." % num_classes) if class_id >= num_classes: raise ValueError("Invalid class_id %s." % class_id)
[ "def", "_assert_class_id", "(", "class_id", ",", "num_classes", "=", "None", ")", ":", "if", "(", "class_id", "is", "None", ")", "or", "(", "class_id", "<", "0", ")", ":", "raise", "ValueError", "(", "\"Invalid class_id %s.\"", "%", "class_id", ")", "if", "num_classes", "is", "not", "None", ":", "if", "num_classes", "<", "2", ":", "raise", "ValueError", "(", "\"Invalid num_classes %s.\"", "%", "num_classes", ")", "if", "class_id", ">=", "num_classes", ":", "raise", "ValueError", "(", "\"Invalid class_id %s.\"", "%", "class_id", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/learn/python/learn/estimators/head.py#L2017-L2025
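Despite the recorded one-line docstring, _assert_class_id is a pure argument validator; its accept/reject behaviour in full:

_assert_class_id(0)                  # ok: non-negative, range unchecked
_assert_class_id(1, num_classes=2)   # ok: 0 <= 1 < 2
# Each of the following raises ValueError:
#   _assert_class_id(None)               # class_id missing
#   _assert_class_id(-1)                 # negative class_id
#   _assert_class_id(0, num_classes=1)   # fewer than two classes
#   _assert_class_id(2, num_classes=2)   # class_id out of range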
apple/swift
469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893
utils/swift_build_support/swift_build_support/targets.py
python
Platform.cmake_options
(self, args)
return ''
CMake flags to build for a platform, useful for cross-compiling
CMake flags to build for a platform, useful for cross-compiling
[ "CMake", "flags", "to", "build", "for", "a", "platform", "useful", "for", "cross", "-", "compiling" ]
def cmake_options(self, args): """ CMake flags to build for a platform, useful for cross-compiling """ return ''
[ "def", "cmake_options", "(", "self", ",", "args", ")", ":", "return", "''" ]
https://github.com/apple/swift/blob/469f72fdae2ea828b3b6c0d7d62d7e4cf98c4893/utils/swift_build_support/swift_build_support/targets.py#L77-L81
infinit/memo
3a8394d0f647efe03ccb8bfe885a7279cb8be8a6
elle/drake/src/drake/__init__.py
python
FunctionExpander.function
(self)
return self.__function
The function.
The function.
[ "The", "function", "." ]
def function(self): """The function.""" return self.__function
[ "def", "function", "(", "self", ")", ":", "return", "self", ".", "__function" ]
https://github.com/infinit/memo/blob/3a8394d0f647efe03ccb8bfe885a7279cb8be8a6/elle/drake/src/drake/__init__.py#L2842-L2844
koth/kcws
88efbd36a7022de4e6e90f5a1fb880cf87cfae9f
third_party/setuptools/pkg_resources.py
python
DistInfoDistribution._compute_dependencies
(self)
return dm
Recompute this distribution's dependencies.
Recompute this distribution's dependencies.
[ "Recompute", "this", "distribution", "s", "dependencies", "." ]
def _compute_dependencies(self): """Recompute this distribution's dependencies.""" from _markerlib import compile as compile_marker dm = self.__dep_map = {None: []} reqs = [] # Including any condition expressions for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: distvers, mark = self._preparse_requirement(req) parsed = next(parse_requirements(distvers)) parsed.marker_fn = compile_marker(mark) reqs.append(parsed) def reqs_for_extra(extra): for req in reqs: if req.marker_fn(override={'extra':extra}): yield req common = frozenset(reqs_for_extra(None)) dm[None].extend(common) for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: extra = safe_extra(extra.strip()) dm[extra] = list(frozenset(reqs_for_extra(extra)) - common) return dm
[ "def", "_compute_dependencies", "(", "self", ")", ":", "from", "_markerlib", "import", "compile", "as", "compile_marker", "dm", "=", "self", ".", "__dep_map", "=", "{", "None", ":", "[", "]", "}", "reqs", "=", "[", "]", "# Including any condition expressions", "for", "req", "in", "self", ".", "_parsed_pkg_info", ".", "get_all", "(", "'Requires-Dist'", ")", "or", "[", "]", ":", "distvers", ",", "mark", "=", "self", ".", "_preparse_requirement", "(", "req", ")", "parsed", "=", "next", "(", "parse_requirements", "(", "distvers", ")", ")", "parsed", ".", "marker_fn", "=", "compile_marker", "(", "mark", ")", "reqs", ".", "append", "(", "parsed", ")", "def", "reqs_for_extra", "(", "extra", ")", ":", "for", "req", "in", "reqs", ":", "if", "req", ".", "marker_fn", "(", "override", "=", "{", "'extra'", ":", "extra", "}", ")", ":", "yield", "req", "common", "=", "frozenset", "(", "reqs_for_extra", "(", "None", ")", ")", "dm", "[", "None", "]", ".", "extend", "(", "common", ")", "for", "extra", "in", "self", ".", "_parsed_pkg_info", ".", "get_all", "(", "'Provides-Extra'", ")", "or", "[", "]", ":", "extra", "=", "safe_extra", "(", "extra", ".", "strip", "(", ")", ")", "dm", "[", "extra", "]", "=", "list", "(", "frozenset", "(", "reqs_for_extra", "(", "extra", ")", ")", "-", "common", ")", "return", "dm" ]
https://github.com/koth/kcws/blob/88efbd36a7022de4e6e90f5a1fb880cf87cfae9f/third_party/setuptools/pkg_resources.py#L2577-L2602
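The shape of the resulting dependency map, for a hypothetical distribution's metadata (comments only; the exact Requirement repr varies):

# Requires-Dist: requests
# Requires-Dist: lxml; extra == 'xml'
# Provides-Extra: xml
#
# dm == {None: [<requests>], 'xml': [<lxml>]}
# i.e. dm[None] holds the unconditional requirements, and each extra maps
# to only the requirements it adds beyond that common set.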
trilinos/Trilinos
6168be6dd51e35e1cd681e9c4b24433e709df140
packages/seacas/scripts/exodus3.in.py
python
exodus.get_node_set_name
(self, object_id)
return self.__ex_get_name('EX_NODE_SET', object_id)
get the name of a node set >>> node_set_name = exo.get_node_set_name(node_set_id) Parameters ---------- <int> node_set_id node set *ID* (not *INDEX*) Returns ------- <string> node_set_name
get the name of a node set
[ "get", "the", "name", "of", "a", "node", "set" ]
def get_node_set_name(self, object_id): """ get the name of a node set >>> node_set_name = exo.get_node_set_name(node_set_id) Parameters ---------- <int> node_set_id node set *ID* (not *INDEX*) Returns ------- <string> node_set_name """ return self.__ex_get_name('EX_NODE_SET', object_id)
[ "def", "get_node_set_name", "(", "self", ",", "object_id", ")", ":", "return", "self", ".", "__ex_get_name", "(", "'EX_NODE_SET'", ",", "object_id", ")" ]
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/scripts/exodus3.in.py#L3180-L3194
sofa-framework/sofa
70628e35a44fcc258cf8250109b5e4eba8c5abe9
applications/plugins/PSL/python/pslengine.py
python
populateFrame
(cname, frame, stack)
Initialize a frame from the current attributes of the 'self' object This is needed to expose the data as first class object.
Initialize a frame from the current attributes of the 'self' object This is needed to expose the data as first class object.
[ "Initialize", "a", "frame", "from", "the", "current", "attributes", "of", "the", "self", "object", "This", "is", "needed", "to", "expose", "the", "data", "as", "first", "class", "object", "." ]
def populateFrame(cname, frame, stack): """Initialize a frame from the current attributes of the 'self' object This is needed to expose the data as first class object. """ fself = getFromStack("self", stack) if fself == None: return
[ "def", "populateFrame", "(", "cname", ",", "frame", ",", "stack", ")", ":", "fself", "=", "getFromStack", "(", "\"self\"", ",", "stack", ")", "if", "fself", "==", "None", ":", "return" ]
https://github.com/sofa-framework/sofa/blob/70628e35a44fcc258cf8250109b5e4eba8c5abe9/applications/plugins/PSL/python/pslengine.py#L112-L118
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/contrib/factorization/examples/mnist.py
python
run_training
()
Train MNIST for a number of steps.
Train MNIST for a number of steps.
[ "Train", "MNIST", "for", "a", "number", "of", "steps", "." ]
def run_training(): """Train MNIST for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. train_dir = tempfile.mkdtemp() data_sets = input_data.read_data_sets(train_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels. images_placeholder, labels_placeholder = placeholder_inputs() # Build a Graph that computes predictions from the inference model. logits, clustering_loss, kmeans_training_op = inference(images_placeholder, FLAGS.num_clusters, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels_placeholder) # Add to the Graph the Ops that calculate and apply gradients. train_op = tf.group(mnist.training(loss, FLAGS.learning_rate), kmeans_training_op) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Add the variable initializer Op. init = tf.initialize_all_variables() # Create a session for running Ops on the Graph. sess = tf.Session() feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder, batch_size=5000) # Run the Op to initialize the variables. sess.run(init, feed_dict=feed_dict) # Start the training loop. max_test_prec = 0 for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder, FLAGS.batch_size) # Run one step of the model. _, loss_value, clustering_loss_value = sess.run([train_op, loss, clustering_loss], feed_dict=feed_dict) duration = time.time() - start_time if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f, clustering_loss = %.2f (%.3f sec)' % ( step, loss_value, clustering_loss_value, duration)) # Save a checkpoint and evaluate the model periodically. if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: # Evaluate against the training set. print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') test_prec = do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test) max_test_prec = max(max_test_prec, test_prec) return max_test_prec
[ "def", "run_training", "(", ")", ":", "# Get the sets of images and labels for training, validation, and", "# test on MNIST.", "train_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "data_sets", "=", "input_data", ".", "read_data_sets", "(", "train_dir", ",", "FLAGS", ".", "fake_data", ")", "# Tell TensorFlow that the model will be built into the default Graph.", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "# Generate placeholders for the images and labels.", "images_placeholder", ",", "labels_placeholder", "=", "placeholder_inputs", "(", ")", "# Build a Graph that computes predictions from the inference model.", "logits", ",", "clustering_loss", ",", "kmeans_training_op", "=", "inference", "(", "images_placeholder", ",", "FLAGS", ".", "num_clusters", ",", "FLAGS", ".", "hidden1", ",", "FLAGS", ".", "hidden2", ")", "# Add to the Graph the Ops for loss calculation.", "loss", "=", "mnist", ".", "loss", "(", "logits", ",", "labels_placeholder", ")", "# Add to the Graph the Ops that calculate and apply gradients.", "train_op", "=", "tf", ".", "group", "(", "mnist", ".", "training", "(", "loss", ",", "FLAGS", ".", "learning_rate", ")", ",", "kmeans_training_op", ")", "# Add the Op to compare the logits to the labels during evaluation.", "eval_correct", "=", "mnist", ".", "evaluation", "(", "logits", ",", "labels_placeholder", ")", "# Add the variable initializer Op.", "init", "=", "tf", ".", "initialize_all_variables", "(", ")", "# Create a session for running Ops on the Graph.", "sess", "=", "tf", ".", "Session", "(", ")", "feed_dict", "=", "fill_feed_dict", "(", "data_sets", ".", "train", ",", "images_placeholder", ",", "labels_placeholder", ",", "batch_size", "=", "5000", ")", "# Run the Op to initialize the variables.", "sess", ".", "run", "(", "init", ",", "feed_dict", "=", "feed_dict", ")", "# Start the training loop.", "max_test_prec", "=", "0", "for", "step", "in", "xrange", "(", "FLAGS", ".", "max_steps", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "# Fill a feed dictionary with the actual set of images and labels", "# for this particular training step.", "feed_dict", "=", "fill_feed_dict", "(", "data_sets", ".", "train", ",", "images_placeholder", ",", "labels_placeholder", ",", "FLAGS", ".", "batch_size", ")", "# Run one step of the model.", "_", ",", "loss_value", ",", "clustering_loss_value", "=", "sess", ".", "run", "(", "[", "train_op", ",", "loss", ",", "clustering_loss", "]", ",", "feed_dict", "=", "feed_dict", ")", "duration", "=", "time", ".", "time", "(", ")", "-", "start_time", "if", "step", "%", "100", "==", "0", ":", "# Print status to stdout.", "print", "(", "'Step %d: loss = %.2f, clustering_loss = %.2f (%.3f sec)'", "%", "(", "step", ",", "loss_value", ",", "clustering_loss_value", ",", "duration", ")", ")", "# Save a checkpoint and evaluate the model periodically.", "if", "(", "step", "+", "1", ")", "%", "1000", "==", "0", "or", "(", "step", "+", "1", ")", "==", "FLAGS", ".", "max_steps", ":", "# Evaluate against the training set.", "print", "(", "'Training Data Eval:'", ")", "do_eval", "(", "sess", ",", "eval_correct", ",", "images_placeholder", ",", "labels_placeholder", ",", "data_sets", ".", "train", ")", "# Evaluate against the validation set.", "print", "(", "'Validation Data Eval:'", ")", "do_eval", "(", "sess", ",", "eval_correct", ",", "images_placeholder", ",", "labels_placeholder", ",", "data_sets", ".", "validation", ")", "# Evaluate against the test set.", "print", "(", "'Test Data Eval:'", ")", "test_prec", 
"=", "do_eval", "(", "sess", ",", "eval_correct", ",", "images_placeholder", ",", "labels_placeholder", ",", "data_sets", ".", "test", ")", "max_test_prec", "=", "max", "(", "max_test_prec", ",", "test_prec", ")", "return", "max_test_prec" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/factorization/examples/mnist.py#L192-L281
lrjconan/GRAN
43cb4433e6f69401c3a4a6e946ea75da6ec35d72
utils/dist_helper.py
python
gaussian_emd
(x, y, sigma=1.0, distance_scaling=1.0)
return np.exp(-emd * emd / (2 * sigma * sigma))
Gaussian kernel with squared distance in exponential term replaced by EMD Args: x, y: 1D pmf of two distributions with the same support sigma: standard deviation
Gaussian kernel with squared distance in exponential term replaced by EMD Args: x, y: 1D pmf of two distributions with the same support sigma: standard deviation
[ "Gaussian", "kernel", "with", "squared", "distance", "in", "exponential", "term", "replaced", "by", "EMD", "Args", ":", "x", "y", ":", "1D", "pmf", "of", "two", "distributions", "with", "the", "same", "support", "sigma", ":", "standard", "deviation" ]
def gaussian_emd(x, y, sigma=1.0, distance_scaling=1.0): ''' Gaussian kernel with squared distance in exponential term replaced by EMD Args: x, y: 1D pmf of two distributions with the same support sigma: standard deviation ''' support_size = max(len(x), len(y)) d_mat = toeplitz(range(support_size)).astype(np.float) distance_mat = d_mat / distance_scaling # convert histogram values x and y to float, and make them equal len x = x.astype(np.float) y = y.astype(np.float) if len(x) < len(y): x = np.hstack((x, [0.0] * (support_size - len(x)))) elif len(y) < len(x): y = np.hstack((y, [0.0] * (support_size - len(y)))) emd = pyemd.emd(x, y, distance_mat) return np.exp(-emd * emd / (2 * sigma * sigma))
[ "def", "gaussian_emd", "(", "x", ",", "y", ",", "sigma", "=", "1.0", ",", "distance_scaling", "=", "1.0", ")", ":", "support_size", "=", "max", "(", "len", "(", "x", ")", ",", "len", "(", "y", ")", ")", "d_mat", "=", "toeplitz", "(", "range", "(", "support_size", ")", ")", ".", "astype", "(", "np", ".", "float", ")", "distance_mat", "=", "d_mat", "/", "distance_scaling", "# convert histogram values x and y to float, and make them equal len", "x", "=", "x", ".", "astype", "(", "np", ".", "float", ")", "y", "=", "y", ".", "astype", "(", "np", ".", "float", ")", "if", "len", "(", "x", ")", "<", "len", "(", "y", ")", ":", "x", "=", "np", ".", "hstack", "(", "(", "x", ",", "[", "0.0", "]", "*", "(", "support_size", "-", "len", "(", "x", ")", ")", ")", ")", "elif", "len", "(", "y", ")", "<", "len", "(", "x", ")", ":", "y", "=", "np", ".", "hstack", "(", "(", "y", ",", "[", "0.0", "]", "*", "(", "support_size", "-", "len", "(", "y", ")", ")", ")", ")", "emd", "=", "pyemd", ".", "emd", "(", "x", ",", "y", ",", "distance_mat", ")", "return", "np", ".", "exp", "(", "-", "emd", "*", "emd", "/", "(", "2", "*", "sigma", "*", "sigma", ")", ")" ]
https://github.com/lrjconan/GRAN/blob/43cb4433e6f69401c3a4a6e946ea75da6ec35d72/utils/dist_helper.py#L57-L76
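For 1D pmfs on an evenly spaced support, the earth mover's distance equals the L1 distance between the two CDFs, so the kernel above can be sanity-checked without pyemd. A minimal sketch, assuming `distance_scaling=1.0` and unit-spaced bins; `gaussian_emd_1d` is a hypothetical stand-in, not part of the GRAN repo:

```python
import numpy as np

def gaussian_emd_1d(x, y, sigma=1.0):
    # Pad the shorter pmf with zeros so both share the same support.
    n = max(len(x), len(y))
    x = np.pad(np.asarray(x, dtype=float), (0, n - len(x)))
    y = np.pad(np.asarray(y, dtype=float), (0, n - len(y)))
    # 1D EMD with unit ground distance = L1 distance between the CDFs.
    emd = np.abs(np.cumsum(x) - np.cumsum(y)).sum()
    return np.exp(-emd * emd / (2 * sigma * sigma))

x = np.array([0.5, 0.5])
y = np.array([0.25, 0.25, 0.5])
print(gaussian_emd_1d(x, y))  # emd = 0.75 -> exp(-0.75**2 / 2)
```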
qboticslabs/mastering_ros
d83e78f30acc45b0f18522c1d5fae3a7f52974b9
chapter_4_codes/seven_dof_arm_gazebo/scripts/pick_and_place_both_working_good.py
python
CokeCanPickAndPlace._place
(self, group, target, place)
return True
Place a target using the planning group
Place a target using the planning group
[ "Place", "a", "target", "using", "the", "planning", "group" ]
def _place(self, group, target, place): """ Place a target using the planning group """ # Obtain possible places: places = self._generate_places(place) # Create and send Place goal: goal = self._create_place_goal(group, target, places) state = self._place_ac.send_goal_and_wait(goal) if state != GoalStatus.SUCCEEDED: rospy.logerr('Place goal failed!: %s' % self._place_ac.get_goal_status_text()) return None result = self._place_ac.get_result() # Check for error: err = result.error_code.val if err != MoveItErrorCodes.SUCCESS: rospy.logwarn('Group %s cannot place target %s!: %s' % (group, target, str(moveit_error_dict[err]))) return False return True
[ "def", "_place", "(", "self", ",", "group", ",", "target", ",", "place", ")", ":", "# Obtain possible places:", "places", "=", "self", ".", "_generate_places", "(", "place", ")", "# Create and send Place goal:", "goal", "=", "self", ".", "_create_place_goal", "(", "group", ",", "target", ",", "places", ")", "state", "=", "self", ".", "_place_ac", ".", "send_goal_and_wait", "(", "goal", ")", "if", "state", "!=", "GoalStatus", ".", "SUCCEEDED", ":", "rospy", ".", "logerr", "(", "'Place goal failed!: %s'", "%", "self", ".", "_place_ac", ".", "get_goal_status_text", "(", ")", ")", "return", "None", "result", "=", "self", ".", "_place_ac", ".", "get_result", "(", ")", "# Check for error:", "err", "=", "result", ".", "error_code", ".", "val", "if", "err", "!=", "MoveItErrorCodes", ".", "SUCCESS", ":", "rospy", ".", "logwarn", "(", "'Group %s cannot place target %s!: %s'", "%", "(", "group", ",", "target", ",", "str", "(", "moveit_error_dict", "[", "err", "]", ")", ")", ")", "return", "False", "return", "True" ]
https://github.com/qboticslabs/mastering_ros/blob/d83e78f30acc45b0f18522c1d5fae3a7f52974b9/chapter_4_codes/seven_dof_arm_gazebo/scripts/pick_and_place_both_working_good.py#L324-L349
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/io/pytables.py
python
HDFStore._identify_group
(self, key: str, append: bool)
return group
Identify HDF5 group based on key, delete/create group if needed.
Identify HDF5 group based on key, delete/create group if needed.
[ "Identify", "HDF5", "group", "based", "on", "key", "delete", "/", "create", "group", "if", "needed", "." ]
def _identify_group(self, key: str, append: bool) -> Node: """Identify HDF5 group based on key, delete/create group if needed.""" group = self.get_node(key) # we make this assertion for mypy; the get_node call will already # have raised if this is incorrect assert self._handle is not None # remove the node if we are not appending if group is not None and not append: self._handle.remove_node(group, recursive=True) group = None if group is None: group = self._create_nodes_and_group(key) return group
[ "def", "_identify_group", "(", "self", ",", "key", ":", "str", ",", "append", ":", "bool", ")", "->", "Node", ":", "group", "=", "self", ".", "get_node", "(", "key", ")", "# we make this assertion for mypy; the get_node call will already", "# have raised if this is incorrect", "assert", "self", ".", "_handle", "is", "not", "None", "# remove the node if we are not appending", "if", "group", "is", "not", "None", "and", "not", "append", ":", "self", ".", "_handle", ".", "remove_node", "(", "group", ",", "recursive", "=", "True", ")", "group", "=", "None", "if", "group", "is", "None", ":", "group", "=", "self", ".", "_create_nodes_and_group", "(", "key", ")", "return", "group" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/pytables.py#L1796-L1812
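The user-visible effect of the `append` flag can be seen through the public `HDFStore.put` API, which routes through `_identify_group`. A small sketch, assuming the optional PyTables dependency is installed; `demo.h5` is a throwaway path:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
with pd.HDFStore("demo.h5", mode="w") as store:
    store.put("k", df, format="table")               # creates group 'k'
    store.put("k", df, format="table", append=True)  # append=True keeps the group
    print(len(store["k"]))                           # 4 rows
    store.put("k", df, format="table")               # append=False removes and recreates it
    print(len(store["k"]))                           # 2 rows
```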
yuxng/PoseCNN
9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04
lib/utils/bbox_transform.py
python
clip_boxes
(boxes, im_shape)
return boxes
Clip boxes to image boundaries.
Clip boxes to image boundaries.
[ "Clip", "boxes", "to", "image", "boundaries", "." ]
def clip_boxes(boxes, im_shape): """ Clip boxes to image boundaries. """ # x1 >= 0 boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0) # y1 >= 0 boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0) # x2 < im_shape[1] boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0) # y2 < im_shape[0] boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0) return boxes
[ "def", "clip_boxes", "(", "boxes", ",", "im_shape", ")", ":", "# x1 >= 0", "boxes", "[", ":", ",", "0", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "0", ":", ":", "4", "]", ",", "im_shape", "[", "1", "]", "-", "1", ")", ",", "0", ")", "# y1 >= 0", "boxes", "[", ":", ",", "1", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "1", ":", ":", "4", "]", ",", "im_shape", "[", "0", "]", "-", "1", ")", ",", "0", ")", "# x2 < im_shape[1]", "boxes", "[", ":", ",", "2", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "2", ":", ":", "4", "]", ",", "im_shape", "[", "1", "]", "-", "1", ")", ",", "0", ")", "# y2 < im_shape[0]", "boxes", "[", ":", ",", "3", ":", ":", "4", "]", "=", "np", ".", "maximum", "(", "np", ".", "minimum", "(", "boxes", "[", ":", ",", "3", ":", ":", "4", "]", ",", "im_shape", "[", "0", "]", "-", "1", ")", ",", "0", ")", "return", "boxes" ]
https://github.com/yuxng/PoseCNN/blob/9f3dd7b7bce21dcafc05e8f18ccc90da3caabd04/lib/utils/bbox_transform.py#L67-L80
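A quick standalone check of the clipping logic; `np.clip(v, lo, hi)` is equivalent to the nested `np.maximum(np.minimum(...))` calls above:

```python
import numpy as np

boxes = np.array([[-5.0, 10.0, 700.0, 500.0]])  # x1, y1, x2, y2
im_shape = (480, 640)                           # (height, width)

boxes[:, 0::4] = np.clip(boxes[:, 0::4], 0, im_shape[1] - 1)  # x1 >= 0
boxes[:, 1::4] = np.clip(boxes[:, 1::4], 0, im_shape[0] - 1)  # y1 >= 0
boxes[:, 2::4] = np.clip(boxes[:, 2::4], 0, im_shape[1] - 1)  # x2 < width
boxes[:, 3::4] = np.clip(boxes[:, 3::4], 0, im_shape[0] - 1)  # y2 < height
print(boxes)  # [[  0.  10. 639. 479.]]
```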
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mailbox.py
python
Babyl.get_file
(self, key)
return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep))
Return a file-like representation or raise a KeyError.
Return a file-like representation or raise a KeyError.
[ "Return", "a", "file", "-", "like", "representation", "or", "raise", "a", "KeyError", "." ]
def get_file(self, key): """Return a file-like representation or raise a KeyError.""" return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep))
[ "def", "get_file", "(", "self", ",", "key", ")", ":", "return", "io", ".", "BytesIO", "(", "self", ".", "get_bytes", "(", "key", ")", ".", "replace", "(", "b'\\n'", ",", "linesep", ")", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/mailbox.py#L1317-L1319
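A round-trip sketch using only the standard library; the temporary path is illustrative:

```python
import mailbox
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "box.babyl")
mb = mailbox.Babyl(path)
key = mb.add(b"From: a@example.com\n\nhello\n")
with mb.get_file(key) as f:   # an io.BytesIO, per the implementation above
    print(f.read())
mb.close()
```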
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
scripts/SANS/sans/algorithm_detail/batch_execution.py
python
rename_group_workspace
(name_of_workspace, name_of_group_workspace)
Rename a group workspace :param name_of_workspace: current name of group workspace :param name_of_group_workspace: target name of group workspace
Rename a group workspace :param name_of_workspace: current name of group workspace :param name_of_group_workspace: target name of group workspace
[ "Rename", "a", "group", "workspace", ":", "param", "name_of_workspace", ":", "current", "name", "of", "group", "workspace", ":", "param", "name_of_group_workspace", ":", "target", "name", "of", "group", "workspace" ]
def rename_group_workspace(name_of_workspace, name_of_group_workspace): """ Rename a group workspace :param name_of_workspace: current name of group workspace :param name_of_group_workspace: target name of group workspace """ rename_name = "RenameWorkspace" rename_options = {"InputWorkspace": name_of_workspace, "OutputWorkspace": name_of_group_workspace} rename_alg = create_unmanaged_algorithm(rename_name, **rename_options) rename_alg.setAlwaysStoreInADS(True) rename_alg.execute()
[ "def", "rename_group_workspace", "(", "name_of_workspace", ",", "name_of_group_workspace", ")", ":", "rename_name", "=", "\"RenameWorkspace\"", "rename_options", "=", "{", "\"InputWorkspace\"", ":", "name_of_workspace", ",", "\"OutputWorkspace\"", ":", "name_of_group_workspace", "}", "rename_alg", "=", "create_unmanaged_algorithm", "(", "rename_name", ",", "*", "*", "rename_options", ")", "rename_alg", ".", "setAlwaysStoreInADS", "(", "True", ")", "rename_alg", ".", "execute", "(", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/scripts/SANS/sans/algorithm_detail/batch_execution.py#L1067-L1078
intel/llvm
e6d0547e9d99b5a56430c4749f6c7e328bf221ab
lldb/third_party/Python/module/pexpect-4.6/pexpect/screen.py
python
screen.scroll_down
(self)
Scroll display down one line.
Scroll display down one line.
[ "Scroll", "display", "down", "one", "line", "." ]
def scroll_down (self): # <ESC>D '''Scroll display down one line.''' # Screen is indexed from 1, but arrays are indexed from 0. s = self.scroll_row_start - 1 e = self.scroll_row_end - 1 self.w[s+1:e+1] = copy.deepcopy(self.w[s:e])
[ "def", "scroll_down", "(", "self", ")", ":", "# <ESC>D", "# Screen is indexed from 1, but arrays are indexed from 0.", "s", "=", "self", ".", "scroll_row_start", "-", "1", "e", "=", "self", ".", "scroll_row_end", "-", "1", "self", ".", "w", "[", "s", "+", "1", ":", "e", "+", "1", "]", "=", "copy", ".", "deepcopy", "(", "self", ".", "w", "[", "s", ":", "e", "]", ")" ]
https://github.com/intel/llvm/blob/e6d0547e9d99b5a56430c4749f6c7e328bf221ab/lldb/third_party/Python/module/pexpect-4.6/pexpect/screen.py#L360-L366
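The core of the method is the slice assignment; a standalone illustration of how rows in the scroll region shift down by one, duplicating the top row of the region:

```python
import copy

w = [list("abc"), list("def"), list("ghi")]
s, e = 0, 2                             # scroll region spans all three rows (0-indexed)
w[s + 1:e + 1] = copy.deepcopy(w[s:e])  # shift rows s..e-1 down by one
print(w)  # [['a', 'b', 'c'], ['a', 'b', 'c'], ['d', 'e', 'f']]
```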
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/operations/math_ops.py
python
Invert.__init__
(self)
Initialize Invert
Initialize Invert
[ "Initialize", "Invert" ]
def __init__(self): """Initialize Invert""" self.init_prim_io_names(inputs=['x'], outputs=['y'])
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "init_prim_io_names", "(", "inputs", "=", "[", "'x'", "]", ",", "outputs", "=", "[", "'y'", "]", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/operations/math_ops.py#L5177-L5179
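A minimal usage sketch, assuming MindSpore is installed; `Invert` computes the bitwise NOT of an integer tensor:

```python
import numpy as np
from mindspore import Tensor, ops

invert = ops.Invert()
x = Tensor(np.array([25, 4, 13, 11], dtype=np.int16))
print(invert(x))  # [-26  -5 -14 -12], i.e. ~v for each element
```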
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Tools/generateBase/generateTools.py
python
temporary_exec
(text, globals, locals)
this function is a dirty hack to allow using the copier from python2 and python3. Once the support of python2 has stopped feel free to remove this function and use the std exec function instead
this function is a dirty hack to allow using the copier from python2 and python3. Once the support of python2 has stopped feel free to remove this function and use the std exec function instead
[ "this", "function", "is", "a", "dirty", "hack", "to", "allow", "using", "the", "copier", "from", "python2", "and", "python3", ".", "Once", "the", "support", "of", "python2", "has", "stopped", "feel", "free", "to", "remove", "this", "function", "and", "use", "the", "std", "exec", "function", "instead" ]
def temporary_exec(text, globals, locals): """this function is a dirty hack to allow using the copier from python2 and python3. Once the support of python2 has stopped feel free to remove this function and use the std exec function instead""" # maybe this should be fixed by rewriting the generators. if sys.version_info[0] < 3: from .__exec_old import __exec_old__ __exec_old__(text, globals, locals) else: from .__exec_new import __exec_new__ __exec_new__(text, globals, locals)
[ "def", "temporary_exec", "(", "text", ",", "globals", ",", "locals", ")", ":", "# maybe this should be fixed by rewriting the generators.", "if", "sys", ".", "version_info", "[", "0", "]", "<", "3", ":", "from", ".", "__exec_old", "import", "__exec_old__", "__exec_old__", "(", "text", ",", "globals", ",", "locals", ")", "else", ":", "from", ".", "__exec_new", "import", "__exec_new__", "__exec_new__", "(", "text", ",", "globals", ",", "locals", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Tools/generateBase/generateTools.py#L10-L21
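Once Python 2 support is dropped, as the docstring anticipates, the built-in `exec` covers both call sites directly:

```python
ns = {}
exec("x = 1 + 2", ns, ns)  # the std exec function the docstring refers to
print(ns["x"])             # 3
```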
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/contrib/learn/python/learn/models.py
python
bidirectional_rnn
(cell_fw, cell_bw, inputs, initial_state_fw=None, initial_state_bw=None, dtype=None, sequence_length=None, scope=None)
return outputs, array_ops_.concat(1, [state_fw, state_bw])
Creates a bidirectional recurrent neural network. Similar to the unidirectional case (rnn) but takes input and builds independent forward and backward RNNs with the final forward and backward outputs depth-concatenated, such that the output will have the format [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. cell_bw: An instance of RNNCell, to be used for backward direction. inputs: A length T list of inputs, each a tensor of shape [batch_size, cell.input_size]. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape [batch_size x cell.state_size]. initial_state_bw: (optional) Same as for initial_state_fw. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int64 vector (tensor) of size [batch_size], containing the actual lengths for each of the sequences. scope: VariableScope for the created subgraph; defaults to "BiRNN" Returns: A pair (outputs, state) where: outputs is a length T list of outputs (one for each input), which are depth-concatenated forward and backward outputs state is the concatenated final state of the forward and backward RNN Raises: TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell. ValueError: If inputs is None or an empty list.
Creates a bidirectional recurrent neural network.
[ "Creates", "a", "bidirectional", "recurrent", "neural", "network", "." ]
def bidirectional_rnn(cell_fw, cell_bw, inputs, initial_state_fw=None, initial_state_bw=None, dtype=None, sequence_length=None, scope=None): """Creates a bidirectional recurrent neural network. Similar to the unidirectional case (rnn) but takes input and builds independent forward and backward RNNs with the final forward and backward outputs depth-concatenated, such that the output will have the format [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of forward and backward cell must match. The initial state for both directions is zero by default (but can be set optionally) and no intermediate states are ever returned -- the network is fully unrolled for the given (passed in) length(s) of the sequence(s) or completely unrolled if length(s) is not given. Args: cell_fw: An instance of RNNCell, to be used for forward direction. cell_bw: An instance of RNNCell, to be used for backward direction. inputs: A length T list of inputs, each a tensor of shape [batch_size, cell.input_size]. initial_state_fw: (optional) An initial state for the forward RNN. This must be a tensor of appropriate type and shape [batch_size x cell.state_size]. initial_state_bw: (optional) Same as for initial_state_fw. dtype: (optional) The data type for the initial state. Required if either of the initial states are not provided. sequence_length: (optional) An int64 vector (tensor) of size [batch_size], containing the actual lengths for each of the sequences. scope: VariableScope for the created subgraph; defaults to "BiRNN" Returns: A pair (outputs, state) where: outputs is a length T list of outputs (one for each input), which are depth-concatenated forward and backward outputs state is the concatenated final state of the forward and backward RNN Raises: TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell. ValueError: If inputs is None or an empty list. """ if not isinstance(cell_fw, nn.rnn_cell.RNNCell): raise TypeError('cell_fw must be an instance of RNNCell') if not isinstance(cell_bw, nn.rnn_cell.RNNCell): raise TypeError('cell_bw must be an instance of RNNCell') if not isinstance(inputs, list): raise TypeError('inputs must be a list') if not inputs: raise ValueError('inputs must not be empty') name = scope or 'BiRNN' # Forward direction with vs.variable_scope(name + '_FW'): output_fw, state_fw = nn.rnn(cell_fw, inputs, initial_state_fw, dtype, sequence_length) # Backward direction with vs.variable_scope(name + '_BW'): tmp, state_bw = nn.rnn(cell_bw, _reverse_seq(inputs, sequence_length), initial_state_bw, dtype, sequence_length) output_bw = _reverse_seq(tmp, sequence_length) # Concat each of the forward/backward outputs outputs = [array_ops_.concat(1, [fw, bw]) for fw, bw in zip(output_fw, output_bw)] return outputs, array_ops_.concat(1, [state_fw, state_bw])
[ "def", "bidirectional_rnn", "(", "cell_fw", ",", "cell_bw", ",", "inputs", ",", "initial_state_fw", "=", "None", ",", "initial_state_bw", "=", "None", ",", "dtype", "=", "None", ",", "sequence_length", "=", "None", ",", "scope", "=", "None", ")", ":", "if", "not", "isinstance", "(", "cell_fw", ",", "nn", ".", "rnn_cell", ".", "RNNCell", ")", ":", "raise", "TypeError", "(", "'cell_fw must be an instance of RNNCell'", ")", "if", "not", "isinstance", "(", "cell_bw", ",", "nn", ".", "rnn_cell", ".", "RNNCell", ")", ":", "raise", "TypeError", "(", "'cell_bw must be an instance of RNNCell'", ")", "if", "not", "isinstance", "(", "inputs", ",", "list", ")", ":", "raise", "TypeError", "(", "'inputs must be a list'", ")", "if", "not", "inputs", ":", "raise", "ValueError", "(", "'inputs must not be empty'", ")", "name", "=", "scope", "or", "'BiRNN'", "# Forward direction", "with", "vs", ".", "variable_scope", "(", "name", "+", "'_FW'", ")", ":", "output_fw", ",", "state_fw", "=", "nn", ".", "rnn", "(", "cell_fw", ",", "inputs", ",", "initial_state_fw", ",", "dtype", ",", "sequence_length", ")", "# Backward direction", "with", "vs", ".", "variable_scope", "(", "name", "+", "'_BW'", ")", ":", "tmp", ",", "state_bw", "=", "nn", ".", "rnn", "(", "cell_bw", ",", "_reverse_seq", "(", "inputs", ",", "sequence_length", ")", ",", "initial_state_bw", ",", "dtype", ",", "sequence_length", ")", "output_bw", "=", "_reverse_seq", "(", "tmp", ",", "sequence_length", ")", "# Concat each of the forward/backward outputs", "outputs", "=", "[", "array_ops_", ".", "concat", "(", "1", ",", "[", "fw", ",", "bw", "]", ")", "for", "fw", ",", "bw", "in", "zip", "(", "output_fw", ",", "output_bw", ")", "]", "return", "outputs", ",", "array_ops_", ".", "concat", "(", "1", ",", "[", "state_fw", ",", "state_bw", "]", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/learn/python/learn/models.py#L266-L336
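The surrounding API is TF 0.x-era `contrib` code; this numpy sketch only illustrates the depth-concatenation of forward and backward outputs described in the docstring, not the RNN itself:

```python
import numpy as np

T, batch, fw_size, bw_size = 3, 2, 4, 4
output_fw = [np.ones((batch, fw_size)) for _ in range(T)]
output_bw = [np.zeros((batch, bw_size)) for _ in range(T)]

outputs = [np.concatenate([fw, bw], axis=1)
           for fw, bw in zip(output_fw, output_bw)]
print(len(outputs), outputs[0].shape)  # 3 (2, 8): cell_fw + cell_bw output sizes
```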
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_gdi.py
python
Colour.Alpha
(*args, **kwargs)
return _gdi_.Colour_Alpha(*args, **kwargs)
Alpha(self) -> byte Returns the Alpha value.
Alpha(self) -> byte
[ "Alpha", "(", "self", ")", "-", ">", "byte" ]
def Alpha(*args, **kwargs): """ Alpha(self) -> byte Returns the Alpha value. """ return _gdi_.Colour_Alpha(*args, **kwargs)
[ "def", "Alpha", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "Colour_Alpha", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L157-L163
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/turtle.py
python
RawTurtle.window_height
(self)
return self.screen._window_size()[1]
Return the height of the turtle window. No argument. Example (for a TurtleScreen instance named screen): >>> screen.window_height() 480
Return the height of the turtle window.
[ "Return", "the", "height", "of", "the", "turtle", "window", "." ]
def window_height(self): """ Return the height of the turtle window. No argument. Example (for a TurtleScreen instance named screen): >>> screen.window_height() 480 """ return self.screen._window_size()[1]
[ "def", "window_height", "(", "self", ")", ":", "return", "self", ".", "screen", ".", "_window_size", "(", ")", "[", "1", "]" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/lib-tk/turtle.py#L3396-L3405
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/logging/handlers.py
python
TimedRotatingFileHandler.shouldRollover
(self, record)
return 0
Determine if rollover should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same
Determine if rollover should occur.
[ "Determine", "if", "rollover", "should", "occur", "." ]
def shouldRollover(self, record): """ Determine if rollover should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same """ t = int(time.time()) if t >= self.rolloverAt: return 1 #print "No need to rollover: %d, %d" % (t, self.rolloverAt) return 0
[ "def", "shouldRollover", "(", "self", ",", "record", ")", ":", "t", "=", "int", "(", "time", ".", "time", "(", ")", ")", "if", "t", ">=", "self", ".", "rolloverAt", ":", "return", "1", "#print \"No need to rollover: %d, %d\" % (t, self.rolloverAt)", "return", "0" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/armeabi/toolchain/lib/python2.7/logging/handlers.py#L281-L292
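Typical configuration of the handler; `shouldRollover` is called internally for every emitted record, so the first record after the computed rollover time triggers the rotation:

```python
import logging
from logging.handlers import TimedRotatingFileHandler

handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)
logging.basicConfig(level=logging.INFO, handlers=[handler])
logging.info("logged to app.log; rotated at each midnight boundary")
```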
geemaple/leetcode
68bc5032e1ee52c22ef2f2e608053484c487af54
leetcode/338.counting-bits.py
python
Solution2.countBits
(self, num)
:type num: int :rtype: List[int]
:type num: int :rtype: List[int]
[ ":", "type", "num", ":", "int", ":", "rtype", ":", "List", "[", "int", "]" ]
def countBits(self, num): """ :type num: int :rtype: List[int] """ table = [] for i in range(num + 1): table.append(i % 2) pre = i >> 1 if pre > 0: table[i] += table[pre] return table
[ "def", "countBits", "(", "self", ",", "num", ")", ":", "table", "=", "[", "]", "for", "i", "in", "range", "(", "num", "+", "1", ")", ":", "table", ".", "append", "(", "i", "%", "2", ")", "pre", "=", "i", ">>", "1", "if", "pre", ">", "0", ":", "table", "[", "i", "]", "+=", "table", "[", "pre", "]", "return", "table" ]
https://github.com/geemaple/leetcode/blob/68bc5032e1ee52c22ef2f2e608053484c487af54/leetcode/338.counting-bits.py#L58-L70
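A standalone restatement of the same dynamic program (with the return in place): popcount(i) = popcount(i >> 1) + (i & 1):

```python
def count_bits(num):
    table = [0] * (num + 1)
    for i in range(1, num + 1):
        table[i] = table[i >> 1] + (i & 1)  # drop the low bit, add it back
    return table

print(count_bits(5))  # [0, 1, 1, 2, 1, 2]
```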
apple/swift-lldb
d74be846ef3e62de946df343e8c234bde93a8912
scripts/Python/static-binding/lldb.py
python
SBCommandReturnObject.SetImmediateErrorFile
(self, fh)
return _lldb.SBCommandReturnObject_SetImmediateErrorFile(self, fh)
SetImmediateErrorFile(SBCommandReturnObject self, FILE * fh)
SetImmediateErrorFile(SBCommandReturnObject self, FILE * fh)
[ "SetImmediateErrorFile", "(", "SBCommandReturnObject", "self", "FILE", "*", "fh", ")" ]
def SetImmediateErrorFile(self, fh): """SetImmediateErrorFile(SBCommandReturnObject self, FILE * fh)""" return _lldb.SBCommandReturnObject_SetImmediateErrorFile(self, fh)
[ "def", "SetImmediateErrorFile", "(", "self", ",", "fh", ")", ":", "return", "_lldb", ".", "SBCommandReturnObject_SetImmediateErrorFile", "(", "self", ",", "fh", ")" ]
https://github.com/apple/swift-lldb/blob/d74be846ef3e62de946df343e8c234bde93a8912/scripts/Python/static-binding/lldb.py#L2950-L2952
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/io/pytables.py
python
IndexCol.take_data
(self)
return self.values
return the values
return the values
[ "return", "the", "values" ]
def take_data(self): """return the values""" return self.values
[ "def", "take_data", "(", "self", ")", ":", "return", "self", ".", "values" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/pytables.py#L2095-L2097
NVIDIA/MDL-SDK
aa9642b2546ad7b6236b5627385d882c2ed83c5d
src/mdl/jit/generator_jit/gen_intrinsic_func.py
python
SignatureParser.is_debug_supported
(self, name, signature)
return False
Checks if the given debug intrinsic is supported.
Checks if the given debug intrinsic is supported.
[ "Checks", "if", "the", "given", "debug", "intrinsic", "is", "supported", "." ]
def is_debug_supported(self, name, signature): """Checks if the given debug intrinsic is supported.""" ret_type, params = self.split_signature(signature) if name == "breakpoint": if len(params) == 0: # support breakpoint() self.intrinsic_modes[name + signature] = "debug::breakpoint" return True elif name == "assert": if len(params) == 5: # support assert(expr, reason) self.intrinsic_modes[name + signature] = "debug::assert" return True elif name == "print": if len(params) == 1 or len(params) == 3: # support print() self.intrinsic_modes[name + signature] = "debug::print" return True return False
[ "def", "is_debug_supported", "(", "self", ",", "name", ",", "signature", ")", ":", "ret_type", ",", "params", "=", "self", ".", "split_signature", "(", "signature", ")", "if", "name", "==", "\"breakpoint\"", ":", "if", "len", "(", "params", ")", "==", "0", ":", "# support breakpoint()", "self", ".", "intrinsic_modes", "[", "name", "+", "signature", "]", "=", "\"debug::breakpoint\"", "return", "True", "elif", "name", "==", "\"assert\"", ":", "if", "len", "(", "params", ")", "==", "5", ":", "# support assert(expr, reason)", "self", ".", "intrinsic_modes", "[", "name", "+", "signature", "]", "=", "\"debug::assert\"", "return", "True", "elif", "name", "==", "\"print\"", ":", "if", "len", "(", "params", ")", "==", "1", "or", "len", "(", "params", ")", "==", "3", ":", "# support print()", "self", ".", "intrinsic_modes", "[", "name", "+", "signature", "]", "=", "\"debug::print\"", "return", "True", "return", "False" ]
https://github.com/NVIDIA/MDL-SDK/blob/aa9642b2546ad7b6236b5627385d882c2ed83c5d/src/mdl/jit/generator_jit/gen_intrinsic_func.py#L706-L725
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_misc.py
python
Joystick.ReleaseCapture
(*args, **kwargs)
return _misc_.Joystick_ReleaseCapture(*args, **kwargs)
ReleaseCapture(self) -> bool
ReleaseCapture(self) -> bool
[ "ReleaseCapture", "(", "self", ")", "-", ">", "bool" ]
def ReleaseCapture(*args, **kwargs): """ReleaseCapture(self) -> bool""" return _misc_.Joystick_ReleaseCapture(*args, **kwargs)
[ "def", "ReleaseCapture", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "Joystick_ReleaseCapture", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L2290-L2292
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/fit_function_options_view.py
python
FitFunctionOptionsView.plot_guess_points
(self)
return int(self.plot_guess_points_spin_box.value())
Returns the points for the guess fit.
Returns the points for the guess fit.
[ "Returns", "the", "points", "for", "the", "guess", "fit", "." ]
def plot_guess_points(self) -> int: """Returns the points for the guess fit.""" return int(self.plot_guess_points_spin_box.value())
[ "def", "plot_guess_points", "(", "self", ")", "->", "int", ":", "return", "int", "(", "self", ".", "plot_guess_points_spin_box", ".", "value", "(", ")", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/fitting_widgets/basic_fitting/fit_function_options_view.py#L272-L274
ycm-core/ycmd
fc0fb7e5e15176cc5a2a30c80956335988c6b59a
ycmd/completers/language_server/language_server_completer.py
python
LanguageServerCompleter.Format
( self, request_data )
return responses.BuildFixItResponse( [ responses.FixIt( responses.Location( request_data[ 'line_num' ], request_data[ 'column_num' ], request_data[ 'filepath' ] ), chunks ) ] )
Issues the formatting or rangeFormatting request (depending on the presence of a range) and returns the result as a FixIt response.
Issues the formatting or rangeFormatting request (depending on the presence of a range) and returns the result as a FixIt response.
[ "Issues", "the", "formatting", "or", "rangeFormatting", "request", "(", "depending", "on", "the", "presence", "of", "a", "range", ")", "and", "returns", "the", "result", "as", "a", "FixIt", "response", "." ]
def Format( self, request_data ): """Issues the formatting or rangeFormatting request (depending on the presence of a range) and returns the result as a FixIt response.""" if not self.ServerIsReady(): raise RuntimeError( 'Server is initializing. Please wait.' ) self._UpdateServerWithFileContents( request_data ) request_data[ 'options' ].update( self.AdditionalFormattingOptions( request_data ) ) request_id = self.GetConnection().NextRequestId() if 'range' in request_data: message = lsp.RangeFormatting( request_id, request_data ) else: message = lsp.Formatting( request_id, request_data ) response = self.GetConnection().GetResponse( request_id, message, REQUEST_TIMEOUT_COMMAND ) filepath = request_data[ 'filepath' ] contents = GetFileLines( request_data, filepath ) chunks = [ responses.FixItChunk( text_edit[ 'newText' ], _BuildRange( contents, filepath, text_edit[ 'range' ] ) ) for text_edit in response[ 'result' ] or [] ] return responses.BuildFixItResponse( [ responses.FixIt( responses.Location( request_data[ 'line_num' ], request_data[ 'column_num' ], request_data[ 'filepath' ] ), chunks ) ] )
[ "def", "Format", "(", "self", ",", "request_data", ")", ":", "if", "not", "self", ".", "ServerIsReady", "(", ")", ":", "raise", "RuntimeError", "(", "'Server is initializing. Please wait.'", ")", "self", ".", "_UpdateServerWithFileContents", "(", "request_data", ")", "request_data", "[", "'options'", "]", ".", "update", "(", "self", ".", "AdditionalFormattingOptions", "(", "request_data", ")", ")", "request_id", "=", "self", ".", "GetConnection", "(", ")", ".", "NextRequestId", "(", ")", "if", "'range'", "in", "request_data", ":", "message", "=", "lsp", ".", "RangeFormatting", "(", "request_id", ",", "request_data", ")", "else", ":", "message", "=", "lsp", ".", "Formatting", "(", "request_id", ",", "request_data", ")", "response", "=", "self", ".", "GetConnection", "(", ")", ".", "GetResponse", "(", "request_id", ",", "message", ",", "REQUEST_TIMEOUT_COMMAND", ")", "filepath", "=", "request_data", "[", "'filepath'", "]", "contents", "=", "GetFileLines", "(", "request_data", ",", "filepath", ")", "chunks", "=", "[", "responses", ".", "FixItChunk", "(", "text_edit", "[", "'newText'", "]", ",", "_BuildRange", "(", "contents", ",", "filepath", ",", "text_edit", "[", "'range'", "]", ")", ")", "for", "text_edit", "in", "response", "[", "'result'", "]", "or", "[", "]", "]", "return", "responses", ".", "BuildFixItResponse", "(", "[", "responses", ".", "FixIt", "(", "responses", ".", "Location", "(", "request_data", "[", "'line_num'", "]", ",", "request_data", "[", "'column_num'", "]", ",", "request_data", "[", "'filepath'", "]", ")", ",", "chunks", ")", "]", ")" ]
https://github.com/ycm-core/ycmd/blob/fc0fb7e5e15176cc5a2a30c80956335988c6b59a/ycmd/completers/language_server/language_server_completer.py#L2648-L2679
Tencent/Pebble
68315f176d9e328a233ace29b7579a829f89879f
tools/blade/src/blade/fbthrift_library.py
python
FBThriftLibrary.scons_rules
(self)
scons_rules. It outputs the scons rules according to user options.
scons_rules.
[ "scons_rules", "." ]
def scons_rules(self): """scons_rules. It outputs the scons rules according to user options. """ self._prepare_to_generate_rule() # Build java source according to its option env_name = self._env_name() self.options = self.blade.get_options() self.direct_targets = self.blade.get_direct_targets() self._setup_cc_flags() sources = [] obj_names = [] for src in self.srcs: thrift_cpp_files = self._thrift_gen_cpp_files(self.path, src) thrift_cpp_src_files = [f for f in thrift_cpp_files if f.endswith('.cpp')] thrift_cpp2_files = self._thrift_gen_cpp2_files(self.path, src) thrift_cpp2_src_files = [f for f in thrift_cpp2_files if f.endswith('.cpp')] self._write_rule('%s.FBThrift1(%s, "%s")' % ( env_name, str(thrift_cpp_files), os.path.join(self.path, src))) self._write_rule('%s.FBThrift2(%s, "%s")' % ( env_name, str(thrift_cpp2_files), os.path.join(self.path, src))) for thrift_cpp_src in thrift_cpp_src_files: obj_name = '%s_object' % self._generate_variable_name( self.path, thrift_cpp_src) obj_names.append(obj_name) self._write_rule( '%s = %s.SharedObject(target="%s" + top_env["OBJSUFFIX"], ' 'source="%s")' % (obj_name, env_name, thrift_cpp_src, thrift_cpp_src)) sources.append(thrift_cpp_src) for thrift_cpp_src in thrift_cpp2_src_files: obj_name = '%s_object' % self._generate_variable_name( self.path, thrift_cpp_src) obj_names.append(obj_name) self._write_rule( '%s = %s.SharedObject(target="%s" + top_env["OBJSUFFIX"], ' 'source="%s")' % (obj_name, env_name, thrift_cpp_src, thrift_cpp_src)) sources.append(thrift_cpp_src) self._write_rule('%s = [%s]' % (self._objs_name(), ','.join(obj_names))) self._write_rule('%s.Depends(%s, %s)' % ( env_name, self._objs_name(), sources)) self._cc_library() options = self.blade.get_options() if (getattr(options, 'generate_dynamic', False) or self.data.get('build_dynamic')): self._dynamic_cc_library()
[ "def", "scons_rules", "(", "self", ")", ":", "self", ".", "_prepare_to_generate_rule", "(", ")", "# Build java source according to its option", "env_name", "=", "self", ".", "_env_name", "(", ")", "self", ".", "options", "=", "self", ".", "blade", ".", "get_options", "(", ")", "self", ".", "direct_targets", "=", "self", ".", "blade", ".", "get_direct_targets", "(", ")", "self", ".", "_setup_cc_flags", "(", ")", "sources", "=", "[", "]", "obj_names", "=", "[", "]", "for", "src", "in", "self", ".", "srcs", ":", "thrift_cpp_files", "=", "self", ".", "_thrift_gen_cpp_files", "(", "self", ".", "path", ",", "src", ")", "thrift_cpp_src_files", "=", "[", "f", "for", "f", "in", "thrift_cpp_files", "if", "f", ".", "endswith", "(", "'.cpp'", ")", "]", "thrift_cpp2_files", "=", "self", ".", "_thrift_gen_cpp2_files", "(", "self", ".", "path", ",", "src", ")", "thrift_cpp2_src_files", "=", "[", "f", "for", "f", "in", "thrift_cpp2_files", "if", "f", ".", "endswith", "(", "'.cpp'", ")", "]", "self", ".", "_write_rule", "(", "'%s.FBThrift1(%s, \"%s\")'", "%", "(", "env_name", ",", "str", "(", "thrift_cpp_files", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "src", ")", ")", ")", "self", ".", "_write_rule", "(", "'%s.FBThrift2(%s, \"%s\")'", "%", "(", "env_name", ",", "str", "(", "thrift_cpp2_files", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "src", ")", ")", ")", "for", "thrift_cpp_src", "in", "thrift_cpp_src_files", ":", "obj_name", "=", "'%s_object'", "%", "self", ".", "_generate_variable_name", "(", "self", ".", "path", ",", "thrift_cpp_src", ")", "obj_names", ".", "append", "(", "obj_name", ")", "self", ".", "_write_rule", "(", "'%s = %s.SharedObject(target=\"%s\" + top_env[\"OBJSUFFIX\"], '", "'source=\"%s\")'", "%", "(", "obj_name", ",", "env_name", ",", "thrift_cpp_src", ",", "thrift_cpp_src", ")", ")", "sources", ".", "append", "(", "thrift_cpp_src", ")", "for", "thrift_cpp_src", "in", "thrift_cpp2_src_files", ":", "obj_name", "=", "'%s_object'", "%", "self", ".", "_generate_variable_name", "(", "self", ".", "path", ",", "thrift_cpp_src", ")", "obj_names", ".", "append", "(", "obj_name", ")", "self", ".", "_write_rule", "(", "'%s = %s.SharedObject(target=\"%s\" + top_env[\"OBJSUFFIX\"], '", "'source=\"%s\")'", "%", "(", "obj_name", ",", "env_name", ",", "thrift_cpp_src", ",", "thrift_cpp_src", ")", ")", "sources", ".", "append", "(", "thrift_cpp_src", ")", "self", ".", "_write_rule", "(", "'%s = [%s]'", "%", "(", "self", ".", "_objs_name", "(", ")", ",", "','", ".", "join", "(", "obj_names", ")", ")", ")", "self", ".", "_write_rule", "(", "'%s.Depends(%s, %s)'", "%", "(", "env_name", ",", "self", ".", "_objs_name", "(", ")", ",", "sources", ")", ")", "self", ".", "_cc_library", "(", ")", "options", "=", "self", ".", "blade", ".", "get_options", "(", ")", "if", "(", "getattr", "(", "options", ",", "'generate_dynamic'", ",", "False", ")", "or", "self", ".", "data", ".", "get", "(", "'build_dynamic'", ")", ")", ":", "self", ".", "_dynamic_cc_library", "(", ")" ]
https://github.com/Tencent/Pebble/blob/68315f176d9e328a233ace29b7579a829f89879f/tools/blade/src/blade/fbthrift_library.py#L130-L197
wlanjie/AndroidFFmpeg
7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
tools/fdk-aac-build/x86/toolchain/lib/python2.7/plat-mac/PixMapWrapper.py
python
PixMapWrapper.fromImage
(self,im)
Initialize this PixMap from a PIL Image object.
Initialize this PixMap from a PIL Image object.
[ "Initialize", "this", "PixMap", "from", "a", "PIL", "Image", "object", "." ]
def fromImage(self,im): """Initialize this PixMap from a PIL Image object.""" # We need data in ARGB format; PIL can't currently do that, # but it can do RGBA, which we can use by inserting one null # up front if im.mode != 'RGBA': im = im.convert('RGBA') data = chr(0) + im.tostring() self.fromstring(data, im.size[0], im.size[1])
[ "def", "fromImage", "(", "self", ",", "im", ")", ":", "# We need data in ARGB format; PIL can't currently do that,", "# but it can do RGBA, which we can use by inserting one null", "# up front", "if", "im", ".", "mode", "!=", "'RGBA'", ":", "im", "=", "im", ".", "convert", "(", "'RGBA'", ")", "data", "=", "chr", "(", "0", ")", "+", "im", ".", "tostring", "(", ")", "self", ".", "fromstring", "(", "data", ",", "im", ".", "size", "[", "0", "]", ",", "im", ".", "size", "[", "1", "]", ")" ]
https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/plat-mac/PixMapWrapper.py#L191-L198
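The record targets Python 2 on classic Mac OS; under modern Pillow the same RGBA-to-ARGB packing would look like the sketch below (`tostring()` was renamed `tobytes()`, and the leading null must be bytes, not str):

```python
from PIL import Image

im = Image.new("RGB", (2, 2), "red")
if im.mode != "RGBA":
    im = im.convert("RGBA")
data = b"\x00" + im.tobytes()  # one null up front lets the RGBA stream be read as ARGB words
print(len(data))               # 2 * 2 * 4 + 1 == 17
```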
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
tools/usb_gadget/usb_descriptors.py
python
Descriptor.total_size
(self)
return self.struct_size
Returns the total size of this descriptor.
Returns the total size of this descriptor.
[ "Returns", "the", "total", "size", "of", "this", "descriptor", "." ]
def total_size(self): """Returns the total size of this descriptor.""" return self.struct_size
[ "def", "total_size", "(", "self", ")", ":", "return", "self", ".", "struct_size" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/usb_gadget/usb_descriptors.py#L172-L174
KratosMultiphysics/Kratos
0000833054ed0503424eb28205d6508d9ca6cbbc
kratos/python_scripts/deprecation_management.py
python
DeprecationManager.ReplaceDeprecatedVariableName
(parameters, old_variable_name, new_variable_name)
Replace a key by another. The old key is assumed to be present.
Replace a key by another. The old key is assumed to be present.
[ "Replace", "a", "key", "by", "another", ".", "The", "old", "key", "is", "assumed", "to", "be", "present", "." ]
def ReplaceDeprecatedVariableName(parameters, old_variable_name, new_variable_name): """ Replace a key by another. The old key is assumed to be present.""" parameters.AddEmptyValue(new_variable_name) if parameters[old_variable_name].IsInt(): parameters[new_variable_name].SetInt(parameters[old_variable_name].GetInt()) elif parameters[old_variable_name].IsDouble(): parameters[new_variable_name].SetDouble(parameters[old_variable_name].GetDouble()) elif parameters[old_variable_name].IsString(): parameters[new_variable_name].SetString(parameters[old_variable_name].GetString()) elif parameters[old_variable_name].IsBool(): parameters[new_variable_name].SetBool(parameters[old_variable_name].GetBool()) else: pass parameters.RemoveValue(old_variable_name)
[ "def", "ReplaceDeprecatedVariableName", "(", "parameters", ",", "old_variable_name", ",", "new_variable_name", ")", ":", "parameters", ".", "AddEmptyValue", "(", "new_variable_name", ")", "if", "parameters", "[", "old_variable_name", "]", ".", "IsInt", "(", ")", ":", "parameters", "[", "new_variable_name", "]", ".", "SetInt", "(", "parameters", "[", "old_variable_name", "]", ".", "GetInt", "(", ")", ")", "elif", "parameters", "[", "old_variable_name", "]", ".", "IsDouble", "(", ")", ":", "parameters", "[", "new_variable_name", "]", ".", "SetDouble", "(", "parameters", "[", "old_variable_name", "]", ".", "GetDouble", "(", ")", ")", "elif", "parameters", "[", "old_variable_name", "]", ".", "IsString", "(", ")", ":", "parameters", "[", "new_variable_name", "]", ".", "SetString", "(", "parameters", "[", "old_variable_name", "]", ".", "GetString", "(", ")", ")", "elif", "parameters", "[", "old_variable_name", "]", ".", "IsBool", "(", ")", ":", "parameters", "[", "new_variable_name", "]", ".", "SetBool", "(", "parameters", "[", "old_variable_name", "]", ".", "GetBool", "(", ")", ")", "else", ":", "pass", "parameters", ".", "RemoveValue", "(", "old_variable_name", ")" ]
https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/kratos/python_scripts/deprecation_management.py#L37-L52
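The same rename pattern on a plain dict, without Kratos' typed Get/Set accessors; illustrative only, since Kratos `Parameters` objects are not dicts:

```python
def replace_deprecated_key(params, old_key, new_key):
    # The old key is assumed to be present, mirroring the docstring above.
    params[new_key] = params.pop(old_key)

settings = {"echo_level": 1}
replace_deprecated_key(settings, "echo_level", "verbosity")
print(settings)  # {'verbosity': 1}
```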
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py
python
VectorSinhArcsinhDiag.scale
(self)
return self._scale
The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))`.
The `LinearOperator` `scale` in `Y := loc + scale
[ "The", "LinearOperator", "scale", "in", "Y", ":", "=", "loc", "+", "scale" ]
def scale(self): """The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))`.""" return self._scale
[ "def", "scale", "(", "self", ")", ":", "return", "self", ".", "_scale" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py#L262-L264