Dataset schema (one column per field; string columns list min/max length):

    column             type            lengths
    -----------------  --------------  ------------
    nwo                stringlengths   5 .. 86
    sha                stringlengths   40 .. 40
    path               stringlengths   4 .. 189
    language           stringclasses   1 value
    identifier         stringlengths   1 .. 94
    parameters         stringlengths   2 .. 4.03k
    argument_list      stringclasses   1 value
    return_statement   stringlengths   0 .. 11.5k
    docstring          stringlengths   1 .. 33.2k
    docstring_summary  stringlengths   0 .. 5.15k
    docstring_tokens   sequence
    function           stringlengths   34 .. 151k
    function_tokens    sequence
    url                stringlengths   90 .. 278
------------------------------------------------------------------------
nwo:        benoitsteiner/tensorflow-opencl
sha:        cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
path:       tensorflow/python/platform/tf_logging.py
language:   python
identifier: log_if
parameters: (level, msg, condition, *args)
function:

    def log_if(level, msg, condition, *args):
      """Log 'msg % args' at level 'level' only if condition is fulfilled."""
      if condition:
        vlog(level, msg, *args)

https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/platform/tf_logging.py#L170-L173
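
Usage sketch (not part of the dataset record): `vlog` is a module-level
helper in tf_logging, so callers go through the module; the `step` loop
below is hypothetical.

    from tensorflow.python.platform import tf_logging

    # Emit an INFO record only on every 100th step.
    for step in range(1000):
        tf_logging.log_if(tf_logging.INFO, 'reached step %d',
                          step % 100 == 0, step)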
------------------------------------------------------------------------
nwo:        bumptop/BumpTop
sha:        466d23597a07ae738f4265262fa01087fc6e257c
path:       trunk/win/Source/bin/jinja2/visitor.py
language:   python
identifier: NodeVisitor.generic_visit
parameters: (self, node, *args, **kwargs)
function:

    def generic_visit(self, node, *args, **kwargs):
        """Called if no explicit visitor function exists for a node."""
        for node in node.iter_child_nodes():
            self.visit(node, *args, **kwargs)

https://github.com/bumptop/BumpTop/blob/466d23597a07ae738f4265262fa01087fc6e257c/trunk/win/Source/bin/jinja2/visitor.py#L41-L44
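
For context, a sketch of how generic_visit is reached: NodeVisitor.visit
dispatches to visit_<NodeType> when such a method exists and falls back to
generic_visit otherwise, which simply walks the children. The CountNames
class below is illustrative, not from the dataset.

    from jinja2 import Environment
    from jinja2.visitor import NodeVisitor

    class CountNames(NodeVisitor):
        """Count Name nodes; everything else falls through to generic_visit."""
        def __init__(self):
            self.count = 0
        def visit_Name(self, node, *args, **kwargs):
            self.count += 1
            self.generic_visit(node, *args, **kwargs)  # keep walking children

    counter = CountNames()
    counter.visit(Environment().parse('{{ a + b }}'))
    print(counter.count)  # 2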
------------------------------------------------------------------------
nwo:        hanpfei/chromium-net
sha:        392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
path:       third_party/catapult/third_party/Paste/paste/url.py
language:   python
identifier: URLResource.setvars
parameters: (self, **kw)
function:

    def setvars(self, **kw):
        """
        Creates a copy of this URL, but with all the variables set/reset
        (like .setvar(), except clears past variables at the same time)
        """
        return self.__class__(self.url, vars=kw.items(),
                              attrs=self.attrs,
                              params=self.original_params)

https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/Paste/paste/url.py#L166-L173
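
Usage sketch (values are hypothetical; assumes paste.url.URL, the concrete
URLResource subclass, and that str() renders the href with its query string):

    from paste.url import URL

    u = URL('/search', vars=[('q', 'old'), ('lang', 'en')])
    v = u.setvars(q='new')  # earlier vars are cleared, not merged
    print(str(v))           # expected: /search?q=new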
------------------------------------------------------------------------
nwo:        wxWidgets/wxPython-Classic
sha:        19571e1ae65f1ac445f5491474121998c97a1bf0
path:       src/msw/html.py
language:   python
identifier: HtmlHelpController.FindTopLevelWindow
parameters: (*args, **kwargs)
function:

    def FindTopLevelWindow(*args, **kwargs):
        """FindTopLevelWindow(self) -> Window"""
        return _html.HtmlHelpController_FindTopLevelWindow(*args, **kwargs)

https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/html.py#L2006-L2008
------------------------------------------------------------------------
nwo:        arangodb/arangodb
sha:        0d658689c7d1b721b314fa3ca27d38303e1570c8
path:       3rdParty/V8/gyp/lib/ninja_syntax.py
language:   python
identifier: Writer._line
parameters: (self, text, indent=0)
function:

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break

            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)

        self.output.write(leading_space + text + '\n')

https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/V8/gyp/lib/ninja_syntax.py#L111-L145
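
Note on the wrapping rule: in ninja syntax '$' escapes the next character, so
a space preceded by an odd number of '$' characters is escaped and must not be
used as a break point. The helper consulted above looks roughly like this
(a sketch; the real _count_dollars_before_index lives in the same module):

    def _count_dollars_before_index(s, i):
        """Count consecutive '$' characters immediately preceding s[i]."""
        count = 0
        while i > 0 and s[i - 1] == '$':
            count += 1
            i -= 1
        return count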
------------------------------------------------------------------------
nwo:        wxWidgets/wxPython-Classic
sha:        19571e1ae65f1ac445f5491474121998c97a1bf0
path:       wx/tools/Editra/src/ed_basewin.py
language:   python
identifier: EDBaseFileTree.OnDestroy
parameters: (self, event)
function:

    def OnDestroy(self, event):
        """Cleanup message handlers"""
        if self:
            ed_msg.Unsubscribe(self.OnActivateMsg)
            self.DoOnDestroy()
        event.Skip()

https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_basewin.py#L68-L73
------------------------------------------------------------------------
nwo:        PrincetonUniversity/athena-public-version
sha:        9c266692b9423743d8e23509b3ab266a232a92d2
path:       tst/style/cpplint.py
language:   python
identifier: CheckForBadCharacters
parameters: (filename, lines, error)
function:

    def CheckForBadCharacters(filename, lines, error):
      """Logs an error for each line containing bad characters.

      Two kinds of bad characters:

      1. Unicode replacement characters: These indicate that either the file
      contained invalid UTF-8 (likely) or Unicode replacement characters (which
      it shouldn't). Note that it's possible for this to throw off line
      numbering if the invalid UTF-8 occurred adjacent to a newline.

      2. NUL bytes. These are problematic for some tools.

      Args:
        filename: The name of the current file.
        lines: An array of strings, each representing a line of the file.
        error: The function to call with any errors found.
      """
      for linenum, line in enumerate(lines):
        if unicode_escape_decode('\ufffd') in line:
          error(filename, linenum, 'readability/utf8', 5,
                'Line contains invalid UTF-8 (or Unicode replacement character).')
        if '\0' in line:
          error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')

https://github.com/PrincetonUniversity/athena-public-version/blob/9c266692b9423743d8e23509b3ab266a232a92d2/tst/style/cpplint.py#L2251-L2273
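
Driving the check directly (a sketch; inside cpplint, `error` is its normal
reporting hook, and the collector below is illustrative):

    found = []
    def collect(filename, linenum, category, confidence, message):
        found.append((filename, linenum, category, message))

    CheckForBadCharacters('demo.cc', ['ok line', 'bad \0 byte'], collect)
    print(found)  # [('demo.cc', 1, 'readability/nul', 'Line contains NUL byte.')]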
------------------------------------------------------------------------
nwo:        ceph/ceph
sha:        959663007321a369c83218414a29bd9dbc8bda3a
path:       src/pybind/mgr/orchestrator/module.py
language:   python
identifier: OrchestratorCli._rgw_add
parameters: (self, svc_id: str, placement: Optional[str] = None,
             _end_positional_: int = 0, port: Optional[int] = None,
             ssl: bool = False, inbuf: Optional[str] = None)
function:

    def _rgw_add(self,
                 svc_id: str,
                 placement: Optional[str] = None,
                 _end_positional_: int = 0,
                 port: Optional[int] = None,
                 ssl: bool = False,
                 inbuf: Optional[str] = None) -> HandleCommandResult:
        """Start RGW daemon(s)"""
        if inbuf:
            raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
        spec = RGWSpec(
            service_id=svc_id,
            rgw_frontend_port=port,
            ssl=ssl,
            placement=PlacementSpec.from_string(placement),
        )
        return self._daemon_add_misc(spec)

https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/pybind/mgr/orchestrator/module.py#L893-L910
------------------------------------------------------------------------
nwo:        GJDuck/LowFat
sha:        ecf6a0f0fa1b73a27a626cf493cc39e477b6faea
path:       llvm-4.0.0.src/tools/clang/utils/check_cfc/check_cfc.py
language:   python
identifier: WrapperCheck.__init__
parameters: (self, output_file_a)
function:

    def __init__(self, output_file_a):
        """Record the base output file that will be compared against."""
        self._output_file_a = output_file_a

https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/tools/clang/utils/check_cfc/check_cfc.py#L247-L249
------------------------------------------------------------------------
nwo:        wlanjie/AndroidFFmpeg
sha:        7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf
path:       tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py
language:   python
identifier: _create_carefully
parameters: (path)
function:

    def _create_carefully(path):
        """Create a file if it doesn't exist and open for reading and writing."""
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0666)
        try:
            return open(path, 'rb+')
        finally:
            os.close(fd)

https://github.com/wlanjie/AndroidFFmpeg/blob/7baf9122f4b8e1c74e7baf4be5c422c7a5ba5aaf/tools/fdk-aac-build/x86/toolchain/lib/python2.7/mailbox.py#L2013-L2019
------------------------------------------------------------------------
nwo:        wxWidgets/wxPython-Classic
sha:        19571e1ae65f1ac445f5491474121998c97a1bf0
path:       wx/lib/docview.py
language:   python
identifier: Document.GetPrintableName
parameters: (self)
function:

    def GetPrintableName(self):
        """
        Copies a suitable document name into the supplied name buffer. The
        default function uses the title, or if there is no title, uses the
        filename; or if no filename, the string 'Untitled'.
        """
        if self._documentTitle:
            return self._documentTitle
        elif self._documentFile:
            return FileNameFromPath(self._documentFile)
        else:
            return _("Untitled")

https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/docview.py#L584-L595
------------------------------------------------------------------------
nwo:        google-coral/edgetpu
sha:        5020de9386ff370dcc1f63291a2d0f98eeb98adb
path:       benchmarks/imprinting_benchmarks.py
language:   python
identifier: run_benchmark
parameters: (model)
function:

    def run_benchmark(model):
      """Measures training time for given model with random data.

      Args:
        model: string, file name of the input model.

      Returns:
        float, training time.
      """
      input_size = input_tensor_size(model)
      engine = ImprintingEngine(test_utils.test_data_path(model),
                                keep_classes=False)

      np.random.seed(12345)
      data_by_category = {}
      # 10 Categories, each has 20 images.
      for i in range(0, 10):
        data_by_category[i] = []
        for j in range(0, 20):
          data_by_category[i].append(np.random.randint(0, 255, input_size))

      start = time.perf_counter()
      for class_id, tensors in enumerate(data_by_category.values()):
        engine.train(tensors, class_id)
      with tempfile.NamedTemporaryFile() as f:
        engine.save_model(f.name)
      training_time = time.perf_counter() - start
      print('Model: %s' % model)
      print('Training time: %.2fs' % training_time)
      return training_time

https://github.com/google-coral/edgetpu/blob/5020de9386ff370dcc1f63291a2d0f98eeb98adb/benchmarks/imprinting_benchmarks.py#L33-L62
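
Hypothetical invocation (the model filename below is illustrative; a real run
needs an attached Edge TPU accelerator and the model present in the benchmark's
test-data directory):

    # Imprinting expects an L2-norm classifier model; this name is hypothetical.
    t = run_benchmark('mobilenet_v1_1.0_224_l2norm_quant_edgetpu.tflite')
    print('%.2f s' % t)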
------------------------------------------------------------------------
nwo:        pmq20/node-packer
sha:        12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
path:       lts/tools/inspector_protocol/jinja2/runtime.py
language:   python
identifier: LoopContextBase.cycle
parameters: (self, *args)
function:

    def cycle(self, *args):
        """Cycles among the arguments with the current loop index."""
        if not args:
            raise TypeError('no items for cycling given')
        return args[self.index0 % len(args)]

https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/tools/inspector_protocol/jinja2/runtime.py#L366-L370
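
In templates this method is exposed on the loop variable as loop.cycle, the
standard Jinja2 idiom for alternating values:

    from jinja2 import Template

    tmpl = Template(
        '{% for item in items %}'
        '<li class="{{ loop.cycle("odd", "even") }}">{{ item }}</li>'
        '{% endfor %}')
    print(tmpl.render(items=['a', 'b', 'c']))
    # <li class="odd">a</li><li class="even">b</li><li class="odd">c</li>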
------------------------------------------------------------------------
nwo:        SequoiaDB/SequoiaDB
sha:        2894ed7e5bd6fe57330afc900cf76d0ff0df9f64
path:       tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py
language:   python
identifier: uCSIsSpecials
parameters: (code)
function:

    def uCSIsSpecials(code):
        """Check whether the character is part of Specials UCS Block"""
        ret = libxml2mod.xmlUCSIsSpecials(code)
        return ret

https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L2811-L2814
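
Quick check against the Specials block (U+FFF0..U+FFFF); the wrapped C call
returns a nonzero int rather than a bool (sketch, assuming the generated
libxml2 bindings are importable):

    import libxml2

    print(libxml2.uCSIsSpecials(0xFFFD))  # nonzero: U+FFFD is in Specials
    print(libxml2.uCSIsSpecials(0x0041))  # 0: 'A' is not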
------------------------------------------------------------------------
nwo:        aws/lumberyard
sha:        f85344403c1c2e77ec8c75deb2c116e97b713217
path:       dev/Tools/Python/3.7.10/windows/Lib/site-packages/pkg_resources/__init__.py
language:   python
identifier: Environment.__iadd__
parameters: (self, other)
function:

    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other, Distribution):
            self.add(other)
        elif isinstance(other, Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self

https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pkg_resources/__init__.py#L1086-L1096
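
Both accepted operand types in one sketch (assumes a working pkg_resources;
the synthetic Distribution is illustrative):

    import pkg_resources

    env = pkg_resources.Environment(search_path=[])   # start with nothing
    env += pkg_resources.Distribution(project_name='demo', version='1.0')
    env += pkg_resources.Environment(search_path=[])  # merging an empty env is a no-op
    print(env['demo'])                                # [demo 1.0]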
------------------------------------------------------------------------
nwo:        TGAC/KAT
sha:        e8870331de2b4bb0a1b3b91c6afb8fb9d59e9216
path:       deps/boost/tools/build/src/build/feature.py
language:   python
identifier: set_default
parameters: (feature, value)
function:

    def set_default(feature, value):
        """ Sets the default value of the given feature, overriding any
            previous default.
            feature: the name of the feature
            value: the default value to assign
        """
        f = __all_features[feature]
        bad_attribute = None

        if f.free:
            bad_attribute = "free"
        elif f.optional:
            bad_attribute = "optional"

        if bad_attribute:
            raise InvalidValue("%s property %s cannot have a default" %
                               (bad_attribute, f.name))

        if value not in f.values:
            raise InvalidValue("The specified default value, '%s' is invalid.\n" % value +
                               "allowed values are: %s" % f.values)

        f.set_default(value)

https://github.com/TGAC/KAT/blob/e8870331de2b4bb0a1b3b91c6afb8fb9d59e9216/deps/boost/tools/build/src/build/feature.py#L165-L184
------------------------------------------------------------------------
nwo:        krishauser/Klampt
sha:        972cc83ea5befac3f653c1ba20f80155768ad519
path:       Python/klampt/vis/visualization.py
language:   python
identifier: VisualizationScene.listItems
parameters: (self, root=None, indent=0)
function:

    def listItems(self, root=None, indent=0):
        """Prints out all items in the visualization world."""
        if root is None:
            for name, value in self.items.items():
                self.listItems(value, indent)
        else:
            if isinstance(root, str):
                root = self.getItem(root)
            if indent > 0:
                print(" " * (indent - 1), end=' ')
            print(root.name)
            for n, v in root.subAppearances.items():
                self.listItems(v, indent + 2)

https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/vis/visualization.py#L3462-L3474
------------------------------------------------------------------------
nwo:        PX4/PX4-Autopilot
sha:        0b9f60a0370be53d683352c63fd92db3d6586e18
path:       Tools/ecl_ekf/plotting/data_plots.py
language:   python
identifier: DataPlot.close
parameters: (self)
function:

    def close(self) -> None:
        """
        closes the figure.
        :return:
        """
        plt.close(self._fig)

https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/Tools/ecl_ekf/plotting/data_plots.py#L138-L143
------------------------------------------------------------------------
nwo:        miyosuda/TensorFlowAndroidMNIST
sha:        7b5a4603d2780a8a2834575706e9001977524007
path:       jni-build/jni/include/tensorflow/contrib/layers/python/layers/feature_column.py
language:   python
identifier: _BucketizedColumn.to_weighted_sum
parameters: (self, input_tensor, num_outputs=1, weight_collections=None, trainable=True)
function:

    def to_weighted_sum(self,
                        input_tensor,
                        num_outputs=1,
                        weight_collections=None,
                        trainable=True):
      """Returns a Tensor as linear predictions and a list of created Variable."""
      return _create_embedding_lookup(
          input_tensor=self.to_sparse_tensor(input_tensor),
          weight_tensor=None,
          vocab_size=self.length * self.source_column.dimension,
          dimension=num_outputs,
          weight_collections=_add_variable_collection(weight_collections),
          initializer=init_ops.zeros_initializer,
          combiner="sum",
          trainable=trainable,
          name=self.name)

https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/contrib/layers/python/layers/feature_column.py#L1109-L1124
------------------------------------------------------------------------
nwo:        baidu-research/tensorflow-allreduce
sha:        66d5b855e90b0949e9fa5cca5599fd729a70e874
path:       tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py
language:   python
identifier: StateSpaceModel._filtering_step
parameters: (self, current_times, current_values, state, predictions)
function:

    def _filtering_step(self, current_times, current_values, state, predictions):
      """Compute posteriors and accumulate one-step-ahead predictions.

      Args:
        current_times: A [batch size] Tensor for times for each observation.
        current_values: A [batch size] Tensor of values for each observation.
        state: A tuple of (mean, covariance, previous_times) having shapes
            mean; [batch size x state dimension]
            covariance; [batch size x state dimension x state dimension]
            previous_times; [batch size]
        predictions: A dictionary containing mean and covariance Tensors, the
            output of _prediction_step.
      Returns:
        A tuple of (posteriors, outputs):
          posteriors: Model state updated to take `current_values` into account.
          outputs: The `predictions` dictionary updated to include "loss" and
              "log_likelihood" entries (loss simply being negative log
              likelihood).
      """
      estimated_state, estimated_state_covariance, previous_times = state
      observation_model = self.get_broadcasted_observation_model(current_times)
      imputed_to_current_step_assert = control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(current_times, previous_times)),
          ["Attempted to perform filtering without imputation/prediction"])
      with ops.control_dependencies([imputed_to_current_step_assert]):
        estimated_state_covariance = math_utils.clip_covariance(
            estimated_state_covariance,
            self._configuration.filtering_maximum_posterior_variance_ratio,
            self._configuration.filtering_minimum_posterior_variance)
        (filtered_state, filtered_state_covariance,
         log_prob) = self._kalman_filter.do_filter(
             estimated_state=estimated_state,
             estimated_state_covariance=estimated_state_covariance,
             predicted_observation=predictions["mean"],
             predicted_observation_covariance=predictions["covariance"],
             observation=current_values,
             observation_model=observation_model,
             observation_noise=self._observation_noise_covariance)
      filtered_state = (filtered_state, filtered_state_covariance, current_times)
      log_prob.set_shape(current_times.get_shape())
      predictions["loss"] = -log_prob
      predictions["log_likelihood"] = log_prob
      if self._configuration.filtering_postprocessor is not None:
        return self._configuration.filtering_postprocessor.process_filtering_step(
            current_times=current_times,
            current_values=current_values,
            predicted_state=state,
            filtered_state=filtered_state,
            outputs=predictions)
      return (filtered_state, predictions)

https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/timeseries/python/timeseries/state_space_models/state_space_model.py#L389-L438
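
For reference, the posterior update performed inside self._kalman_filter.do_filter
is the standard Kalman filtering step (notation assumed here, not taken from this
file): with predicted state mean \mu, covariance P, observation model H,
observation noise R, and observation y,

    S     = H P H^T + R           (innovation covariance)
    K     = P H^T S^{-1}          (Kalman gain)
    \mu^+ = \mu + K (y - H \mu)   (filtered mean)
    P^+   = (I - K H) P           (filtered covariance)

and the per-step log-likelihood is that of y under N(H \mu, S); its negative is
what this method stores as the "loss" entry.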
------------------------------------------------------------------------
nwo:        wenwei202/caffe
sha:        f54a74abaf6951d8485cbdcfa1d74a4c37839466
path:       scripts/cpp_lint.py
language:   python
identifier: _CppLintState.SetFilters
parameters: (self, filters)
function:

    def SetFilters(self, filters):
      """Sets the error-message filters.

      These filters are applied when deciding whether to emit a given
      error message.

      Args:
        filters: A string of comma-separated filters (eg "+whitespace/indent").
                 Each filter should start with + or -; else we die.

      Raises:
        ValueError: The comma-separated filters did not all start with '+' or '-'.
                    E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
      """
      # Default filters always have less priority than the flag ones.
      self.filters = _DEFAULT_FILTERS[:]
      for filt in filters.split(','):
        clean_filt = filt.strip()
        if clean_filt:
          self.filters.append(clean_filt)
      for filt in self.filters:
        if not (filt.startswith('+') or filt.startswith('-')):
          raise ValueError('Every filter in --filters must start with + or -'
                           ' (%s does not)' % filt)

https://github.com/wenwei202/caffe/blob/f54a74abaf6951d8485cbdcfa1d74a4c37839466/scripts/cpp_lint.py#L717-L740
------------------------------------------------------------------------
nwo:        thomaskeck/FastBDT
sha:        e67f71525612020acc78721031fca681d173c144
path:       examples/ugboost.py
language:   python
identifier: calculate_cdf_and_pdf
parameters: (X)
function:

    def calculate_cdf_and_pdf(X):
        """
        Calculates cdf and pdf of given sample and adds under/overflow bins
        @param X 1-d numpy.array
        """
        pdf, bins = numpy.histogram(X, bins=100, density=True)
        cdf = numpy.cumsum(pdf * (bins - numpy.roll(bins, 1))[1:])
        return numpy.hstack([0.0, cdf, 1.0]), numpy.hstack([0.0, pdf, 0.0]), bins

https://github.com/thomaskeck/FastBDT/blob/e67f71525612020acc78721031fca681d173c144/examples/ugboost.py#L14-L21
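
Self-contained usage sketch on synthetic data:

    import numpy

    X = numpy.random.normal(size=1000)
    cdf, pdf, bins = calculate_cdf_and_pdf(X)
    # cdf and pdf each gain an underflow and an overflow entry, so both have
    # len(bins) + 1 points (histogram returns len(bins) - 1 densities).
    assert len(cdf) == len(pdf) == len(bins) + 1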
------------------------------------------------------------------------
nwo:        pmq20/node-packer
sha:        12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
path:       lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py
language:   python
identifier: GenerateOutput
parameters: (target_list, target_dicts, data, params)
function:

    def GenerateOutput(target_list, target_dicts, data, params):
      """Generate .sln and .vcproj files.

      This is the entry point for this generator.
      Arguments:
        target_list: List of target pairs: 'base/base.gyp:base'.
        target_dicts: Dict of target properties keyed on target pair.
        data: Dictionary containing per .gyp data.
      """
      global fixpath_prefix

      options = params['options']

      # Get the project file format version back out of where we stashed it in
      # GeneratorCalculatedVariables.
      msvs_version = params['msvs_version']

      generator_flags = params.get('generator_flags', {})

      # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
      (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list,
                                                          target_dicts)

      # Optionally use the large PDB workaround for targets marked with
      # 'msvs_large_pdb': 1.
      (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
          target_list, target_dicts, generator_default_variables)

      # Optionally configure each spec to use ninja as the external builder.
      if params.get('flavor') == 'ninja':
        _InitNinjaFlavor(params, target_list, target_dicts)

      # Prepare the set of configurations.
      configs = set()
      for qualified_target in target_list:
        spec = target_dicts[qualified_target]
        for config_name, config in spec['configurations'].items():
          configs.add(_ConfigFullName(config_name, config))
      configs = list(configs)

      # Figure out all the projects that will be generated and their guids
      project_objects = _CreateProjectObjects(target_list, target_dicts,
                                              options, msvs_version)

      # Generate each project.
      missing_sources = []
      for project in project_objects.values():
        fixpath_prefix = project.fixpath_prefix
        missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                                generator_flags))
      fixpath_prefix = None

      for build_file in data:
        # Validate build_file extension
        if not build_file.endswith('.gyp'):
          continue
        sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
        if options.generator_output:
          sln_path = os.path.join(options.generator_output, sln_path)
        # Get projects in the solution, and their dependents.
        sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
        sln_projects += gyp.common.DeepDependencyTargets(target_dicts,
                                                         sln_projects)
        # Create folder hierarchy.
        root_entries = _GatherSolutionFolders(
            sln_projects, project_objects, flat=msvs_version.FlatSolution())
        # Create solution.
        sln = MSVSNew.MSVSSolution(sln_path,
                                   entries=root_entries,
                                   variants=configs,
                                   websiteProperties=False,
                                   version=msvs_version)
        sln.Write()

      if missing_sources:
        error_message = "Missing input files:\n" + \
            '\n'.join(set(missing_sources))
        if generator_flags.get('msvs_error_on_missing_sources', False):
          raise GypError(error_message)
        else:
          print("Warning: " + error_message, file=sys.stdout)

https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/lts/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py#L1967-L2045
------------------------------------------------------------------------
nwo:        ceph/ceph
sha:        959663007321a369c83218414a29bd9dbc8bda3a
path:       src/ceph-volume/ceph_volume/util/__init__.py
language:   python
identifier: prompt_bool
parameters: (question, input_=None)
function:

    def prompt_bool(question, input_=None):
        """
        Interface to prompt a boolean (or boolean-like) response from a user.
        Usually a confirmation.
        """
        input_prompt = input_ or input
        prompt_format = '--> {question} '.format(question=question)
        response = input_prompt(prompt_format)
        try:
            return str_to_bool(response)
        except ValueError:
            terminal.error('Valid true responses are: y, yes, <Enter>')
            terminal.error('Valid false responses are: n, no')
            terminal.error('That response was invalid, please try again')
            return prompt_bool(question, input_=input_prompt)

https://github.com/ceph/ceph/blob/959663007321a369c83218414a29bd9dbc8bda3a/src/ceph-volume/ceph_volume/util/__init__.py#L86-L100
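
The input_ hook makes the prompt testable without a TTY (a sketch; str_to_bool
in the same module accepts y/yes and n/no, and an invalid answer triggers a
recursive retry):

    answers = iter(['maybe', 'yes'])  # first response is invalid, second is valid
    result = prompt_bool('continue?', input_=lambda prompt: next(answers))
    assert result is True             # retried once, then parsed 'yes'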
------------------------------------------------------------------------
nwo:        hanpfei/chromium-net
sha:        392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
path:       tools/json_schema_compiler/js_util.py
language:   python
identifier: JsUtil.GetLicense
parameters: (self)
function:

    def GetLicense(self):
        """Returns the license text for JS extern and interface files."""
        return (LICENSE % datetime.now().year)

https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/tools/json_schema_compiler/js_util.py#L23-L26
------------------------------------------------------------------------
nwo:        wang-bin/QtAV
sha:        3b937991afce248648836ae811324d4051b31def
path:       python/configure.py
language:   python
identifier: inform
parameters: (msg)
function:

    def inform(msg):
        """ Display an information message.  msg is the text of the error
        message.
        """
        sys.stdout.write(_format(msg) + "\n")

https://github.com/wang-bin/QtAV/blob/3b937991afce248648836ae811324d4051b31def/python/configure.py#L381-L385
------------------------------------------------------------------------
nwo:        benoitsteiner/tensorflow-opencl
sha:        cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
path:       tensorflow/contrib/learn/python/learn/estimators/head.py
language:   python
identifier: _logits
parameters: (logits_input, logits, logits_dimension)
function:

    def _logits(logits_input, logits, logits_dimension):
      """Validate logits args, and create `logits` if necessary.

      Exactly one of `logits_input` and `logits` must be provided.

      Args:
        logits_input: `Tensor` input to `logits`.
        logits: `Tensor` output.
        logits_dimension: Integer, last dimension of `logits`. This is used to
          create `logits` from `logits_input` if `logits` is `None`; otherwise,
          it's used to validate `logits`.

      Returns:
        `logits` `Tensor`.

      Raises:
        ValueError: if neither or both of `logits` and `logits_input` are
          supplied.
      """
      if (logits_dimension is None) or (logits_dimension < 1):
        raise ValueError("Invalid logits_dimension %s." % logits_dimension)

      # If not provided, create logits.
      if logits is None:
        if logits_input is None:
          raise ValueError("Neither logits nor logits_input supplied.")
        return layers_lib.linear(logits_input, logits_dimension, scope="logits")

      if logits_input is not None:
        raise ValueError("Both logits and logits_input supplied.")

      logits = ops.convert_to_tensor(logits, name="logits")
      logits_dims = logits.get_shape().dims
      if logits_dims is not None:
        logits_dims[-1].assert_is_compatible_with(logits_dimension)

      return logits

https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/learn/python/learn/estimators/head.py#L571-L606
------------------------------------------------------------------------
nwo:        catboost/catboost
sha:        167f64f237114a4d10b2b4ee42adb4569137debe
path:       contrib/python/scikit-learn/py3/sklearn/metrics/_plot/precision_recall_curve.py
language:   python
identifier: plot_precision_recall_curve
parameters: (estimator, X, y, sample_weight=None, response_method="auto",
             name=None, ax=None, **kwargs)
function:

    def plot_precision_recall_curve(estimator, X, y,
                                    sample_weight=None, response_method="auto",
                                    name=None, ax=None, **kwargs):
        """Plot Precision Recall Curve for binary classifiers.

        Extra keyword arguments will be passed to matplotlib's `plot`.

        Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

        Parameters
        ----------
        estimator : estimator instance
            Trained classifier.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input values.

        y : array-like of shape (n_samples,)
            Binary target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        response_method : {'predict_proba', 'decision_function', 'auto'}, \
                default='auto'
            Specifies whether to use :term:`predict_proba` or
            :term:`decision_function` as the target response. If set to
            'auto', :term:`predict_proba` is tried first and if it does not
            exist :term:`decision_function` is tried next.

        name : str, default=None
            Name for labeling curve. If `None`, the name of the estimator is
            used.

        ax : matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Keyword arguments to be passed to matplotlib's `plot`.

        Returns
        -------
        display : :class:`~sklearn.metrics.PrecisionRecallDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support("plot_precision_recall_curve")

        classification_error = ("{} should be a binary classifier".format(
            estimator.__class__.__name__))
        if not is_classifier(estimator):
            raise ValueError(classification_error)

        prediction_method = _check_classifer_response_method(estimator,
                                                             response_method)
        y_pred = prediction_method(X)

        if y_pred.ndim != 1:
            if y_pred.shape[1] != 2:
                raise ValueError(classification_error)
            else:
                y_pred = y_pred[:, 1]

        pos_label = estimator.classes_[1]
        precision, recall, _ = precision_recall_curve(
            y, y_pred, pos_label=pos_label, sample_weight=sample_weight)
        average_precision = average_precision_score(
            y, y_pred, pos_label=pos_label, sample_weight=sample_weight)
        name = name if name is not None else estimator.__class__.__name__
        viz = PrecisionRecallDisplay(
            precision=precision, recall=recall,
            average_precision=average_precision, estimator_name=name
        )
        return viz.plot(ax=ax, name=name, **kwargs)

https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/metrics/_plot/precision_recall_curve.py#L97-L171
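A minimal usage sketch for the plot_precision_recall_curve record above (illustrative, not part of the dataset). It assumes a scikit-learn release in the 0.22-1.1 range, where this function was public (it was deprecated in 1.0 and removed in 1.2):

import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_precision_recall_curve

# Binary classification problem (make_classification is binary by default).
X, y = make_classification(n_samples=200, random_state=0)
clf = LogisticRegression().fit(X, y)

# response_method="auto" tries predict_proba first, then decision_function.
display = plot_precision_recall_curve(clf, X, y, name="logreg")
plt.show()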
NREL/EnergyPlus
fadc5973b85c70e8cc923efb69c144e808a26078
src/EnergyPlus/api/datatransfer.py
python
DataExchange.today_weather_outdoor_dry_bulb_at_time
(self, state: c_void_p, hour: int, time_step_number: int)
return self.api.todayWeatherOutDryBulbAtTime(state, hour, time_step_number)
Gets the specified weather data at the specified hour and time step index within that hour :param state: An active EnergyPlus "state" that is returned from a call to `api.state_manager.new_state()`. :param hour: Integer hour of day (0 to 23) :param time_step_number: Time step index in hour, from 1 to the number of zone time steps per hour :return: Value of the weather condition at the specified time
Gets the specified weather data at the specified hour and time step index within that hour
[ "Gets", "the", "specified", "weather", "data", "at", "the", "specified", "hour", "and", "time", "step", "index", "within", "that", "hour" ]
def today_weather_outdoor_dry_bulb_at_time(self, state: c_void_p, hour: int, time_step_number: int) -> float: """ Gets the specified weather data at the specified hour and time step index within that hour :param state: An active EnergyPlus "state" that is returned from a call to `api.state_manager.new_state()`. :param hour: Integer hour of day (0 to 23) :param time_step_number: Time step index in hour, from 1 to the number of zone time steps per hour :return: Value of the weather condition at the specified time """ return self.api.todayWeatherOutDryBulbAtTime(state, hour, time_step_number)
[ "def", "today_weather_outdoor_dry_bulb_at_time", "(", "self", ",", "state", ":", "c_void_p", ",", "hour", ":", "int", ",", "time_step_number", ":", "int", ")", "->", "float", ":", "return", "self", ".", "api", ".", "todayWeatherOutDryBulbAtTime", "(", "state", ",", "hour", ",", "time_step_number", ")" ]
https://github.com/NREL/EnergyPlus/blob/fadc5973b85c70e8cc923efb69c144e808a26078/src/EnergyPlus/api/datatransfer.py#L1141-L1150
zeakey/DeepSkeleton
dc70170f8fd2ec8ca1157484ce66129981104486
scripts/cpp_lint.py
python
_IncludeState.IsInAlphabeticalOrder
(self, clean_lines, linenum, header_path)
return True
Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order.
Check if a header is in alphabetical order with the previous header.
[ "Check", "if", "a", "header", "is", "in", "alphabetical", "order", "with", "the", "previous", "header", "." ]
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): """Check if a header is in alphabetical order with the previous header. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. header_path: Canonicalized header to be checked. Returns: Returns true if the header is in alphabetical order. """ # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and not Match(r'^\s*$', clean_lines.elided[linenum - 1])): return False return True
[ "def", "IsInAlphabeticalOrder", "(", "self", ",", "clean_lines", ",", "linenum", ",", "header_path", ")", ":", "# If previous section is different from current section, _last_header will", "# be reset to empty string, so it's always less than current header.", "#", "# If previous line was a blank line, assume that the headers are", "# intentionally sorted the way they are.", "if", "(", "self", ".", "_last_header", ">", "header_path", "and", "not", "Match", "(", "r'^\\s*$'", ",", "clean_lines", ".", "elided", "[", "linenum", "-", "1", "]", ")", ")", ":", "return", "False", "return", "True" ]
https://github.com/zeakey/DeepSkeleton/blob/dc70170f8fd2ec8ca1157484ce66129981104486/scripts/cpp_lint.py#L612-L631
tensorflow/io
92b44e180674a8af0e12e405530f7343e3e693e4
tensorflow_io/python/ops/hdf5_io_tensor_ops.py
python
BaseHDF5GraphIOTensor.to_tensor
(self)
return core_ops.io_hdf5_readable_read( input=self._filename, shared=self._filename, component=self._component, shape=self._shape, start=0, stop=-1, dtype=self._dtype, container="HDF5IOTensor", )
Converts this `IOTensor` into a `tf.Tensor`. Args: name: A name prefix for the returned tensors (optional). Returns: A `Tensor` with value obtained from this `IOTensor`.
Converts this `IOTensor` into a `tf.Tensor`.
[ "Converts", "this", "IOTensor", "into", "a", "tf", ".", "Tensor", "." ]
def to_tensor(self): """Converts this `IOTensor` into a `tf.Tensor`. Args: name: A name prefix for the returned tensors (optional). Returns: A `Tensor` with value obtained from this `IOTensor`. """ return core_ops.io_hdf5_readable_read( input=self._filename, shared=self._filename, component=self._component, shape=self._shape, start=0, stop=-1, dtype=self._dtype, container="HDF5IOTensor", )
[ "def", "to_tensor", "(", "self", ")", ":", "return", "core_ops", ".", "io_hdf5_readable_read", "(", "input", "=", "self", ".", "_filename", ",", "shared", "=", "self", ".", "_filename", ",", "component", "=", "self", ".", "_component", ",", "shape", "=", "self", ".", "_shape", ",", "start", "=", "0", ",", "stop", "=", "-", "1", ",", "dtype", "=", "self", ".", "_dtype", ",", "container", "=", "\"HDF5IOTensor\"", ",", ")" ]
https://github.com/tensorflow/io/blob/92b44e180674a8af0e12e405530f7343e3e693e4/tensorflow_io/python/ops/hdf5_io_tensor_ops.py#L63-L81
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py
python
_flatten
(seq)
return res
Internal function.
Internal function.
[ "Internal", "function", "." ]
def _flatten(seq): """Internal function.""" res = () for item in seq: if isinstance(item, (tuple, list)): res = res + _flatten(item) elif item is not None: res = res + (item,) return res
[ "def", "_flatten", "(", "seq", ")", ":", "res", "=", "(", ")", "for", "item", "in", "seq", ":", "if", "isinstance", "(", "item", ",", "(", "tuple", ",", "list", ")", ")", ":", "res", "=", "res", "+", "_flatten", "(", "item", ")", "elif", "item", "is", "not", "None", ":", "res", "=", "res", "+", "(", "item", ",", ")", "return", "res" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/tkinter/__init__.py#L83-L91
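The behavior of the _flatten record above, demonstrated standalone (the helper is redefined here because it is private to tkinter):

def _flatten(seq):
    """Recursively flatten nested tuples/lists, dropping None (mirrors the record above)."""
    res = ()
    for item in seq:
        if isinstance(item, (tuple, list)):
            res = res + _flatten(item)
        elif item is not None:
            res = res + (item,)
    return res

print(_flatten([1, (2, [3, None]), None, 4]))  # -> (1, 2, 3, 4)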
pytorch/pytorch
7176c92687d3cc847cc046bf002269c6949a21c2
torch/package/file_structure_representation.py
python
Directory.has_file
(self, filename: str)
return False
Checks if a file is present in a :class:`Directory`. Args: filename (str): Path of file to search for. Returns: bool: If a :class:`Directory` contains the specified file.
Checks if a file is present in a :class:`Directory`.
[ "Checks", "if", "a", "file", "is", "present", "in", "a", ":", "class", ":", "Directory", "." ]
def has_file(self, filename: str) -> bool: """Checks if a file is present in a :class:`Directory`. Args: filename (str): Path of file to search for. Returns: bool: If a :class:`Directory` contains the specified file. """ lineage = filename.split("/", maxsplit=1) child = lineage[0] grandchildren = lineage[1] if len(lineage) > 1 else None if child in self.children.keys(): if grandchildren is None: return True else: return self.children[child].has_file(grandchildren) return False
[ "def", "has_file", "(", "self", ",", "filename", ":", "str", ")", "->", "bool", ":", "lineage", "=", "filename", ".", "split", "(", "\"/\"", ",", "maxsplit", "=", "1", ")", "child", "=", "lineage", "[", "0", "]", "grandchildren", "=", "lineage", "[", "1", "]", "if", "len", "(", "lineage", ")", ">", "1", "else", "None", "if", "child", "in", "self", ".", "children", ".", "keys", "(", ")", ":", "if", "grandchildren", "is", "None", ":", "return", "True", "else", ":", "return", "self", ".", "children", "[", "child", "]", ".", "has_file", "(", "grandchildren", ")", "return", "False" ]
https://github.com/pytorch/pytorch/blob/7176c92687d3cc847cc046bf002269c6949a21c2/torch/package/file_structure_representation.py#L45-L61
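The recursion in has_file walks the path one component at a time. A self-contained sketch of the same idea over a plain nested dict; the names here (has_file, pkg) are hypothetical illustrations, not the torch.package API:

def has_file(tree, filename):
    # tree: nested dict mapping names to sub-dicts (directories) or None (files)
    child, _, rest = filename.partition("/")
    if child not in tree:
        return False
    if not rest:
        return True
    subtree = tree[child]
    return isinstance(subtree, dict) and has_file(subtree, rest)

pkg = {"models": {"resnet.py": None}, "setup.py": None}
print(has_file(pkg, "models/resnet.py"))  # True
print(has_file(pkg, "models/vgg.py"))     # False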
OAID/Caffe-HRT
aae71e498ab842c6f92bcc23fc668423615a4d65
scripts/cpp_lint.py
python
IsBlankLine
(line)
return not line or line.isspace()
Returns true if the given line is blank. We consider a line to be blank if the line is empty or consists of only white spaces. Args: line: A line of a string. Returns: True, if the given line is blank.
Returns true if the given line is blank.
[ "Returns", "true", "if", "the", "given", "line", "is", "blank", "." ]
def IsBlankLine(line): """Returns true if the given line is blank. We consider a line to be blank if the line is empty or consists of only white spaces. Args: line: A line of a string. Returns: True, if the given line is blank. """ return not line or line.isspace()
[ "def", "IsBlankLine", "(", "line", ")", ":", "return", "not", "line", "or", "line", ".", "isspace", "(", ")" ]
https://github.com/OAID/Caffe-HRT/blob/aae71e498ab842c6f92bcc23fc668423615a4d65/scripts/cpp_lint.py#L2369-L2381
google-coral/edgetpu
5020de9386ff370dcc1f63291a2d0f98eeb98adb
edgetpu/learn/backprop/softmax_regression.py
python
SoftmaxRegression.save_as_tflite_model
(self, in_model_path, out_model_path)
Appends learned weights to your TensorFlow Lite model and saves it as a copy. Beware that learned weights and biases are quantized from float32 to uint8. Args: in_model_path (str): Path to the embedding extractor model (``.tflite`` file). out_model_path (str): Path where you'd like to save the new model with learned weights and a softmax layer appended (``.tflite`` file).
Appends learned weights to your TensorFlow Lite model and saves it as a copy.
[ "Appends", "learned", "weights", "to", "your", "TensorFlow", "Lite", "model", "and", "saves", "it", "as", "a", "copy", "." ]
def save_as_tflite_model(self, in_model_path, out_model_path): """Appends learned weights to your TensorFlow Lite model and saves it as a copy. Beware that learned weights and biases are quantized from float32 to uint8. Args: in_model_path (str): Path to the embedding extractor model (``.tflite`` file). out_model_path (str): Path where you'd like to save the new model with learned weights and a softmax layer appended (``.tflite`` file). """ # Note: this function assumes flattened weights, whose dimension is # num_classes x feature_dim. That's why the transpose is needed. AppendFullyConnectedAndSoftmaxLayerToModel( in_model_path, out_model_path, self.params['mat_w'].transpose().flatten(), self.params['vec_b'].flatten(), float(self.min_score), float(self.max_score))
[ "def", "save_as_tflite_model", "(", "self", ",", "in_model_path", ",", "out_model_path", ")", ":", "# Note: this function assumes flattened weights, whose dimension is", "# num_classes x feature_dim. That's why the transpose is needed.", "AppendFullyConnectedAndSoftmaxLayerToModel", "(", "in_model_path", ",", "out_model_path", ",", "self", ".", "params", "[", "'mat_w'", "]", ".", "transpose", "(", ")", ".", "flatten", "(", ")", ",", "self", ".", "params", "[", "'vec_b'", "]", ".", "flatten", "(", ")", ",", "float", "(", "self", ".", "min_score", ")", ",", "float", "(", "self", ".", "max_score", ")", ")" ]
https://github.com/google-coral/edgetpu/blob/5020de9386ff370dcc1f63291a2d0f98eeb98adb/edgetpu/learn/backprop/softmax_regression.py#L139-L155
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/html.py
python
HtmlContainerCell.SetBackgroundColour
(*args, **kwargs)
return _html.HtmlContainerCell_SetBackgroundColour(*args, **kwargs)
SetBackgroundColour(self, Colour clr)
SetBackgroundColour(self, Colour clr)
[ "SetBackgroundColour", "(", "self", "Colour", "clr", ")" ]
def SetBackgroundColour(*args, **kwargs): """SetBackgroundColour(self, Colour clr)""" return _html.HtmlContainerCell_SetBackgroundColour(*args, **kwargs)
[ "def", "SetBackgroundColour", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_html", ".", "HtmlContainerCell_SetBackgroundColour", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/html.py#L849-L851
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/format.py
python
reformat_comment
(data, limit, comment_header)
return '\n'.join(comment_header+line for line in newdata) + block_suffix
Return data reformatted to specified width with comment header.
Return data reformatted to specified width with comment header.
[ "Return", "data", "reformatted", "to", "specified", "width", "with", "comment", "header", "." ]
def reformat_comment(data, limit, comment_header):
    """Return data reformatted to specified width with comment header."""

    # Remove header from the comment lines
    lc = len(comment_header)
    data = "\n".join(line[lc:] for line in data.split("\n"))
    # Reformat to maxformatwidth chars or a 20 char width,
    # whichever is greater.
    format_width = max(limit - len(comment_header), 20)
    newdata = reformat_paragraph(data, format_width)
    # re-split and re-insert the comment header.
    newdata = newdata.split("\n")
    # If the block ends in a \n, we don't want the comment prefix
    # inserted after it. (I'm not sure it makes sense to reformat a
    # comment block that is not made of complete lines, but whatever!)
    # Can't think of a clean solution, so we hack away
    block_suffix = ""
    if not newdata[-1]:
        block_suffix = "\n"
        newdata = newdata[:-1]
    return '\n'.join(comment_header+line for line in newdata) + block_suffix
[ "def", "reformat_comment", "(", "data", ",", "limit", ",", "comment_header", ")", ":", "# Remove header from the comment lines", "lc", "=", "len", "(", "comment_header", ")", "data", "=", "\"\\n\"", ".", "join", "(", "line", "[", "lc", ":", "]", "for", "line", "in", "data", ".", "split", "(", "\"\\n\"", ")", ")", "# Reformat to maxformatwidth chars or a 20 char width,", "# whichever is greater.", "format_width", "=", "max", "(", "limit", "-", "len", "(", "comment_header", ")", ",", "20", ")", "newdata", "=", "reformat_paragraph", "(", "data", ",", "format_width", ")", "# re-split and re-insert the comment header.", "newdata", "=", "newdata", ".", "split", "(", "\"\\n\"", ")", "# If the block ends in a \\n, we don't want the comment prefix", "# inserted after it. (I'm not sure it makes sense to reformat a", "# comment block that is not made of complete lines, but whatever!)", "# Can't think of a clean solution, so we hack away", "block_suffix", "=", "\"\"", "if", "not", "newdata", "[", "-", "1", "]", ":", "block_suffix", "=", "\"\\n\"", "newdata", "=", "newdata", "[", ":", "-", "1", "]", "return", "'\\n'", ".", "join", "(", "comment_header", "+", "line", "for", "line", "in", "newdata", ")", "+", "block_suffix" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/idlelib/format.py#L156-L176
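The strip-header/rewrap/re-prefix pattern from the reformat_comment record, sketched with only the standard library; textwrap.fill stands in for IDLE's reformat_paragraph here, so exact line breaks may differ:

import textwrap

def reformat_comment_sketch(data, limit, comment_header):
    lc = len(comment_header)
    # Strip the comment header, rewrap the bare text, then re-prefix each line.
    stripped = "\n".join(line[lc:] for line in data.split("\n"))
    width = max(limit - lc, 20)
    wrapped = textwrap.fill(" ".join(stripped.split()), width=width)
    return "\n".join(comment_header + line for line in wrapped.split("\n"))

src = "# one two three four five six seven eight nine ten eleven twelve"
print(reformat_comment_sketch(src, 30, "# "))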
envoyproxy/envoy
65541accdafe255e72310b4298d646e091da2d80
tools/protodoc/protodoc.py
python
format_proto_as_block_comment
(proto)
return '\n\nproto::\n\n' + map_lines(functools.partial(indent, 2), str(proto)) + '\n'
Format a proto as a RST block comment. Useful in debugging, not usually referenced.
Format a proto as a RST block comment.
[ "Format", "a", "proto", "as", "a", "RST", "block", "comment", "." ]
def format_proto_as_block_comment(proto): """Format a proto as a RST block comment. Useful in debugging, not usually referenced. """ return '\n\nproto::\n\n' + map_lines(functools.partial(indent, 2), str(proto)) + '\n'
[ "def", "format_proto_as_block_comment", "(", "proto", ")", ":", "return", "'\\n\\nproto::\\n\\n'", "+", "map_lines", "(", "functools", ".", "partial", "(", "indent", ",", "2", ")", ",", "str", "(", "proto", ")", ")", "+", "'\\n'" ]
https://github.com/envoyproxy/envoy/blob/65541accdafe255e72310b4298d646e091da2d80/tools/protodoc/protodoc.py#L691-L696
CaoWGG/TensorRT-CenterNet
f949252e37b51e60f873808f46d3683f15735e79
onnx-tensorrt/third_party/onnx/onnx/helper.py
python
make_tensor
( name, # type: Text data_type, # type: int dims, # type: Sequence[int] vals, # type: Any raw=False # type: bool )
return tensor
Make a TensorProto with specified arguments. If raw is False, this function will choose the corresponding proto field to store the values based on data_type. If raw is True, use "raw_data" proto field to store the values, and values should be of type bytes in this case.
Make a TensorProto with specified arguments. If raw is False, this function will choose the corresponding proto field to store the values based on data_type. If raw is True, use "raw_data" proto field to store the values, and values should be of type bytes in this case.
[ "Make", "a", "TensorProto", "with", "specified", "arguments", ".", "If", "raw", "is", "False", "this", "function", "will", "choose", "the", "corresponding", "proto", "field", "to", "store", "the", "values", "based", "on", "data_type", ".", "If", "raw", "is", "True", "use", "raw_data", "proto", "field", "to", "store", "the", "values", "and", "values", "should", "be", "of", "type", "bytes", "in", "this", "case", "." ]
def make_tensor( name, # type: Text data_type, # type: int dims, # type: Sequence[int] vals, # type: Any raw=False # type: bool ): # type: (...) -> TensorProto ''' Make a TensorProto with specified arguments. If raw is False, this function will choose the corresponding proto field to store the values based on data_type. If raw is True, use "raw_data" proto field to store the values, and values should be of type bytes in this case. ''' tensor = TensorProto() tensor.data_type = data_type tensor.name = name if data_type == TensorProto.STRING: assert not raw, "Can not use raw_data to store string type" if (data_type == TensorProto.COMPLEX64 or data_type == TensorProto.COMPLEX128): vals = split_complex_to_pairs(vals) if raw: tensor.raw_data = vals else: field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[ mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[data_type]] getattr(tensor, field).extend(vals) tensor.dims.extend(dims) return tensor
[ "def", "make_tensor", "(", "name", ",", "# type: Text", "data_type", ",", "# type: int", "dims", ",", "# type: Sequence[int]", "vals", ",", "# type: Any", "raw", "=", "False", "# type: bool", ")", ":", "# type: (...) -> TensorProto", "tensor", "=", "TensorProto", "(", ")", "tensor", ".", "data_type", "=", "data_type", "tensor", ".", "name", "=", "name", "if", "data_type", "==", "TensorProto", ".", "STRING", ":", "assert", "not", "raw", ",", "\"Can not use raw_data to store string type\"", "if", "(", "data_type", "==", "TensorProto", ".", "COMPLEX64", "or", "data_type", "==", "TensorProto", ".", "COMPLEX128", ")", ":", "vals", "=", "split_complex_to_pairs", "(", "vals", ")", "if", "raw", ":", "tensor", ".", "raw_data", "=", "vals", "else", ":", "field", "=", "mapping", ".", "STORAGE_TENSOR_TYPE_TO_FIELD", "[", "mapping", ".", "TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE", "[", "data_type", "]", "]", "getattr", "(", "tensor", ",", "field", ")", ".", "extend", "(", "vals", ")", "tensor", ".", "dims", ".", "extend", "(", "dims", ")", "return", "tensor" ]
https://github.com/CaoWGG/TensorRT-CenterNet/blob/f949252e37b51e60f873808f46d3683f15735e79/onnx-tensorrt/third_party/onnx/onnx/helper.py#L144-L176
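A minimal usage sketch for make_tensor, assuming the onnx package is installed (make_tensor is part of onnx.helper's public API); the tensor names and values are illustrative:

import numpy as np
from onnx import TensorProto, helper, numpy_helper

# Store four floats in the typed field (raw=False, the default).
t = helper.make_tensor("weight", TensorProto.FLOAT, dims=[2, 2],
                       vals=[1.0, 2.0, 3.0, 4.0])
print(numpy_helper.to_array(t))  # [[1. 2.] [3. 4.]]

# Same dtype and shape via the raw_data field (raw=True expects bytes).
raw = np.arange(4, dtype=np.float32).tobytes()
t_raw = helper.make_tensor("weight_raw", TensorProto.FLOAT, [2, 2], raw, raw=True)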
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py
python
AbstractBufferedFile.readlines
(self)
Return all data, split by the newline character
Return all data, split by the newline character
[ "Return", "all", "data", "split", "by", "the", "newline", "character" ]
def readlines(self): """Return all data, split by the newline character""" data = self.read() lines = data.split(b"\n") out = [l + b"\n" for l in lines[:-1]] if data.endswith(b"\n"): return out else: return out + [lines[-1]]
[ "def", "readlines", "(", "self", ")", ":", "data", "=", "self", ".", "read", "(", ")", "lines", "=", "data", ".", "split", "(", "b\"\\n\"", ")", "out", "=", "[", "l", "+", "b\"\\n\"", "for", "l", "in", "lines", "[", ":", "-", "1", "]", "]", "if", "data", ".", "endswith", "(", "b\"\\n\"", ")", ":", "return", "out", "else", ":", "return", "out", "+", "[", "lines", "[", "-", "1", "]", "]" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/fsspec/spec.py#L1288-L1296
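Why the trailing-newline check in readlines matters: bytes.split(b"\n") always produces a final element, and that element is empty exactly when the data ends in a newline. A standalone illustration of the same splitting logic (hypothetical data, no filesystem needed):

def split_lines(data: bytes):
    lines = data.split(b"\n")
    out = [l + b"\n" for l in lines[:-1]]
    # If data ends in a newline, the final split element is b"" and is dropped;
    # otherwise it is a real, unterminated last line and must be kept.
    return out if data.endswith(b"\n") else out + [lines[-1]]

print(split_lines(b"a\nb\n"))  # [b'a\n', b'b\n']
print(split_lines(b"a\nb"))    # [b'a\n', b'b']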
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/convert/processor/conversion/de1/processor.py
python
DE1Processor.convert
(cls, gamespec, args, string_resources, existing_graphics)
return modpacks
Input game specification and media here and get a set of modpacks back. :param gamespec: Gamedata from empires.dat read in by the reader functions. :type gamespec: class: ...dataformat.value_members.ArrayMember :returns: A list of modpacks. :rtype: list
Input game specification and media here and get a set of modpacks back.
[ "Input", "game", "specification", "and", "media", "here", "and", "get", "a", "set", "of", "modpacks", "back", "." ]
def convert(cls, gamespec, args, string_resources, existing_graphics): """ Input game specification and media here and get a set of modpacks back. :param gamespec: Gamedata from empires.dat read in by the reader functions. :type gamespec: class: ...dataformat.value_members.ArrayMember :returns: A list of modpacks. :rtype: list """ info("Starting conversion...") # Create a new container for the conversion process dataset = cls._pre_processor( gamespec, args.game_version, string_resources, existing_graphics ) debug_converter_objects(args.debugdir, args.debug_info, dataset) # Create the custom openage formats (nyan, sprite, terrain) dataset = cls._processor(gamespec, dataset) debug_converter_object_groups(args.debugdir, args.debug_info, dataset) # Create modpack definitions modpacks = cls._post_processor(dataset) return modpacks
[ "def", "convert", "(", "cls", ",", "gamespec", ",", "args", ",", "string_resources", ",", "existing_graphics", ")", ":", "info", "(", "\"Starting conversion...\"", ")", "# Create a new container for the conversion process", "dataset", "=", "cls", ".", "_pre_processor", "(", "gamespec", ",", "args", ".", "game_version", ",", "string_resources", ",", "existing_graphics", ")", "debug_converter_objects", "(", "args", ".", "debugdir", ",", "args", ".", "debug_info", ",", "dataset", ")", "# Create the custom openage formats (nyan, sprite, terrain)", "dataset", "=", "cls", ".", "_processor", "(", "gamespec", ",", "dataset", ")", "debug_converter_object_groups", "(", "args", ".", "debugdir", ",", "args", ".", "debug_info", ",", "dataset", ")", "# Create modpack definitions", "modpacks", "=", "cls", ".", "_post_processor", "(", "dataset", ")", "return", "modpacks" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/processor/conversion/de1/processor.py#L28-L58
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_gdi.py
python
GraphicsContext.ConcatTransform
(*args, **kwargs)
return _gdi_.GraphicsContext_ConcatTransform(*args, **kwargs)
ConcatTransform(self, GraphicsMatrix matrix) Concatenates the passed in transform with the current transform of this context.
ConcatTransform(self, GraphicsMatrix matrix)
[ "ConcatTransform", "(", "self", "GraphicsMatrix", "matrix", ")" ]
def ConcatTransform(*args, **kwargs): """ ConcatTransform(self, GraphicsMatrix matrix) Concatenates the passed in transform with the current transform of this context. """ return _gdi_.GraphicsContext_ConcatTransform(*args, **kwargs)
[ "def", "ConcatTransform", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_gdi_", ".", "GraphicsContext_ConcatTransform", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_gdi.py#L6466-L6473
swift/swift
12d031cf8177fdec0137f9aa7e2912fa23c4416b
3rdParty/SCons/scons-3.0.1/engine/SCons/Util.py
python
unique
(s)
return u
Return a list of the elements in s, but without duplicates. For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], unique("abcabc") some permutation of ["a", "b", "c"], and unique(([1, 2], [2, 3], [1, 2])) some permutation of [[2, 3], [1, 2]]. For best speed, all sequence elements should be hashable. Then unique() will usually work in linear time. If not possible, the sequence elements should enjoy a total ordering, and if list(s).sort() doesn't raise TypeError it's assumed that they do enjoy a total ordering. Then unique() will usually work in O(N*log2(N)) time. If that's not possible either, the sequence elements must support equality-testing. Then unique() will usually work in quadratic time.
Return a list of the elements in s, but without duplicates.
[ "Return", "a", "list", "of", "the", "elements", "in", "s", "but", "without", "duplicates", "." ]
def unique(s): """Return a list of the elements in s, but without duplicates. For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], unique("abcabc") some permutation of ["a", "b", "c"], and unique(([1, 2], [2, 3], [1, 2])) some permutation of [[2, 3], [1, 2]]. For best speed, all sequence elements should be hashable. Then unique() will usually work in linear time. If not possible, the sequence elements should enjoy a total ordering, and if list(s).sort() doesn't raise TypeError it's assumed that they do enjoy a total ordering. Then unique() will usually work in O(N*log2(N)) time. If that's not possible either, the sequence elements must support equality-testing. Then unique() will usually work in quadratic time. """ n = len(s) if n == 0: return [] # Try using a dict first, as that's the fastest and will usually # work. If it doesn't work, it will usually fail quickly, so it # usually doesn't cost much to *try* it. It requires that all the # sequence elements be hashable, and support equality comparison. u = {} try: for x in s: u[x] = 1 except TypeError: pass # move on to the next method else: return list(u.keys()) del u # We can't hash all the elements. Second fastest is to sort, # which brings the equal elements together; then duplicates are # easy to weed out in a single pass. # NOTE: Python's list.sort() was designed to be efficient in the # presence of many duplicate elements. This isn't true of all # sort functions in all languages or libraries, so this approach # is more effective in Python than it may be elsewhere. try: t = sorted(s) except TypeError: pass # move on to the next method else: assert n > 0 last = t[0] lasti = i = 1 while i < n: if t[i] != last: t[lasti] = last = t[i] lasti = lasti + 1 i = i + 1 return t[:lasti] del t # Brute force is all that's left. u = [] for x in s: if x not in u: u.append(x) return u
[ "def", "unique", "(", "s", ")", ":", "n", "=", "len", "(", "s", ")", "if", "n", "==", "0", ":", "return", "[", "]", "# Try using a dict first, as that's the fastest and will usually", "# work. If it doesn't work, it will usually fail quickly, so it", "# usually doesn't cost much to *try* it. It requires that all the", "# sequence elements be hashable, and support equality comparison.", "u", "=", "{", "}", "try", ":", "for", "x", "in", "s", ":", "u", "[", "x", "]", "=", "1", "except", "TypeError", ":", "pass", "# move on to the next method", "else", ":", "return", "list", "(", "u", ".", "keys", "(", ")", ")", "del", "u", "# We can't hash all the elements. Second fastest is to sort,", "# which brings the equal elements together; then duplicates are", "# easy to weed out in a single pass.", "# NOTE: Python's list.sort() was designed to be efficient in the", "# presence of many duplicate elements. This isn't true of all", "# sort functions in all languages or libraries, so this approach", "# is more effective in Python than it may be elsewhere.", "try", ":", "t", "=", "sorted", "(", "s", ")", "except", "TypeError", ":", "pass", "# move on to the next method", "else", ":", "assert", "n", ">", "0", "last", "=", "t", "[", "0", "]", "lasti", "=", "i", "=", "1", "while", "i", "<", "n", ":", "if", "t", "[", "i", "]", "!=", "last", ":", "t", "[", "lasti", "]", "=", "last", "=", "t", "[", "i", "]", "lasti", "=", "lasti", "+", "1", "i", "=", "i", "+", "1", "return", "t", "[", ":", "lasti", "]", "del", "t", "# Brute force is all that's left.", "u", "=", "[", "]", "for", "x", "in", "s", ":", "if", "x", "not", "in", "u", ":", "u", ".", "append", "(", "x", ")", "return", "u" ]
https://github.com/swift/swift/blob/12d031cf8177fdec0137f9aa7e2912fa23c4416b/3rdParty/SCons/scons-3.0.1/engine/SCons/Util.py#L1155-L1222
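The three fallback strategies in the unique record above can be exercised with elements of decreasing capability (this sketch assumes the unique function is in scope):

# Hashable elements: dict path, roughly linear time.
print(unique([1, 2, 3, 1, 2, 3]))        # some permutation of [1, 2, 3]

# Unhashable but orderable elements (lists): sort path, O(N log N).
print(unique([[1, 2], [2, 3], [1, 2]]))  # some permutation of [[1, 2], [2, 3]]

# Unhashable and unorderable elements (list vs. dict): brute-force quadratic path.
print(unique([[1], {2: 3}, [1]]))        # [[1], {2: 3}]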
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/_vendor/pyparsing.py
python
ParseBaseException.markInputline
( self, markerString = ">!<" )
return line_str.strip()
Extracts the exception line from the input string, and marks the location of the exception with a special symbol.
Extracts the exception line from the input string, and marks the location of the exception with a special symbol.
[ "Extracts", "the", "exception", "line", "from", "the", "input", "string", "and", "marks", "the", "location", "of", "the", "exception", "with", "a", "special", "symbol", "." ]
def markInputline( self, markerString = ">!<" ): """Extracts the exception line from the input string, and marks the location of the exception with a special symbol. """ line_str = self.line line_column = self.column - 1 if markerString: line_str = "".join((line_str[:line_column], markerString, line_str[line_column:])) return line_str.strip()
[ "def", "markInputline", "(", "self", ",", "markerString", "=", "\">!<\"", ")", ":", "line_str", "=", "self", ".", "line", "line_column", "=", "self", ".", "column", "-", "1", "if", "markerString", ":", "line_str", "=", "\"\"", ".", "join", "(", "(", "line_str", "[", ":", "line_column", "]", ",", "markerString", ",", "line_str", "[", "line_column", ":", "]", ")", ")", "return", "line_str", ".", "strip", "(", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/setuptools/_vendor/pyparsing.py#L248-L257
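A usage sketch for markInputline, assuming an installed (or vendored) pyparsing; ParseException is the concrete subclass raised by parseString:

from pyparsing import ParseException, Word, nums

integer = Word(nums)
try:
    # parseAll=True forces a failure at the first non-digit character.
    integer.parseString("12a34", parseAll=True)
except ParseException as err:
    print(err.markInputline())  # 12>!<a34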
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/nccl_ops.py
python
_apply_reduce
(reduction, tensors)
return result
Helper function for reduce_* functions.
Helper function for reduce_* functions.
[ "Helper", "function", "for", "reduce_", "*", "functions", "." ]
def _apply_reduce(reduction, tensors): """Helper function for reduce_* functions.""" if not tensors: raise ValueError('Must pass >0 tensors to reduce operations') for t in tensors: _check_device(t) result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction) try: next(t for t in tensors if t.device == result.device) except StopIteration: raise ValueError('One input tensor must be assigned to current device') return result
[ "def", "_apply_reduce", "(", "reduction", ",", "tensors", ")", ":", "if", "not", "tensors", ":", "raise", "ValueError", "(", "'Must pass >0 tensors to reduce operations'", ")", "for", "t", "in", "tensors", ":", "_check_device", "(", "t", ")", "result", "=", "gen_nccl_ops", ".", "nccl_reduce", "(", "input", "=", "tensors", ",", "reduction", "=", "reduction", ")", "try", ":", "next", "(", "t", "for", "t", "in", "tensors", "if", "t", ".", "device", "==", "result", ".", "device", ")", "except", "StopIteration", ":", "raise", "ValueError", "(", "'One input tensor must be assigned to current device'", ")", "return", "result" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/nccl_ops.py#L237-L249
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
tools/python/google/path_utils.py
python
FindAncestor
(start_dir, ancestor)
Finds an ancestor dir in a path. For example, FindAncestor('c:\foo\bar\baz', 'bar') would return 'c:\foo\bar'. Unlike FindUpward*, this only looks at direct path ancestors.
Finds an ancestor dir in a path.
[ "Finds", "an", "ancestor", "dir", "in", "a", "path", "." ]
def FindAncestor(start_dir, ancestor): """Finds an ancestor dir in a path. For example, FindAncestor('c:\foo\bar\baz', 'bar') would return 'c:\foo\bar'. Unlike FindUpward*, this only looks at direct path ancestors. """ start_dir = os.path.abspath(start_dir) path = start_dir while True: (parent, tail) = os.path.split(path) if tail == ancestor: return path if not tail: break path = parent raise PathNotFound("Unable to find ancestor %s in %s" % (ancestor, start_dir))
[ "def", "FindAncestor", "(", "start_dir", ",", "ancestor", ")", ":", "start_dir", "=", "os", ".", "path", ".", "abspath", "(", "start_dir", ")", "path", "=", "start_dir", "while", "True", ":", "(", "parent", ",", "tail", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "tail", "==", "ancestor", ":", "return", "path", "if", "not", "tail", ":", "break", "path", "=", "parent", "raise", "PathNotFound", "(", "\"Unable to find ancestor %s in %s\"", "%", "(", "ancestor", ",", "start_dir", ")", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/python/google/path_utils.py#L21-L36
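A runnable sketch of the FindAncestor walk; it raises ValueError instead of the module's PathNotFound so it is self-contained, and the example assumes a POSIX path layout:

import os

def find_ancestor(start_dir, ancestor):
    path = os.path.abspath(start_dir)
    while True:
        parent, tail = os.path.split(path)
        if tail == ancestor:
            return path
        if not tail:  # reached the filesystem root without a match
            raise ValueError("%s not an ancestor of %s" % (ancestor, start_dir))
        path = parent

print(find_ancestor("/home/user/project/src", "project"))  # /home/user/project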
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/pipeline/pipeline/pipeline.py
python
After._thread_init
(cls)
Ensure thread local is initialized.
Ensure thread local is initialized.
[ "Ensure", "thread", "local", "is", "initialized", "." ]
def _thread_init(cls): """Ensure thread local is initialized.""" if not hasattr(cls._local, '_after_all_futures'): cls._local._after_all_futures = []
[ "def", "_thread_init", "(", "cls", ")", ":", "if", "not", "hasattr", "(", "cls", ".", "_local", ",", "'_after_all_futures'", ")", ":", "cls", ".", "_local", ".", "_after_all_futures", "=", "[", "]" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/pipeline/pipeline/pipeline.py#L1175-L1178
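The guard in _thread_init is the usual lazy-initialization idiom for threading.local; a standalone sketch of the pattern (the class and attribute names here are illustrative, not the pipeline library's):

import threading

class After:
    _local = threading.local()

    @classmethod
    def _thread_init(cls):
        # threading.local gives each thread its own attribute namespace,
        # so this guard runs (at most once) per thread, not per process.
        if not hasattr(cls._local, "futures"):
            cls._local.futures = []

def worker():
    After._thread_init()
    After._local.futures.append(threading.current_thread().name)
    print(After._local.futures)  # always one element: this thread's own list

threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()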
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py
python
shape1
(tik_instance, input_x_shape, ori_shape, input_x, res)
return tik_instance, res
shape1
shape1
[ "shape1" ]
def shape1(tik_instance, input_x_shape, ori_shape, input_x, res): """shape1""" if ori_shape == (147, 147): phase_1 = 16384 blocks = 32 each_block_element = phase_1 // blocks + 64 with tik_instance.for_range(0, blocks, block_num=blocks) as block_index: input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub", scope=tik.scope_ubuf) broadcast_0_local_ub = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_ub", scope=tik.scope_ubuf) tik_instance.data_move(input_x_ub, input_x[512 * block_index], 0, 1, 512 // 8, 0, 0) line_id = block_index % 19 tik_instance.data_move(input_x_ub[512], input_x[16384 + 128 * line_id], 0, 1, 8, 0, 0) repeat_time = each_block_element // 64 tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8) tik_instance.vmax(19, input_x_ub, input_x_ub, input_x_ub[512], 1, 1, 1, 1, 8, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[256], 4, 1, 1, 1, 8, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[128], 2, 1, 1, 1, 8, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[64], 1, 1, 1, 1, 8, 8, 8) tik_instance, res = _update_tik(tik_instance, input_x_ub, broadcast_0_local_ub, block_index, res) elif ori_shape in ((256, 256), None, (-1, -1)): total_elements1 = 1 for val in input_x_shape: total_elements1 *= val blocks = 32 each_block_element = total_elements1 // blocks with tik_instance.for_range(0, blocks, block_num=blocks) as block_index: input_x_ub = tik_instance.Tensor("float32", (each_block_element,), name="input_x_ub", scope=tik.scope_ubuf) broadcast_0_local_ub = tik_instance.Tensor("float32", (4096,), name="broadcast_0_local_ub", scope=tik.scope_ubuf) tik_instance.data_move(input_x_ub, input_x[each_block_element * block_index], 0, 1, each_block_element // 8, 0, 0) repeat_time = each_block_element // 64 tik_instance.vabs(64, input_x_ub, input_x_ub, repeat_time, 1, 1, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[512], 8, 1, 1, 1, 8, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[256], 4, 1, 1, 1, 8, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[128], 2, 1, 1, 1, 8, 8, 8) tik_instance.vmax(64, input_x_ub, input_x_ub, input_x_ub[64], 1, 1, 1, 1, 8, 8, 8) tik_instance, res = _update_tik(tik_instance, input_x_ub, broadcast_0_local_ub, block_index, res) else: raise RuntimeError("origin shape %s is not supported" % str(ori_shape)) return tik_instance, res
[ "def", "shape1", "(", "tik_instance", ",", "input_x_shape", ",", "ori_shape", ",", "input_x", ",", "res", ")", ":", "if", "ori_shape", "==", "(", "147", ",", "147", ")", ":", "phase_1", "=", "16384", "blocks", "=", "32", "each_block_element", "=", "phase_1", "//", "blocks", "+", "64", "with", "tik_instance", ".", "for_range", "(", "0", ",", "blocks", ",", "block_num", "=", "blocks", ")", "as", "block_index", ":", "input_x_ub", "=", "tik_instance", ".", "Tensor", "(", "\"float32\"", ",", "(", "each_block_element", ",", ")", ",", "name", "=", "\"input_x_ub\"", ",", "scope", "=", "tik", ".", "scope_ubuf", ")", "broadcast_0_local_ub", "=", "tik_instance", ".", "Tensor", "(", "\"float32\"", ",", "(", "4096", ",", ")", ",", "name", "=", "\"broadcast_0_local_ub\"", ",", "scope", "=", "tik", ".", "scope_ubuf", ")", "tik_instance", ".", "data_move", "(", "input_x_ub", ",", "input_x", "[", "512", "*", "block_index", "]", ",", "0", ",", "1", ",", "512", "//", "8", ",", "0", ",", "0", ")", "line_id", "=", "block_index", "%", "19", "tik_instance", ".", "data_move", "(", "input_x_ub", "[", "512", "]", ",", "input_x", "[", "16384", "+", "128", "*", "line_id", "]", ",", "0", ",", "1", ",", "8", ",", "0", ",", "0", ")", "repeat_time", "=", "each_block_element", "//", "64", "tik_instance", ".", "vabs", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "repeat_time", ",", "1", ",", "1", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "19", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "512", "]", ",", "1", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "256", "]", ",", "4", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "128", "]", ",", "2", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "64", "]", ",", "1", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ",", "res", "=", "_update_tik", "(", "tik_instance", ",", "input_x_ub", ",", "broadcast_0_local_ub", ",", "block_index", ",", "res", ")", "elif", "ori_shape", "in", "(", "(", "256", ",", "256", ")", ",", "None", ",", "(", "-", "1", ",", "-", "1", ")", ")", ":", "total_elements1", "=", "1", "for", "val", "in", "input_x_shape", ":", "total_elements1", "*=", "val", "blocks", "=", "32", "each_block_element", "=", "total_elements1", "//", "blocks", "with", "tik_instance", ".", "for_range", "(", "0", ",", "blocks", ",", "block_num", "=", "blocks", ")", "as", "block_index", ":", "input_x_ub", "=", "tik_instance", ".", "Tensor", "(", "\"float32\"", ",", "(", "each_block_element", ",", ")", ",", "name", "=", "\"input_x_ub\"", ",", "scope", "=", "tik", ".", "scope_ubuf", ")", "broadcast_0_local_ub", "=", "tik_instance", ".", "Tensor", "(", "\"float32\"", ",", "(", "4096", ",", ")", ",", "name", "=", "\"broadcast_0_local_ub\"", ",", "scope", "=", "tik", ".", "scope_ubuf", ")", "tik_instance", ".", "data_move", "(", "input_x_ub", ",", "input_x", "[", "each_block_element", "*", "block_index", "]", ",", "0", ",", "1", ",", "each_block_element", "//", "8", ",", "0", ",", "0", ")", "repeat_time", "=", "each_block_element", "//", "64", "tik_instance", ".", "vabs", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "repeat_time", ",", "1", ",", "1", ",", 
"8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "512", "]", ",", "8", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "256", "]", ",", "4", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "128", "]", ",", "2", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ".", "vmax", "(", "64", ",", "input_x_ub", ",", "input_x_ub", ",", "input_x_ub", "[", "64", "]", ",", "1", ",", "1", ",", "1", ",", "1", ",", "8", ",", "8", ",", "8", ")", "tik_instance", ",", "res", "=", "_update_tik", "(", "tik_instance", ",", "input_x_ub", ",", "broadcast_0_local_ub", ",", "block_index", ",", "res", ")", "else", ":", "raise", "RuntimeError", "(", "\"origin shape %s is not supported\"", "%", "str", "(", "ori_shape", ")", ")", "return", "tik_instance", ",", "res" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py#L100-L143
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/python/debug/cli/debugger_cli_common.py
python
RichTextLines.append
(self, line, font_attr_segs=None)
Append a single line of text. Args: line: (str) The text to be added to the end. font_attr_segs: (list of tuples) Font attribute segments of the appended line.
Append a single line of text.
[ "Append", "a", "single", "line", "of", "text", "." ]
def append(self, line, font_attr_segs=None): """Append a single line of text. Args: line: (str) The text to be added to the end. font_attr_segs: (list of tuples) Font attribute segments of the appended line. """ self._lines.append(line) if font_attr_segs: self._font_attr_segs[len(self._lines) - 1] = font_attr_segs
[ "def", "append", "(", "self", ",", "line", ",", "font_attr_segs", "=", "None", ")", ":", "self", ".", "_lines", ".", "append", "(", "line", ")", "if", "font_attr_segs", ":", "self", ".", "_font_attr_segs", "[", "len", "(", "self", ".", "_lines", ")", "-", "1", "]", "=", "font_attr_segs" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/python/debug/cli/debugger_cli_common.py#L314-L325
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/valgrind/suppressions.py
python
ValgrindStyleSuppression.__str__
(self)
return "{\n %s\n}\n" % "\n ".join(lines)
Stringify.
Stringify.
[ "Stringify", "." ]
def __str__(self): """Stringify.""" lines = [self.description, self.type] + self.stack return "{\n %s\n}\n" % "\n ".join(lines)
[ "def", "__str__", "(", "self", ")", ":", "lines", "=", "[", "self", ".", "description", ",", "self", ".", "type", "]", "+", "self", ".", "stack", "return", "\"{\\n %s\\n}\\n\"", "%", "\"\\n \"", ".", "join", "(", "lines", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/valgrind/suppressions.py#L242-L245
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/numpy/py2/numpy/polynomial/chebyshev.py
python
chebsub
(c1, c2)
return pu.trimseq(ret)
Subtract one Chebyshev series from another. Returns the difference of two Chebyshev series `c1` - `c2`. The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Chebyshev series coefficients ordered from low to high. Returns ------- out : ndarray Of Chebyshev series coefficients representing their difference. See Also -------- chebadd, chebmulx, chebmul, chebdiv, chebpow Notes ----- Unlike multiplication, division, etc., the difference of two Chebyshev series is a Chebyshev series (without having to "reproject" the result onto the basis set) so subtraction, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial import chebyshev as C >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebsub(c1,c2) array([-2., 0., 2.]) >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) array([ 2., 0., -2.])
Subtract one Chebyshev series from another.
[ "Subtract", "one", "Chebyshev", "series", "from", "another", "." ]
def chebsub(c1, c2): """ Subtract one Chebyshev series from another. Returns the difference of two Chebyshev series `c1` - `c2`. The sequences of coefficients are from lowest order term to highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. Parameters ---------- c1, c2 : array_like 1-D arrays of Chebyshev series coefficients ordered from low to high. Returns ------- out : ndarray Of Chebyshev series coefficients representing their difference. See Also -------- chebadd, chebmulx, chebmul, chebdiv, chebpow Notes ----- Unlike multiplication, division, etc., the difference of two Chebyshev series is a Chebyshev series (without having to "reproject" the result onto the basis set) so subtraction, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial import chebyshev as C >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebsub(c1,c2) array([-2., 0., 2.]) >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) array([ 2., 0., -2.]) """ # c1, c2 are trimmed copies [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c1[:c2.size] -= c2 ret = c1 else: c2 = -c2 c2[:c1.size] += c1 ret = c2 return pu.trimseq(ret)
[ "def", "chebsub", "(", "c1", ",", "c2", ")", ":", "# c1, c2 are trimmed copies", "[", "c1", ",", "c2", "]", "=", "pu", ".", "as_series", "(", "[", "c1", ",", "c2", "]", ")", "if", "len", "(", "c1", ")", ">", "len", "(", "c2", ")", ":", "c1", "[", ":", "c2", ".", "size", "]", "-=", "c2", "ret", "=", "c1", "else", ":", "c2", "=", "-", "c2", "c2", "[", ":", "c1", ".", "size", "]", "+=", "c1", "ret", "=", "c2", "return", "pu", ".", "trimseq", "(", "ret", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/numpy/py2/numpy/polynomial/chebyshev.py#L611-L661
KhronosGroup/SPIRV-LLVM
1eb85593f3fe2c39379b9a9b088d51eda4f42b8b
utils/llvm-build/llvmbuild/main.py
python
cmake_quote_string
(value)
return value
cmake_quote_string(value) -> str Return a quoted form of the given value that is suitable for use in CMake language files.
cmake_quote_string(value) -> str
[ "cmake_quote_string", "(", "value", ")", "-", ">", "str" ]
def cmake_quote_string(value): """ cmake_quote_string(value) -> str Return a quoted form of the given value that is suitable for use in CMake language files. """ # Currently, we only handle escaping backslashes. value = value.replace("\\", "\\\\") return value
[ "def", "cmake_quote_string", "(", "value", ")", ":", "# Currently, we only handle escaping backslashes.", "value", "=", "value", ".", "replace", "(", "\"\\\\\"", ",", "\"\\\\\\\\\"", ")", "return", "value" ]
https://github.com/KhronosGroup/SPIRV-LLVM/blob/1eb85593f3fe2c39379b9a9b088d51eda4f42b8b/utils/llvm-build/llvmbuild/main.py#L13-L24
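Quick behavior check for cmake_quote_string, assuming the function above is in scope (only backslashes are escaped, per its comment):

print(cmake_quote_string(r"C:\llvm\lib"))    # C:\\llvm\\lib (each backslash doubled)
print(cmake_quote_string("no backslashes"))  # unchanged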
OpenXRay/xray-15
1390dfb08ed20997d7e8c95147ea8e8cb71f5e86
cs/sdk/3d_sdk/maya/ver-2008/devkit/plug-ins/scripted/motionTraceCmd.py
python
motionTrace.__jMakeCurve
(self, cvs)
Make a degree 1 curve from the given CVs. Note that in Python, a double underscore in front of a member name makes the method "private" to the class through name-mangling
Make a degree 1 curve from the given CVs. Note that in Python, a double underscore in front of a member name makes the method "private" to the class through name-mangling
[ "Make", "a", "degree", "1", "curve", "from", "the", "given", "CVs", ".", "Note", "that", "in", "Python", "a", "double", "underscore", "in", "front", "of", "a", "member", "name", "makes", "the", "method", "private", "to", "the", "class", "through", "name", "-", "mangling" ]
def __jMakeCurve(self, cvs):
    """
    Make a degree 1 curve from the given CVs.
    Note that in Python, a double underscore in front of a member name
    makes the method "private" to the class through name-mangling
    """
    deg = 1
    knots = OpenMaya.MDoubleArray()
    for i in range(cvs.length()):
        knots.append(i)

    # Now create the curve
    nullObj = OpenMaya.MObject()
    curveFn = OpenMaya.MFnNurbsCurve()
    curveFn.create(cvs, knots, deg, OpenMaya.MFnNurbsCurve.kOpen, False, False, nullObj)
[ "def", "__jMakeCurve", "(", "self", ",", "cvs", ")", ":", "deg", "=", "1", "knots", "=", "OpenMaya", ".", "MDoubleArray", "(", ")", "for", "i", "in", "range", "(", "cvs", ".", "length", "(", ")", ")", ":", "knots", ".", "append", "(", "i", ")", "# Now create the curve", "nullObj", "=", "OpenMaya", ".", "MObject", "(", ")", "curveFn", "=", "OpenMaya", ".", "MFnNurbsCurve", "(", ")", "curveFn", ".", "create", "(", "cvs", ",", "knots", ",", "deg", ",", "OpenMaya", ".", "MFnNurbsCurve", ".", "kOpen", ",", "False", ",", "False", ",", "nullObj", ")" ]
https://github.com/OpenXRay/xray-15/blob/1390dfb08ed20997d7e8c95147ea8e8cb71f5e86/cs/sdk/3d_sdk/maya/ver-2008/devkit/plug-ins/scripted/motionTraceCmd.py#L173-L189
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py
python
ParserElement.setResultsName
( self, name, listAllMatches=False )
return newself
Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names.
[ "Define", "name", "for", "referencing", "matching", "tokens", "as", "a", "nested", "attribute", "of", "the", "returned", "parse", "results", ".", "NOTE", ":", "this", "returns", "a", "*", "copy", "*", "of", "the", "original", "C", "{", "ParserElement", "}", "object", ";", "this", "is", "so", "that", "the", "client", "can", "define", "a", "basic", "element", "such", "as", "an", "integer", "and", "reference", "it", "in", "multiple", "places", "with", "different", "names", "." ]
def setResultsName( self, name, listAllMatches=False ): """ Define name for referencing matching tokens as a nested attribute of the returned parse results. NOTE: this returns a *copy* of the original C{ParserElement} object; this is so that the client can define a basic element, such as an integer, and reference it in multiple places with different names. You can also set results names using the abbreviated syntax, C{expr("name")} in place of C{expr.setResultsName("name")} - see L{I{__call__}<__call__>}. Example:: date_str = (integer.setResultsName("year") + '/' + integer.setResultsName("month") + '/' + integer.setResultsName("day")) # equivalent form: date_str = integer("year") + '/' + integer("month") + '/' + integer("day") """ newself = self.copy() if name.endswith("*"): name = name[:-1] listAllMatches=True newself.resultsName = name newself.modalResults = not listAllMatches return newself
[ "def", "setResultsName", "(", "self", ",", "name", ",", "listAllMatches", "=", "False", ")", ":", "newself", "=", "self", ".", "copy", "(", ")", "if", "name", ".", "endswith", "(", "\"*\"", ")", ":", "name", "=", "name", "[", ":", "-", "1", "]", "listAllMatches", "=", "True", "newself", ".", "resultsName", "=", "name", "newself", ".", "modalResults", "=", "not", "listAllMatches", "return", "newself" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py#L1181-L1207
BitMEX/api-connectors
37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812
auto-generated/python/swagger_client/models/error.py
python
Error.__init__
(self, error=None)
Error - a model defined in Swagger
Error - a model defined in Swagger
[ "Error", "-", "a", "model", "defined", "in", "Swagger" ]
def __init__(self, error=None): # noqa: E501 """Error - a model defined in Swagger""" # noqa: E501 self._error = None self.discriminator = None self.error = error
[ "def", "__init__", "(", "self", ",", "error", "=", "None", ")", ":", "# noqa: E501", "# noqa: E501", "self", ".", "_error", "=", "None", "self", ".", "discriminator", "=", "None", "self", ".", "error", "=", "error" ]
https://github.com/BitMEX/api-connectors/blob/37a3a5b806ad5d0e0fc975ab86d9ed43c3bcd812/auto-generated/python/swagger_client/models/error.py#L41-L47
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/python/ops/linalg/linear_operator.py
python
LinearOperator.log_abs_determinant
(self, name="log_abs_det")
Log absolute value of determinant for every batch member. Args: name: A name for this `Op. Returns: `Tensor` with shape `self.batch_shape` and same `dtype` as `self`. Raises: NotImplementedError: If `self.is_square` is `False`.
Log absolute value of determinant for every batch member.
[ "Log", "absolute", "value", "of", "determinant", "for", "every", "batch", "member", "." ]
def log_abs_determinant(self, name="log_abs_det"): """Log absolute value of determinant for every batch member. Args: name: A name for this `Op. Returns: `Tensor` with shape `self.batch_shape` and same `dtype` as `self`. Raises: NotImplementedError: If `self.is_square` is `False`. """ if self.is_square is False: raise NotImplementedError( "Determinant not implemented for an operator that is expected to " "not be square.") with self._name_scope(name): return self._log_abs_determinant()
[ "def", "log_abs_determinant", "(", "self", ",", "name", "=", "\"log_abs_det\"", ")", ":", "if", "self", ".", "is_square", "is", "False", ":", "raise", "NotImplementedError", "(", "\"Determinant not implemented for an operator that is expected to \"", "\"not be square.\"", ")", "with", "self", ".", "_name_scope", "(", "name", ")", ":", "return", "self", ".", "_log_abs_determinant", "(", ")" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/linalg/linear_operator.py#L699-L716
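A usage sketch through a concrete subclass, since LinearOperator itself is abstract; this assumes TensorFlow 2.x running eagerly, where LinearOperatorDiag is part of tf.linalg:

import numpy as np
import tensorflow as tf

op = tf.linalg.LinearOperatorDiag([2.0, -3.0])
print(op.log_abs_determinant().numpy())  # log|det| = log|2 * -3| ~= 1.7918
print(np.log(abs(2.0 * -3.0)))           # same value computed directly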
PX4/PX4-Autopilot
0b9f60a0370be53d683352c63fd92db3d6586e18
src/lib/mixer/MultirotorMixer/geometries/tools/px_generate_mixers.py
python
geometry_to_thrust_matrix
(geometry)
return At
Compute thrust matrix At from geometry dictionary At is a 3xN matrix where N is the number of rotors Each column is the thrust generated by one rotor
Compute thrust matrix At from geometry dictionary At is a 3xN matrix where N is the number of rotors Each column is the thrust generated by one rotor
[ "Compute", "thrust", "matrix", "At", "from", "geometry", "dictionary", "At", "is", "a", "3xN", "matrix", "where", "N", "is", "the", "number", "of", "rotors", "Each", "column", "is", "the", "thrust", "generated", "by", "one", "rotor" ]
def geometry_to_thrust_matrix(geometry):
    ''' Compute thrust matrix At from geometry dictionary
    At is a 3xN matrix where N is the number of rotors
    Each column is the thrust generated by one rotor
    '''
    At = thrust_matrix(axis=np.array([rotor['axis'] for rotor in geometry['rotors']]),
                       Ct=np.array([[rotor['Ct']] for rotor in geometry['rotors']])).T
    return At
[ "def", "geometry_to_thrust_matrix", "(", "geometry", ")", ":", "At", "=", "thrust_matrix", "(", "axis", "=", "np", ".", "array", "(", "[", "rotor", "[", "'axis'", "]", "for", "rotor", "in", "geometry", "[", "'rotors'", "]", "]", ")", ",", "Ct", "=", "np", ".", "array", "(", "[", "[", "rotor", "[", "'Ct'", "]", "]", "for", "rotor", "in", "geometry", "[", "'rotors'", "]", "]", ")", ")", ".", "T", "return", "At" ]
https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/src/lib/mixer/MultirotorMixer/geometries/tools/px_generate_mixers.py#L165-L174
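A standalone numpy sketch of the same construction, assuming (consistent with the docstring) that each rotor contributes one thrust column equal to its axis scaled by its Ct; the geometry values are illustrative and the module's thrust_matrix helper is not used:

import numpy as np

geometry = {"rotors": [
    {"axis": [0.0, 0.0, -1.0], "Ct": 1.0},  # thrust along body -Z
    {"axis": [0.0, 0.0, -1.0], "Ct": 0.9},
]}

axes = np.array([r["axis"] for r in geometry["rotors"]])  # N x 3
cts = np.array([[r["Ct"]] for r in geometry["rotors"]])   # N x 1
At = (cts * axes).T                                       # 3 x N, one column per rotor
print(At)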
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/tools/Editra/src/ed_style.py
python
StyleItem.GetSize
(self)
return self.size
Returns the value of the size attribute as a string @return: style items font size attribute
Returns the value of the size attribute as a string @return: style items font size attribute
[ "Returns", "the", "value", "of", "the", "size", "attribute", "as", "a", "string", "@return", ":", "style", "items", "font", "size", "attribute" ]
def GetSize(self): """Returns the value of the size attribute as a string @return: style items font size attribute """ return self.size
[ "def", "GetSize", "(", "self", ")", ":", "return", "self", ".", "size" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/tools/Editra/src/ed_style.py#L171-L176
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/propgrid.py
python
PropertyGrid.IsEditorFocused
(*args, **kwargs)
return _propgrid.PropertyGrid_IsEditorFocused(*args, **kwargs)
IsEditorFocused(self) -> bool
IsEditorFocused(self) -> bool
[ "IsEditorFocused", "(", "self", ")", "-", ">", "bool" ]
def IsEditorFocused(*args, **kwargs): """IsEditorFocused(self) -> bool""" return _propgrid.PropertyGrid_IsEditorFocused(*args, **kwargs)
[ "def", "IsEditorFocused", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_propgrid", ".", "PropertyGrid_IsEditorFocused", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/propgrid.py#L2154-L2156
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pyclbr.py
python
_readmodule
(module, path, inpackage=None)
return _create_tree(fullmodule, path, fname, source, tree, inpackage)
Do the hard work for readmodule[_ex]. If inpackage is given, it must be the dotted name of the package in which we are searching for a submodule, and then PATH must be the package search path; otherwise, we are searching for a top-level module, and path is combined with sys.path.
Do the hard work for readmodule[_ex].
[ "Do", "the", "hard", "work", "for", "readmodule", "[", "_ex", "]", "." ]
def _readmodule(module, path, inpackage=None): """Do the hard work for readmodule[_ex]. If inpackage is given, it must be the dotted name of the package in which we are searching for a submodule, and then PATH must be the package search path; otherwise, we are searching for a top-level module, and path is combined with sys.path. """ # Compute the full module name (prepending inpackage if set). if inpackage is not None: fullmodule = "%s.%s" % (inpackage, module) else: fullmodule = module # Check in the cache. if fullmodule in _modules: return _modules[fullmodule] # Initialize the dict for this module's contents. tree = {} # Check if it is a built-in module; we don't do much for these. if module in sys.builtin_module_names and inpackage is None: _modules[module] = tree return tree # Check for a dotted module name. i = module.rfind('.') if i >= 0: package = module[:i] submodule = module[i+1:] parent = _readmodule(package, path, inpackage) if inpackage is not None: package = "%s.%s" % (inpackage, package) if not '__path__' in parent: raise ImportError('No package named {}'.format(package)) return _readmodule(submodule, parent['__path__'], package) # Search the path for the module. f = None if inpackage is not None: search_path = path else: search_path = path + sys.path spec = importlib.util._find_spec_from_path(fullmodule, search_path) _modules[fullmodule] = tree # Is module a package? if spec.submodule_search_locations is not None: tree['__path__'] = spec.submodule_search_locations try: source = spec.loader.get_source(fullmodule) if source is None: return tree except (AttributeError, ImportError): # If module is not Python source, we cannot do anything. return tree fname = spec.loader.get_filename(fullmodule) return _create_tree(fullmodule, path, fname, source, tree, inpackage)
[ "def", "_readmodule", "(", "module", ",", "path", ",", "inpackage", "=", "None", ")", ":", "# Compute the full module name (prepending inpackage if set).", "if", "inpackage", "is", "not", "None", ":", "fullmodule", "=", "\"%s.%s\"", "%", "(", "inpackage", ",", "module", ")", "else", ":", "fullmodule", "=", "module", "# Check in the cache.", "if", "fullmodule", "in", "_modules", ":", "return", "_modules", "[", "fullmodule", "]", "# Initialize the dict for this module's contents.", "tree", "=", "{", "}", "# Check if it is a built-in module; we don't do much for these.", "if", "module", "in", "sys", ".", "builtin_module_names", "and", "inpackage", "is", "None", ":", "_modules", "[", "module", "]", "=", "tree", "return", "tree", "# Check for a dotted module name.", "i", "=", "module", ".", "rfind", "(", "'.'", ")", "if", "i", ">=", "0", ":", "package", "=", "module", "[", ":", "i", "]", "submodule", "=", "module", "[", "i", "+", "1", ":", "]", "parent", "=", "_readmodule", "(", "package", ",", "path", ",", "inpackage", ")", "if", "inpackage", "is", "not", "None", ":", "package", "=", "\"%s.%s\"", "%", "(", "inpackage", ",", "package", ")", "if", "not", "'__path__'", "in", "parent", ":", "raise", "ImportError", "(", "'No package named {}'", ".", "format", "(", "package", ")", ")", "return", "_readmodule", "(", "submodule", ",", "parent", "[", "'__path__'", "]", ",", "package", ")", "# Search the path for the module.", "f", "=", "None", "if", "inpackage", "is", "not", "None", ":", "search_path", "=", "path", "else", ":", "search_path", "=", "path", "+", "sys", ".", "path", "spec", "=", "importlib", ".", "util", ".", "_find_spec_from_path", "(", "fullmodule", ",", "search_path", ")", "_modules", "[", "fullmodule", "]", "=", "tree", "# Is module a package?", "if", "spec", ".", "submodule_search_locations", "is", "not", "None", ":", "tree", "[", "'__path__'", "]", "=", "spec", ".", "submodule_search_locations", "try", ":", "source", "=", "spec", ".", "loader", ".", "get_source", "(", "fullmodule", ")", "if", "source", "is", "None", ":", "return", "tree", "except", "(", "AttributeError", ",", "ImportError", ")", ":", "# If module is not Python source, we cannot do anything.", "return", "tree", "fname", "=", "spec", ".", "loader", ".", "get_filename", "(", "fullmodule", ")", "return", "_create_tree", "(", "fullmodule", ",", "path", ",", "fname", ",", "source", ",", "tree", ",", "inpackage", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pyclbr.py#L118-L176
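_readmodule is private; the supported way to reach it is the public stdlib wrapper, which delegates to it and returns a dict of descriptor objects for the module's top-level classes and functions:

import pyclbr

# readmodule_ex calls _readmodule under the hood and returns {name: descriptor}.
tree = pyclbr.readmodule_ex("json.decoder")
print(sorted(tree))  # includes 'JSONDecodeError', 'JSONDecoder', ...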
polyworld/polyworld
eb7e6bbc82fe77ba79e3bc48c3da2ad8c8238c26
scripts/agent/reader.py
python
BackwardsReader.__init__
(self, file, blksize=4096)
initialize the internal structures
initialize the internal structures
[ "initialize", "the", "internal", "structures" ]
def __init__(self, file, blksize=4096): """initialize the internal structures""" # get the file size self.size = os.stat(file)[6] # how big of a block to read from the file... self.blksize = blksize # how many blocks we've read self.blkcount = 1 self.f = open(file, 'rb') # if the file is smaller than the blocksize, read a block, # otherwise, read the whole thing... if self.size > self.blksize: self.f.seek(-self.blksize * self.blkcount, 2) # read from end of file self.data = string.split(self.f.read(self.blksize), '\n') # strip the last item if it's empty... a byproduct of the last line having # a newline at the end of it if not self.data[-1]: # self.data.pop() self.data = self.data[:-1]
[ "def", "__init__", "(", "self", ",", "file", ",", "blksize", "=", "4096", ")", ":", "# get the file size", "self", ".", "size", "=", "os", ".", "stat", "(", "file", ")", "[", "6", "]", "# how big of a block to read from the file...", "self", ".", "blksize", "=", "blksize", "# how many blocks we've read", "self", ".", "blkcount", "=", "1", "self", ".", "f", "=", "open", "(", "file", ",", "'rb'", ")", "# if the file is smaller than the blocksize, read a block,", "# otherwise, read the whole thing...", "if", "self", ".", "size", ">", "self", ".", "blksize", ":", "self", ".", "f", ".", "seek", "(", "-", "self", ".", "blksize", "*", "self", ".", "blkcount", ",", "2", ")", "# read from end of file", "self", ".", "data", "=", "string", ".", "split", "(", "self", ".", "f", ".", "read", "(", "self", ".", "blksize", ")", ",", "'\\n'", ")", "# strip the last item if it's empty... a byproduct of the last line having", "# a newline at the end of it", "if", "not", "self", ".", "data", "[", "-", "1", "]", ":", "# self.data.pop()", "self", ".", "data", "=", "self", ".", "data", "[", ":", "-", "1", "]" ]
https://github.com/polyworld/polyworld/blob/eb7e6bbc82fe77ba79e3bc48c3da2ad8c8238c26/scripts/agent/reader.py#L33-L51
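The reader above is Python 2 code (string.split on a str); a self-contained sketch of the same seek-from-the-end idea in modern Python, with names of my own choosing:

import os

def read_last_block(path, blksize=4096):
    # Seek at most blksize bytes back from the end, then split the tail
    # into lines, dropping the empty piece left by a trailing newline.
    size = os.stat(path).st_size
    with open(path, "rb") as f:
        f.seek(max(size - blksize, 0))
        lines = f.read().split(b"\n")
    if lines and not lines[-1]:
        lines.pop()
    return lines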
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/ipython/py3/IPython/core/magics/execution.py
python
TimeitTemplateFiller.visit_FunctionDef
(self, node)
return node
Fill in the setup statement
Fill in the setup statement
[ "Fill", "in", "the", "setup", "statement" ]
def visit_FunctionDef(self, node): "Fill in the setup statement" self.generic_visit(node) if node.name == "inner": node.body[:1] = self.ast_setup.body return node
[ "def", "visit_FunctionDef", "(", "self", ",", "node", ")", ":", "self", ".", "generic_visit", "(", "node", ")", "if", "node", ".", "name", "==", "\"inner\"", ":", "node", ".", "body", "[", ":", "1", "]", "=", "self", ".", "ast_setup", ".", "body", "return", "node" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/core/magics/execution.py#L133-L139
scribusproject/scribus
41ec7c775a060912cf251682a8b1437f753f80f4
scribus/plugins/scriptplugin/scripts/Ligatursatz.py
python
GermanLigatureSupport.simple_case_fold_for_lookup
(self, my_unicode_string)
return my_unicode_string.lower().replace("ſ", "s")
Before applying the hyphenation algorithm to a string, some “folding” has to be done. The german word “auffallend” has the ligature ["auf", "fallend"]. If it is the first word of a sentence, then it is written with capital letter “Auffallend”. The “case” (the fact that a letter is a small letter or a capital letter) does not matter. You can read more about this topic in the Unicode standard: 3.13 Default Case Algorithms → Caseless matching The pattern uses lower case. So we have to map all upper case letters in a string to lower case letters before applying the hyphenation algorithm. Unicode describes “full case folding” and “simple case folding”. “full case folding” converts for example both lowercase ß and uppercase ẞ to ss: it maps one original character to two substitution characters. “simple case folding” leaves lowercase ß as is, and converts uppercase ẞ to lowercase ß. I think the only relevant application of this “one-to-many mapping” for the german language is the sharp s. As the pattern is generated for both (normal german with ß and swiss german without ß but with ss), this “one-to-many folding” is not necessary. A simple toLowercase() with additional mapping of the lowercase long s (ſ) to the lowercase normal s should be enough. Preconditions: my_unicode_string is of type “unicode”. Postconditions: Returns a “unicode” value that corresponds to my_unicode_string, but has mapped uppercase characters to lowercase characters – or at least those that are important for our patterns. The mapping is guaranteed to be a one-to-one mapping of Unicode Scalar Values. That means that one Unicode Scalar Value is replaced by exactly one other Unicode Scalar Value. So the count of Unicode Scalar Values of the return value is equal to the count of Unicode Scalar Values of my_unicode_string. (Note that the count of code units might change between input and output if you do not use UTF32.) WARNING This function must be kept in synch with isWordCharacter().
Before applying the hyphenation algorithm to a string, some “folding” has to be done. The german word “auffallend” has the ligature ["auf", "fallend"]. If it is the first word of a sentence, then it is written with capital letter “Auffallend”. The “case” (the fact that a letter is a small letter or a capital letter) does not matter. You can read more about this topic in the Unicode standard: 3.13 Default Case Algorithms → Caseless matching The pattern uses lower case. So we have to map all upper case letters in a string to lower case letters before applying the hyphenation algorithm. Unicode describes “full case folding” and “simple case folding”. “full case folding” converts for example both lowercase ß and uppercase ẞ to ss: it maps one original character to two substitution characters. “simple case folding” leaves lowercase ß as is, and converts uppercase ẞ to lowercase ß. I think the only relevant application of this “one-to-many mapping” for the german language is the sharp s. As the pattern is generated for both (normal german with ß and swiss german without ß but with ss), this “one-to-many folding” is not necessary. A simple toLowercase() with additional mapping of the lowercase long s (ſ) to the lowercase normal s should be enough.
[ "Before", "applying", "the", "hyphenation", "algorithm", "to", "a", "string", "some", "“folding”", "has", "to", "be", "done", ".", "The", "german", "word", "“auffallend”", "has", "the", "ligature", "[", "auf", "fallend", "]", ".", "If", "it", "is", "the", "first", "word", "of", "a", "sentence", "then", "it", "is", "written", "with", "capital", "letter", "“Auffallend”", ".", "The", "“case”", "(", "the", "fact", "that", "a", "letter", "is", "a", "small", "letter", "or", "a", "capital", "letter", ")", "does", "not", "matter", ".", "You", "can", "read", "more", "about", "this", "topic", "in", "the", "Unicode", "standard", ":", "3", ".", "13", "Default", "Case", "Algorithms", "→", "Caseless", "matching", "The", "pattern", "uses", "lower", "case", ".", "So", "we", "have", "to", "map", "all", "upper", "case", "letters", "in", "a", "string", "to", "lower", "case", "letters", "before", "applying", "the", "hyphenation", "algorithm", ".", "Unicode", "describes", "“full", "case", "folding”", "and", "“simple", "case", "folding”", ".", "“full", "case", "folding”", "converts", "for", "example", "both", "lowercase", "ß", "and", "uppercase", "ẞ", "to", "ss", ":", "it", "maps", "one", "original", "character", "to", "two", "substitution", "characters", ".", "“simple", "case", "folding”", "leaves", "lowercase", "ß", "as", "is", "and", "converts", "uppercase", "ẞ", "to", "lowercase", "ß", ".", "I", "think", "the", "only", "relevant", "application", "of", "this", "“one", "-", "to", "-", "many", "mapping”", "for", "the", "german", "language", "is", "the", "sharp", "s", ".", "As", "the", "pattern", "is", "generated", "for", "both", "(", "normal", "german", "with", "ß", "and", "swiss", "german", "without", "ß", "but", "with", "ss", ")", "this", "“one", "-", "to", "-", "many", "folding”", "is", "not", "necessary", ".", "A", "simple", "toLowercase", "()", "with", "additional", "mapping", "of", "the", "lowercase", "long", "s", "(", "ſ", ")", "to", "the", "lowercase", "normal", "s", "should", "be", "enough", "." ]
def simple_case_fold_for_lookup(self, my_unicode_string): """Before applying the hyphenation algorithm to a string, some “folding” has to be done. The german word “auffallend” has the ligature ["auf", "fallend"]. If it is the first word of a sentence, then it is written with capital letter “Auffallend”. The “case” (the fact that a letter is a small letter or a capital letter) does not matter. You can read more about this topic in the Unicode standard: 3.13 Default Case Algorithms → Caseless matching The pattern uses lower case. So we have to map all upper case letters in a string to lower case letters before applying the hyphenation algorithm. Unicode describes “full case folding” and “simple case folding”. “full case folding” converts for example both lowercase ß and uppercase ẞ to ss: it maps one original character to two substitution characters. “simple case folding” leaves lowercase ß as is, and converts uppercase ẞ to lowercase ß. I think the only relevant application of this “one-to-many mapping” for the german language is the sharp s. As the pattern is generated for both (normal german with ß and swiss german without ß but with ss), this “one-to-many folding” is not necessary. A simple toLowercase() with additional mapping of the lowercase long s (ſ) to the lowercase normal s should be enough. Preconditions: my_unicode_string is of type “unicode”. Postconditions: Returns a “unicode” value that corresponds to my_unicode_string, but has mapped uppercase characters to lowercase characters – or at least those that are important for our patterns. The mapping is guaranteed to be a one-to-one mapping of Unicode Scalar Values. That means that one Unicode Scalar Value is replaced by exactly one other Unicode Scalar Value. So the count of Unicode Scalar Values of the return value is equal to the count of Unicode Scalar Values of my_unicode_string. (Note that the count of code units might change between input and output if you do not use UTF32.) WARNING This function must be kept in synch with isWordCharacter(). """ if type(my_unicode_string) is not str: raise TypeError("The “my_unicode_string” parameter must be of " "type “unicode”, but it isn’t.") return my_unicode_string.lower().replace("ſ", "s")
[ "def", "simple_case_fold_for_lookup", "(", "self", ",", "my_unicode_string", ")", ":", "if", "type", "(", "my_unicode_string", ")", "is", "not", "str", ":", "raise", "TypeError", "(", "\"The “my_unicode_string” parameter must be of \"", "\"type “unicode”, but it isn’t.\")", "", "return", "my_unicode_string", ".", "lower", "(", ")", ".", "replace", "(", "\"ſ\",", " ", "s\")", "" ]
https://github.com/scribusproject/scribus/blob/41ec7c775a060912cf251682a8b1437f753f80f4/scribus/plugins/scriptplugin/scripts/Ligatursatz.py#L243-L295
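A compact check of the folding contract described above (one Unicode scalar value in, one out), assuming Python 3 str; the sample words are arbitrary:

def fold(text):
    # Lowercase everything, then map long s (U+017F) to a normal s.
    return text.lower().replace("ſ", "s")

assert fold("Auffallend") == "auffallend"
assert fold("Wachſtube") == "wachstube"
assert len(fold("Straße")) == len("Straße")  # simple (not full) case folding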
FreeCAD/FreeCAD
ba42231b9c6889b89e064d6d563448ed81e376ec
src/Mod/Start/StartPage/StartPage.py
python
postStart
()
executes needed operations after loading a file
executes needed operations after loading a file
[ "executes", "needed", "operations", "after", "loading", "a", "file" ]
def postStart(): "executes needed operations after loading a file" param = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Start") # switch workbench wb = param.GetString("AutoloadModule","") if "$LastModule" == wb: wb = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/General").GetString("LastModule","") if wb: # don't switch workbenches if we are not in Start anymore if FreeCADGui.activeWorkbench() and (FreeCADGui.activeWorkbench().name() == "StartWorkbench"): FreeCADGui.activateWorkbench(wb) # close start tab cl = param.GetBool("closeStart",False) if cl: title = QtGui.QApplication.translate("Workbench","Start page") mw = FreeCADGui.getMainWindow() if mw: mdi = mw.findChild(QtGui.QMdiArea) if mdi: for mdichild in mdi.children(): for subw in mdichild.findChildren(QtGui.QMdiSubWindow): if subw.windowTitle() == title: subw.close()
[ "def", "postStart", "(", ")", ":", "param", "=", "FreeCAD", ".", "ParamGet", "(", "\"User parameter:BaseApp/Preferences/Mod/Start\"", ")", "# switch workbench", "wb", "=", "param", ".", "GetString", "(", "\"AutoloadModule\"", ",", "\"\"", ")", "if", "\"$LastModule\"", "==", "wb", ":", "wb", "=", "FreeCAD", ".", "ParamGet", "(", "\"User parameter:BaseApp/Preferences/General\"", ")", ".", "GetString", "(", "\"LastModule\"", ",", "\"\"", ")", "if", "wb", ":", "# don't switch workbenches if we are not in Start anymore", "if", "FreeCADGui", ".", "activeWorkbench", "(", ")", "and", "(", "FreeCADGui", ".", "activeWorkbench", "(", ")", ".", "name", "(", ")", "==", "\"StartWorkbench\"", ")", ":", "FreeCADGui", ".", "activateWorkbench", "(", "wb", ")", "# close start tab", "cl", "=", "param", ".", "GetBool", "(", "\"closeStart\"", ",", "False", ")", "if", "cl", ":", "title", "=", "QtGui", ".", "QApplication", ".", "translate", "(", "\"Workbench\"", ",", "\"Start page\"", ")", "mw", "=", "FreeCADGui", ".", "getMainWindow", "(", ")", "if", "mw", ":", "mdi", "=", "mw", ".", "findChild", "(", "QtGui", ".", "QMdiArea", ")", "if", "mdi", ":", "for", "mdichild", "in", "mdi", ".", "children", "(", ")", ":", "for", "subw", "in", "mdichild", ".", "findChildren", "(", "QtGui", ".", "QMdiSubWindow", ")", ":", "if", "subw", ".", "windowTitle", "(", ")", "==", "title", ":", "subw", ".", "close", "(", ")" ]
https://github.com/FreeCAD/FreeCAD/blob/ba42231b9c6889b89e064d6d563448ed81e376ec/src/Mod/Start/StartPage/StartPage.py#L603-L629
baidu/AnyQ
d94d450d2aaa5f7ed73424b10aa4539835b97527
tools/simnet/train/paddle/util/data_reader.py
python
get_reader
(conf_dict, is_infer, samples_file)
Get Reader
Get Reader
[ "Get", "Reader" ]
def get_reader(conf_dict, is_infer, samples_file): """ Get Reader """ def reader_with_pairwise(): """ Reader with Pairwise """ if is_infer: with open(conf_dict["test_file_path"]) as file: for line in file: if not utils.pattern_match(r"(\d+)\t(\d+)\t((\d+ )*\d+)\t((\d+ )*\d+)\n", line): logging.warning("line not match format in test file") continue items = line.strip("\n").split("\t") query = [int(id) for id in items[2].split(" ")] title = [int(id) for id in items[3].split(" ")] if samples_file: samples_file.write(line) yield [query, title] else: with open(conf_dict["train_file_path"]) as file: for line in file: if not utils.pattern_match(r"((\d+ )*\d+)\t((\d+ )*\d+)\t((\d+ )*\d+)\n", line): logging.warning("line not match format in train file") continue items = line.strip("\n").split("\t") query = [int(id) for id in items[0].split(" ")] pos_title = [int(id) for id in items[1].split(" ")] neg_title = [int(id) for id in items[2].split(" ")] if samples_file: samples_file.write(line) yield [query, pos_title, neg_title] def reader_with_pointwise(): """ Reader with Pointwise """ if is_infer: with open(conf_dict["test_file_path"]) as file: for line in file: if not utils.pattern_match(r"((\d+ )*\d+)\t((\d+ )*\d+)\t(\d+)\n", line): logging.warning("line not match format in test file") continue items = line.strip("\n").split("\t") query = [int(id) for id in items[0].split(" ")] title = [int(id) for id in items[1].split(" ")] if samples_file: samples_file.write(line) yield [query, title] else: with open(conf_dict["train_file_path"]) as file: for line in file: if not utils.pattern_match(r"((\d+ )*\d+)\t((\d+ )*\d+)\t(\d+)\n", line): logging.warning("line not match format in train file: %s" % line) continue items = line.strip("\n").split("\t") query = [int(id) for id in items[0].split(" ")] title = [int(id) for id in items[1].split(" ")] label = int(items[2]) if samples_file: samples_file.write(line) yield [query, title, label] if conf_dict["task_mode"] == "pairwise": return reader_with_pairwise else: return reader_with_pointwise
[ "def", "get_reader", "(", "conf_dict", ",", "is_infer", ",", "samples_file", ")", ":", "def", "reader_with_pairwise", "(", ")", ":", "\"\"\"\n Reader with Pairwise\n \"\"\"", "if", "is_infer", ":", "with", "open", "(", "conf_dict", "[", "\"test_file_path\"", "]", ")", "as", "file", ":", "for", "line", "in", "file", ":", "if", "not", "utils", ".", "pattern_match", "(", "r\"(\\d+)\\t(\\d+)\\t((\\d+ )*\\d+)\\t((\\d+ )*\\d+)\\n\"", ",", "line", ")", ":", "logging", ".", "warning", "(", "\"line not match format in test file\"", ")", "continue", "items", "=", "line", ".", "strip", "(", "\"\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "query", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "2", "]", ".", "split", "(", "\" \"", ")", "]", "title", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "3", "]", ".", "split", "(", "\" \"", ")", "]", "if", "samples_file", ":", "samples_file", ".", "write", "(", "line", ")", "yield", "[", "query", ",", "title", "]", "else", ":", "with", "open", "(", "conf_dict", "[", "\"train_file_path\"", "]", ")", "as", "file", ":", "for", "line", "in", "file", ":", "if", "not", "utils", ".", "pattern_match", "(", "r\"((\\d+ )*\\d+)\\t((\\d+ )*\\d+)\\t((\\d+ )*\\d+)\\n\"", ",", "line", ")", ":", "logging", ".", "warning", "(", "\"line not match format in train file\"", ")", "continue", "items", "=", "line", ".", "strip", "(", "\"\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "query", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "0", "]", ".", "split", "(", "\" \"", ")", "]", "pos_title", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "1", "]", ".", "split", "(", "\" \"", ")", "]", "neg_title", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "2", "]", ".", "split", "(", "\" \"", ")", "]", "if", "samples_file", ":", "samples_file", ".", "write", "(", "line", ")", "yield", "[", "query", ",", "pos_title", ",", "neg_title", "]", "def", "reader_with_pointwise", "(", ")", ":", "\"\"\"\n Reader with Pointwise\n \"\"\"", "if", "is_infer", ":", "with", "open", "(", "conf_dict", "[", "\"test_file_path\"", "]", ")", "as", "file", ":", "for", "line", "in", "file", ":", "if", "not", "utils", ".", "pattern_match", "(", "r\"((\\d+ )*\\d+)\\t((\\d+ )*\\d+)\\t(\\d+)\\n\"", ",", "line", ")", ":", "logging", ".", "warning", "(", "\"line not match format in test file\"", ")", "continue", "items", "=", "line", ".", "strip", "(", "\"\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "query", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "0", "]", ".", "split", "(", "\" \"", ")", "]", "title", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "1", "]", ".", "split", "(", "\" \"", ")", "]", "if", "samples_file", ":", "samples_file", ".", "write", "(", "line", ")", "yield", "[", "query", ",", "title", "]", "else", ":", "with", "open", "(", "conf_dict", "[", "\"train_file_path\"", "]", ")", "as", "file", ":", "for", "line", "in", "file", ":", "if", "not", "utils", ".", "pattern_match", "(", "r\"((\\d+ )*\\d+)\\t((\\d+ )*\\d+)\\t(\\d+)\\n\"", ",", "line", ")", ":", "logging", ".", "warning", "(", "\"line not match format in train file: %s\"", "%", "line", ")", "continue", "items", "=", "line", ".", "strip", "(", "\"\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "query", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "0", "]", ".", "split", "(", "\" \"", ")", "]", "title", "=", "[", "int", "(", "id", ")", "for", "id", "in", "items", "[", "1", "]", 
".", "split", "(", "\" \"", ")", "]", "label", "=", "int", "(", "items", "[", "2", "]", ")", "if", "samples_file", ":", "samples_file", ".", "write", "(", "line", ")", "yield", "[", "query", ",", "title", ",", "label", "]", "if", "conf_dict", "[", "\"task_mode\"", "]", "==", "\"pairwise\"", ":", "return", "reader_with_pairwise", "else", ":", "return", "reader_with_pointwise" ]
https://github.com/baidu/AnyQ/blob/d94d450d2aaa5f7ed73424b10aa4539835b97527/tools/simnet/train/paddle/util/data_reader.py#L26-L93
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/Jinja2/py2/jinja2/ext.py
python
InternationalizationExtension.parse
(self, parser)
Parse a translatable tag.
Parse a translatable tag.
[ "Parse", "a", "translatable", "tag", "." ]
def parse(self, parser): """Parse a translatable tag.""" lineno = next(parser.stream).lineno num_called_num = False # find all the variables referenced. Additionally a variable can be # defined in the body of the trans block too, but this is checked at # a later state. plural_expr = None plural_expr_assignment = None variables = {} trimmed = None while parser.stream.current.type != "block_end": if variables: parser.stream.expect("comma") # skip colon for python compatibility if parser.stream.skip_if("colon"): break name = parser.stream.expect("name") if name.value in variables: parser.fail( "translatable variable %r defined twice." % name.value, name.lineno, exc=TemplateAssertionError, ) # expressions if parser.stream.current.type == "assign": next(parser.stream) variables[name.value] = var = parser.parse_expression() elif trimmed is None and name.value in ("trimmed", "notrimmed"): trimmed = name.value == "trimmed" continue else: variables[name.value] = var = nodes.Name(name.value, "load") if plural_expr is None: if isinstance(var, nodes.Call): plural_expr = nodes.Name("_trans", "load") variables[name.value] = plural_expr plural_expr_assignment = nodes.Assign( nodes.Name("_trans", "store"), var ) else: plural_expr = var num_called_num = name.value == "num" parser.stream.expect("block_end") plural = None have_plural = False referenced = set() # now parse until endtrans or pluralize singular_names, singular = self._parse_block(parser, True) if singular_names: referenced.update(singular_names) if plural_expr is None: plural_expr = nodes.Name(singular_names[0], "load") num_called_num = singular_names[0] == "num" # if we have a pluralize block, we parse that too if parser.stream.current.test("name:pluralize"): have_plural = True next(parser.stream) if parser.stream.current.type != "block_end": name = parser.stream.expect("name") if name.value not in variables: parser.fail( "unknown variable %r for pluralization" % name.value, name.lineno, exc=TemplateAssertionError, ) plural_expr = variables[name.value] num_called_num = name.value == "num" parser.stream.expect("block_end") plural_names, plural = self._parse_block(parser, False) next(parser.stream) referenced.update(plural_names) else: next(parser.stream) # register free names as simple name expressions for var in referenced: if var not in variables: variables[var] = nodes.Name(var, "load") if not have_plural: plural_expr = None elif plural_expr is None: parser.fail("pluralize without variables", lineno) if trimmed is None: trimmed = self.environment.policies["ext.i18n.trimmed"] if trimmed: singular = self._trim_whitespace(singular) if plural: plural = self._trim_whitespace(plural) node = self._make_node( singular, plural, variables, plural_expr, bool(referenced), num_called_num and have_plural, ) node.set_lineno(lineno) if plural_expr_assignment is not None: return [plural_expr_assignment, node] else: return node
[ "def", "parse", "(", "self", ",", "parser", ")", ":", "lineno", "=", "next", "(", "parser", ".", "stream", ")", ".", "lineno", "num_called_num", "=", "False", "# find all the variables referenced. Additionally a variable can be", "# defined in the body of the trans block too, but this is checked at", "# a later state.", "plural_expr", "=", "None", "plural_expr_assignment", "=", "None", "variables", "=", "{", "}", "trimmed", "=", "None", "while", "parser", ".", "stream", ".", "current", ".", "type", "!=", "\"block_end\"", ":", "if", "variables", ":", "parser", ".", "stream", ".", "expect", "(", "\"comma\"", ")", "# skip colon for python compatibility", "if", "parser", ".", "stream", ".", "skip_if", "(", "\"colon\"", ")", ":", "break", "name", "=", "parser", ".", "stream", ".", "expect", "(", "\"name\"", ")", "if", "name", ".", "value", "in", "variables", ":", "parser", ".", "fail", "(", "\"translatable variable %r defined twice.\"", "%", "name", ".", "value", ",", "name", ".", "lineno", ",", "exc", "=", "TemplateAssertionError", ",", ")", "# expressions", "if", "parser", ".", "stream", ".", "current", ".", "type", "==", "\"assign\"", ":", "next", "(", "parser", ".", "stream", ")", "variables", "[", "name", ".", "value", "]", "=", "var", "=", "parser", ".", "parse_expression", "(", ")", "elif", "trimmed", "is", "None", "and", "name", ".", "value", "in", "(", "\"trimmed\"", ",", "\"notrimmed\"", ")", ":", "trimmed", "=", "name", ".", "value", "==", "\"trimmed\"", "continue", "else", ":", "variables", "[", "name", ".", "value", "]", "=", "var", "=", "nodes", ".", "Name", "(", "name", ".", "value", ",", "\"load\"", ")", "if", "plural_expr", "is", "None", ":", "if", "isinstance", "(", "var", ",", "nodes", ".", "Call", ")", ":", "plural_expr", "=", "nodes", ".", "Name", "(", "\"_trans\"", ",", "\"load\"", ")", "variables", "[", "name", ".", "value", "]", "=", "plural_expr", "plural_expr_assignment", "=", "nodes", ".", "Assign", "(", "nodes", ".", "Name", "(", "\"_trans\"", ",", "\"store\"", ")", ",", "var", ")", "else", ":", "plural_expr", "=", "var", "num_called_num", "=", "name", ".", "value", "==", "\"num\"", "parser", ".", "stream", ".", "expect", "(", "\"block_end\"", ")", "plural", "=", "None", "have_plural", "=", "False", "referenced", "=", "set", "(", ")", "# now parse until endtrans or pluralize", "singular_names", ",", "singular", "=", "self", ".", "_parse_block", "(", "parser", ",", "True", ")", "if", "singular_names", ":", "referenced", ".", "update", "(", "singular_names", ")", "if", "plural_expr", "is", "None", ":", "plural_expr", "=", "nodes", ".", "Name", "(", "singular_names", "[", "0", "]", ",", "\"load\"", ")", "num_called_num", "=", "singular_names", "[", "0", "]", "==", "\"num\"", "# if we have a pluralize block, we parse that too", "if", "parser", ".", "stream", ".", "current", ".", "test", "(", "\"name:pluralize\"", ")", ":", "have_plural", "=", "True", "next", "(", "parser", ".", "stream", ")", "if", "parser", ".", "stream", ".", "current", ".", "type", "!=", "\"block_end\"", ":", "name", "=", "parser", ".", "stream", ".", "expect", "(", "\"name\"", ")", "if", "name", ".", "value", "not", "in", "variables", ":", "parser", ".", "fail", "(", "\"unknown variable %r for pluralization\"", "%", "name", ".", "value", ",", "name", ".", "lineno", ",", "exc", "=", "TemplateAssertionError", ",", ")", "plural_expr", "=", "variables", "[", "name", ".", "value", "]", "num_called_num", "=", "name", ".", "value", "==", "\"num\"", "parser", ".", "stream", ".", "expect", "(", 
"\"block_end\"", ")", "plural_names", ",", "plural", "=", "self", ".", "_parse_block", "(", "parser", ",", "False", ")", "next", "(", "parser", ".", "stream", ")", "referenced", ".", "update", "(", "plural_names", ")", "else", ":", "next", "(", "parser", ".", "stream", ")", "# register free names as simple name expressions", "for", "var", "in", "referenced", ":", "if", "var", "not", "in", "variables", ":", "variables", "[", "var", "]", "=", "nodes", ".", "Name", "(", "var", ",", "\"load\"", ")", "if", "not", "have_plural", ":", "plural_expr", "=", "None", "elif", "plural_expr", "is", "None", ":", "parser", ".", "fail", "(", "\"pluralize without variables\"", ",", "lineno", ")", "if", "trimmed", "is", "None", ":", "trimmed", "=", "self", ".", "environment", ".", "policies", "[", "\"ext.i18n.trimmed\"", "]", "if", "trimmed", ":", "singular", "=", "self", ".", "_trim_whitespace", "(", "singular", ")", "if", "plural", ":", "plural", "=", "self", ".", "_trim_whitespace", "(", "plural", ")", "node", "=", "self", ".", "_make_node", "(", "singular", ",", "plural", ",", "variables", ",", "plural_expr", ",", "bool", "(", "referenced", ")", ",", "num_called_num", "and", "have_plural", ",", ")", "node", ".", "set_lineno", "(", "lineno", ")", "if", "plural_expr_assignment", "is", "not", "None", ":", "return", "[", "plural_expr_assignment", ",", "node", "]", "else", ":", "return", "node" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/Jinja2/py2/jinja2/ext.py#L229-L342
okex/V3-Open-API-SDK
c5abb0db7e2287718e0055e17e57672ce0ec7fd9
okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/urllib3/contrib/pyopenssl.py
python
extract_from_urllib3
()
Undo monkey-patching by :func:`inject_into_urllib3`.
Undo monkey-patching by :func:`inject_into_urllib3`.
[ "Undo", "monkey", "-", "patching", "by", ":", "func", ":", "inject_into_urllib3", "." ]
def extract_from_urllib3(): 'Undo monkey-patching by :func:`inject_into_urllib3`.' util.ssl_.SSLContext = orig_util_SSLContext util.HAS_SNI = orig_util_HAS_SNI util.ssl_.HAS_SNI = orig_util_HAS_SNI util.IS_PYOPENSSL = False util.ssl_.IS_PYOPENSSL = False
[ "def", "extract_from_urllib3", "(", ")", ":", "util", ".", "ssl_", ".", "SSLContext", "=", "orig_util_SSLContext", "util", ".", "HAS_SNI", "=", "orig_util_HAS_SNI", "util", ".", "ssl_", ".", "HAS_SNI", "=", "orig_util_HAS_SNI", "util", ".", "IS_PYOPENSSL", "=", "False", "util", ".", "ssl_", ".", "IS_PYOPENSSL", "=", "False" ]
https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/urllib3/contrib/pyopenssl.py#L127-L134
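extract_from_urllib3 pairs with inject_into_urllib3 from the same module; a typical guard pattern, using the vendored import path from the record above and assuming pyOpenSSL is installed:

from pip._vendor.urllib3.contrib import pyopenssl

# inject swaps urllib3's SSLContext for the pyOpenSSL-backed one;
# extract restores the originals captured at import time.
pyopenssl.inject_into_urllib3()
try:
    pass  # issue HTTPS requests needing SNI here
finally:
    pyopenssl.extract_from_urllib3()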
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/boosted_trees/python/utils/losses.py
python
per_example_exp_loss
(labels, weights, predictions, name=None, eps=0.1)
return unweighted_loss * weights, control_flow_ops.no_op()
Trimmed exponential loss given labels, example weights and predictions. Note that this is only for binary classification. If logistic loss tries to make sure that the classifier is certain of its predictions, exp loss says: "as long as it got it correct, even barely, i don't care". Can be used on noisy data, or when you don't care about getting the actual probabilities from the model, just the correct label. The loss returns is exp(-targets*modified_predictions), where modified_predictions are 1 if sigmoid is >= 0.5+eps (eg we predict positive class), -1 if sigmoid < 0.5-eps (e.g. we predict negative class) and ax+b in the interval 0.5-eps, 0.5+eps, where a = 1/eps, b=1/(2eps). Args: labels: Rank 2 (N, D) tensor of per-example labels. weights: Rank 2 (N, 1) tensor of per-example weights. predictions: Rank 2 (N, D) tensor of per-example predictions. name: A name for the operation (optional). eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b. Returns: loss: A Rank 2 (N, 1) tensor of per-example exp loss update_op: An update operation to update the loss's internal state.
Trimmed exponential loss given labels, example weights and predictions.
[ "Trimmed", "exponential", "loss", "given", "labels", "example", "weights", "and", "predictions", "." ]
def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1): """Trimmed exponential loss given labels, example weights and predictions. Note that this is only for binary classification. If logistic loss tries to make sure that the classifier is certain of its predictions, exp loss says: "as long as it got it correct, even barely, i don't care". Can be used on noisy data, or when you don't care about getting the actual probabilities from the model, just the correct label. The loss returns is exp(-targets*modified_predictions), where modified_predictions are 1 if sigmoid is >= 0.5+eps (eg we predict positive class), -1 if sigmoid < 0.5-eps (e.g. we predict negative class) and ax+b in the interval 0.5-eps, 0.5+eps, where a = 1/eps, b=1/(2eps). Args: labels: Rank 2 (N, D) tensor of per-example labels. weights: Rank 2 (N, 1) tensor of per-example weights. predictions: Rank 2 (N, D) tensor of per-example predictions. name: A name for the operation (optional). eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b. Returns: loss: A Rank 2 (N, 1) tensor of per-example exp loss update_op: An update operation to update the loss's internal state. """ def exp_with_logits(name, eps, labels=None, logits=None): """Computes exponential loss given `logits`. The loss returns is exp(-targets*modified_predictions), where modified_predictions are 1 if sigmoid is >= 0.5+eps (eg we predict positive class), -1 if sigmoid < 0.5-eps (e.g. we predict negative class) and ax+b in the interval 0.5-eps, 0.5+eps, where a = 1/eps, b=1/(2eps). Args: name: A name for the operation (optional). eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b. labels: A `Tensor` of the same type and shape as `logits`. logits: A `Tensor` of type `float32` or `float64`. Returns: A `Tensor` of the same shape as `logits` with the componentwise exponential losses. Raises: ValueError: If `logits` and `labels` do not have the same shape. """ with ops.name_scope(name, "exp_loss", [logits, labels]) as name: logits = ops.convert_to_tensor(logits, name="logits") labels = ops.convert_to_tensor(labels, name="labels") try: labels.get_shape().merge_with(logits.get_shape()) except ValueError: raise ValueError("logits and labels must have the same shape (%s vs %s)" % (logits.get_shape(), labels.get_shape())) # Default threshold to switch between classes zeros = array_ops.zeros_like(logits, dtype=logits.dtype) ones = array_ops.ones_like(logits, dtype=logits.dtype) neg_ones = -array_ops.ones_like(logits, dtype=logits.dtype) # Convert labels to 1 and -1 cond_labels = (labels > zeros) labels_converted = array_ops.where(cond_labels, ones, neg_ones) # Convert predictions to 1 and -1 # The loss we build is min(1, max(-1,ax+b)) # where a=1/eps, b=-1/2eps. a = 1.0 / eps b = -1.0 / 2 / eps probs = math_ops.sigmoid(logits) y = a * probs + b # Build max(-1, ax+b) cond = (y < -1) max_res = array_ops.where(cond, neg_ones, y) # Build min part cond = (max_res > 1) min_res = array_ops.where(cond, ones, max_res) preds_converted = min_res return math_ops.exp(-preds_converted * labels_converted) labels = math_ops.cast(labels, dtypes.float32) unweighted_loss = exp_with_logits( name=name, eps=eps, labels=labels, logits=predictions) return unweighted_loss * weights, control_flow_ops.no_op()
[ "def", "per_example_exp_loss", "(", "labels", ",", "weights", ",", "predictions", ",", "name", "=", "None", ",", "eps", "=", "0.1", ")", ":", "def", "exp_with_logits", "(", "name", ",", "eps", ",", "labels", "=", "None", ",", "logits", "=", "None", ")", ":", "\"\"\"Computes exponential loss given `logits`.\n\n The loss returns is exp(-targets*modified_predictions), where\n modified_predictions are 1 if sigmoid is >= 0.5+eps (eg we predict positive\n class), -1 if sigmoid < 0.5-eps (e.g. we predict negative class) and ax+b in\n the interval 0.5-eps, 0.5+eps, where a = 1/eps, b=1/(2eps).\n\n Args:\n name: A name for the operation (optional).\n eps: For the range (0.5-eps, 0.5+eps) we set the predictions to be ax+b.\n labels: A `Tensor` of the same type and shape as `logits`.\n logits: A `Tensor` of type `float32` or `float64`.\n\n Returns:\n A `Tensor` of the same shape as `logits` with the componentwise\n exponential losses.\n\n Raises:\n ValueError: If `logits` and `labels` do not have the same shape.\n \"\"\"", "with", "ops", ".", "name_scope", "(", "name", ",", "\"exp_loss\"", ",", "[", "logits", ",", "labels", "]", ")", "as", "name", ":", "logits", "=", "ops", ".", "convert_to_tensor", "(", "logits", ",", "name", "=", "\"logits\"", ")", "labels", "=", "ops", ".", "convert_to_tensor", "(", "labels", ",", "name", "=", "\"labels\"", ")", "try", ":", "labels", ".", "get_shape", "(", ")", ".", "merge_with", "(", "logits", ".", "get_shape", "(", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"logits and labels must have the same shape (%s vs %s)\"", "%", "(", "logits", ".", "get_shape", "(", ")", ",", "labels", ".", "get_shape", "(", ")", ")", ")", "# Default threshold to switch between classes", "zeros", "=", "array_ops", ".", "zeros_like", "(", "logits", ",", "dtype", "=", "logits", ".", "dtype", ")", "ones", "=", "array_ops", ".", "ones_like", "(", "logits", ",", "dtype", "=", "logits", ".", "dtype", ")", "neg_ones", "=", "-", "array_ops", ".", "ones_like", "(", "logits", ",", "dtype", "=", "logits", ".", "dtype", ")", "# Convert labels to 1 and -1", "cond_labels", "=", "(", "labels", ">", "zeros", ")", "labels_converted", "=", "array_ops", ".", "where", "(", "cond_labels", ",", "ones", ",", "neg_ones", ")", "# Convert predictions to 1 and -1", "# The loss we build is min(1, max(-1,ax+b))", "# where a=1/eps, b=-1/2eps.", "a", "=", "1.0", "/", "eps", "b", "=", "-", "1.0", "/", "2", "/", "eps", "probs", "=", "math_ops", ".", "sigmoid", "(", "logits", ")", "y", "=", "a", "*", "probs", "+", "b", "# Build max(-1, ax+b)", "cond", "=", "(", "y", "<", "-", "1", ")", "max_res", "=", "array_ops", ".", "where", "(", "cond", ",", "neg_ones", ",", "y", ")", "# Build min part", "cond", "=", "(", "max_res", ">", "1", ")", "min_res", "=", "array_ops", ".", "where", "(", "cond", ",", "ones", ",", "max_res", ")", "preds_converted", "=", "min_res", "return", "math_ops", ".", "exp", "(", "-", "preds_converted", "*", "labels_converted", ")", "labels", "=", "math_ops", ".", "cast", "(", "labels", ",", "dtypes", ".", "float32", ")", "unweighted_loss", "=", "exp_with_logits", "(", "name", "=", "name", ",", "eps", "=", "eps", ",", "labels", "=", "labels", ",", "logits", "=", "predictions", ")", "return", "unweighted_loss", "*", "weights", ",", "control_flow_ops", ".", "no_op", "(", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/boosted_trees/python/utils/losses.py#L175-L260
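The clamped "modified prediction" described in the docstring above, written out in plain NumPy as a sketch (the function name is mine):

import numpy as np

def modified_prediction(logits, eps=0.1):
    # min(1, max(-1, a*p + b)) with p = sigmoid(logits), a = 1/eps,
    # b = -1/(2*eps): -1 below p = 0.5-eps, +1 above p = 0.5+eps,
    # and linear in between.
    p = 1.0 / (1.0 + np.exp(-logits))
    return np.clip(p / eps - 1.0 / (2.0 * eps), -1.0, 1.0)

# The per-example loss is then exp(-labels * modified_prediction(logits))
# with labels converted to {-1, +1}.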
ultralight-ux/WebCore
8a83d7f3a7517b75ae7dc18183b4ff9ce898419b
Source/JavaScriptCore/Scripts/jsmin.py
python
jsmin
(js)
return outs.getvalue()
returns a minified version of the javascript string
returns a minified version of the javascript string
[ "returns", "a", "minified", "version", "of", "the", "javascript", "string" ]
def jsmin(js): """ returns a minified version of the javascript string """ if not is_3: if cStringIO and not isinstance(js, unicode): # strings can use cStringIO for a 3x performance # improvement, but unicode (in python2) cannot klass = cStringIO.StringIO else: klass = StringIO.StringIO else: klass = io.StringIO ins = klass(js) outs = klass() JavascriptMinify(ins, outs).minify() return outs.getvalue()
[ "def", "jsmin", "(", "js", ")", ":", "if", "not", "is_3", ":", "if", "cStringIO", "and", "not", "isinstance", "(", "js", ",", "unicode", ")", ":", "# strings can use cStringIO for a 3x performance", "# improvement, but unicode (in python2) cannot", "klass", "=", "cStringIO", ".", "StringIO", "else", ":", "klass", "=", "StringIO", ".", "StringIO", "else", ":", "klass", "=", "io", ".", "StringIO", "ins", "=", "klass", "(", "js", ")", "outs", "=", "klass", "(", ")", "JavascriptMinify", "(", "ins", ",", "outs", ")", ".", "minify", "(", ")", "return", "outs", ".", "getvalue", "(", ")" ]
https://github.com/ultralight-ux/WebCore/blob/8a83d7f3a7517b75ae7dc18183b4ff9ce898419b/Source/JavaScriptCore/Scripts/jsmin.py#L43-L59
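A round-trip through jsmin as defined above; the exact minified bytes are an assumption, but comments and insignificant whitespace are removed:

source = "function add(a, b) {\n    // sum\n    return a + b;\n}\n"
minified = jsmin(source)
print(minified)  # e.g. "function add(a,b){return a+b;}"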
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/msw/_core.py
python
NavigationKeyEvent.SetFlags
(*args, **kwargs)
return _core_.NavigationKeyEvent_SetFlags(*args, **kwargs)
SetFlags(self, long flags) Set the navigation flags to a combination of the following: * wx.NavigationKeyEvent.IsBackward * wx.NavigationKeyEvent.IsForward * wx.NavigationKeyEvent.WinChange * wx.NavigationKeyEvent.FromTab
SetFlags(self, long flags)
[ "SetFlags", "(", "self", "long", "flags", ")" ]
def SetFlags(*args, **kwargs): """ SetFlags(self, long flags) Set the navigation flags to a combination of the following: * wx.NavigationKeyEvent.IsBackward * wx.NavigationKeyEvent.IsForward * wx.NavigationKeyEvent.WinChange * wx.NavigationKeyEvent.FromTab """ return _core_.NavigationKeyEvent_SetFlags(*args, **kwargs)
[ "def", "SetFlags", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "NavigationKeyEvent_SetFlags", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_core.py#L7284-L7296
trilinos/Trilinos
6168be6dd51e35e1cd681e9c4b24433e709df140
packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriExtractStructuredMultiBlock.py
python
FigureBlockIndicesFromBlockListOneBlock
(includeIndexList, includeBlockList, inMetaData, ioFlatIndexCounter, inCsdata, inForceSetting)
determine if this one block should have its flat index tripped on for the extract block filter (leaf item of recursion)
determine if this one block should have its flat index tripped on for the extract block filter (leaf item of recursion)
[ "determine", "if", "this", "one", "block", "should", "have", "its", "flat", "index", "tripped", "on", "for", "the", "extract", "block", "filter", "(", "leaf", "item", "of", "recursion", ")" ]
def FigureBlockIndicesFromBlockListOneBlock(includeIndexList, includeBlockList, inMetaData, ioFlatIndexCounter, inCsdata, inForceSetting): """determine if this one block should have its flat index tripped on for the extract block filter (leaf item of recursion)""" if PhactoriDbg(100): myDebugPrint3("FigureBlockIndicesFromBlockListOneBlock entered\n" "ioFlatIndexCounter " + str(ioFlatIndexCounter,) + " inForceSetting " + str(inForceSetting) + "\n" "2 inMetaData: " + str(inMetaData) + "\n") if inMetaData == None: thisBlockName = None else: thisBlockName = inMetaData.Get(vtk.vtkCompositeDataSet.NAME()) if (thisBlockName == None) and (inForceSetting != 1): if PhactoriDbg(100): myDebugPrint3("block with no name " + \ " not in include list, not + to mBlockIndices (flat index " + \ str(ioFlatIndexCounter[0] - 1) + ")\n") elif (inForceSetting == 1) or (thisBlockName in includeBlockList): includeIndexList.append(int(ioFlatIndexCounter[0]) - 1) blockClassName = inCsdata.GetClassName() if blockClassName == "vtkStructuredGrid": global gStructuredGridFound gStructuredGridFound = inCsdata global gStructuredGridFoundExtent gStructuredGridFoundExtent = inMetaData.Get(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT()) if PhactoriDbg(100): myDebugPrint3("this leaf is structured grid: " + str(thisBlockName) + "\n" "vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(): " + str(gStructuredGridFoundExtent) + "\n" "A inCsdata.GetExtent(): " + str(inCsdata.GetExtent()) + "\n") #global gDuplicateNameCounter #if thisBlockName in gDuplicateNameCounter: # oldCount = gDuplicateNameCounter[thisBlockName] # gDuplicateNameCounter[thisBlockName] = oldCount+1 #else: # gDuplicateNameCounter[thisBlockName] = 1 if PhactoriDbg(100): myDebugPrint3("block " + str(thisBlockName) + \ " in include list, + to mBlockIndices (flat index " + \ str(ioFlatIndexCounter[0] - 1) + ")\n") else: if PhactoriDbg(100): myDebugPrint3("block " + str(thisBlockName) + \ " not in include list, not + to mBlockIndices (flat index " + \ str(ioFlatIndexCounter[0] - 1) + ")\n") if PhactoriDbg(100): myDebugPrint3("FigureBlockIndicesFromBlockListOneBlock returning\n")
[ "def", "FigureBlockIndicesFromBlockListOneBlock", "(", "includeIndexList", ",", "includeBlockList", ",", "inMetaData", ",", "ioFlatIndexCounter", ",", "inCsdata", ",", "inForceSetting", ")", ":", "if", "PhactoriDbg", "(", "100", ")", ":", "myDebugPrint3", "(", "\"FigureBlockIndicesFromBlockListOneBlock entered\\n\"", "\"ioFlatIndexCounter \"", "+", "str", "(", "ioFlatIndexCounter", ",", ")", "+", "\" inForceSetting \"", "+", "str", "(", "inForceSetting", ")", "+", "\"\\n\"", "\"2 inMetaData: \"", "+", "str", "(", "inMetaData", ")", "+", "\"\\n\"", ")", "if", "inMetaData", "==", "None", ":", "thisBlockName", "=", "None", "else", ":", "thisBlockName", "=", "inMetaData", ".", "Get", "(", "vtk", ".", "vtkCompositeDataSet", ".", "NAME", "(", ")", ")", "if", "(", "thisBlockName", "==", "None", ")", "and", "(", "inForceSetting", "!=", "1", ")", ":", "if", "PhactoriDbg", "(", "100", ")", ":", "myDebugPrint3", "(", "\"block with no name \"", "+", "\" not in include list, not + to mBlockIndices (flat index \"", "+", "str", "(", "ioFlatIndexCounter", "[", "0", "]", "-", "1", ")", "+", "\")\\n\"", ")", "elif", "(", "inForceSetting", "==", "1", ")", "or", "(", "thisBlockName", "in", "includeBlockList", ")", ":", "includeIndexList", ".", "append", "(", "int", "(", "ioFlatIndexCounter", "[", "0", "]", ")", "-", "1", ")", "blockClassName", "=", "inCsdata", ".", "GetClassName", "(", ")", "if", "blockClassName", "==", "\"vtkStructuredGrid\"", ":", "global", "gStructuredGridFound", "gStructuredGridFound", "=", "inCsdata", "global", "gStructuredGridFoundExtent", "gStructuredGridFoundExtent", "=", "inMetaData", ".", "Get", "(", "vtk", ".", "vtkStreamingDemandDrivenPipeline", ".", "WHOLE_EXTENT", "(", ")", ")", "if", "PhactoriDbg", "(", "100", ")", ":", "myDebugPrint3", "(", "\"this leaf is structured grid: \"", "+", "str", "(", "thisBlockName", ")", "+", "\"\\n\"", "\"vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(): \"", "+", "str", "(", "gStructuredGridFoundExtent", ")", "+", "\"\\n\"", "\"A inCsdata.GetExtent(): \"", "+", "str", "(", "inCsdata", ".", "GetExtent", "(", ")", ")", "+", "\"\\n\"", ")", "#global gDuplicateNameCounter", "#if thisBlockName in gDuplicateNameCounter:", "# oldCount = gDuplicateNameCounter[thisBlockName]", "# gDuplicateNameCounter[thisBlockName] = oldCount+1", "#else:", "# gDuplicateNameCounter[thisBlockName] = 1", "if", "PhactoriDbg", "(", "100", ")", ":", "myDebugPrint3", "(", "\"block \"", "+", "str", "(", "thisBlockName", ")", "+", "\" in include list, + to mBlockIndices (flat index \"", "+", "str", "(", "ioFlatIndexCounter", "[", "0", "]", "-", "1", ")", "+", "\")\\n\"", ")", "else", ":", "if", "PhactoriDbg", "(", "100", ")", ":", "myDebugPrint3", "(", "\"block \"", "+", "str", "(", "thisBlockName", ")", "+", "\" not in include list, not + to mBlockIndices (flat index \"", "+", "str", "(", "ioFlatIndexCounter", "[", "0", "]", "-", "1", ")", "+", "\")\\n\"", ")", "if", "PhactoriDbg", "(", "100", ")", ":", "myDebugPrint3", "(", "\"FigureBlockIndicesFromBlockListOneBlock returning\\n\"", ")" ]
https://github.com/trilinos/Trilinos/blob/6168be6dd51e35e1cd681e9c4b24433e709df140/packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/PhactoriExtractStructuredMultiBlock.py#L43-L91
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pathlib2/pathlib2/__init__.py
python
PurePath.with_name
(self, name)
return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + parts[-1:])
Return a new path with the file name changed.
Return a new path with the file name changed.
[ "Return", "a", "new", "path", "with", "the", "file", "name", "changed", "." ]
def with_name(self, name): """Return a new path with the file name changed.""" if not self.name: raise ValueError("%r has an empty name" % (self,)) drv, root, parts = self._flavour.parse_parts((name,)) if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep] or drv or root or len(parts) != 1): raise ValueError("Invalid name %r" % (name)) return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + parts[-1:])
[ "def", "with_name", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "name", ":", "raise", "ValueError", "(", "\"%r has an empty name\"", "%", "(", "self", ",", ")", ")", "drv", ",", "root", ",", "parts", "=", "self", ".", "_flavour", ".", "parse_parts", "(", "(", "name", ",", ")", ")", "if", "(", "not", "name", "or", "name", "[", "-", "1", "]", "in", "[", "self", ".", "_flavour", ".", "sep", ",", "self", ".", "_flavour", ".", "altsep", "]", "or", "drv", "or", "root", "or", "len", "(", "parts", ")", "!=", "1", ")", ":", "raise", "ValueError", "(", "\"Invalid name %r\"", "%", "(", "name", ")", ")", "return", "self", ".", "_from_parsed_parts", "(", "self", ".", "_drv", ",", "self", ".", "_root", ",", "self", ".", "_parts", "[", ":", "-", "1", "]", "+", "parts", "[", "-", "1", ":", "]", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pathlib2/pathlib2/__init__.py#L1076-L1085
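The stdlib pathlib behaves the same way as the vendored pathlib2 shown here:

from pathlib import PurePosixPath

p = PurePosixPath("/tmp/report.txt")
print(p.with_name("data.csv"))  # /tmp/data.csv
# A path with no final component refuses the operation:
# PurePosixPath("/").with_name("x") raises ValueError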
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
wx/py/shell.py
python
Shell.redirectStdout
(self, redirect=True)
If redirect is true then sys.stdout will go to the shell.
If redirect is true then sys.stdout will go to the shell.
[ "If", "redirect", "is", "true", "then", "sys", ".", "stdout", "will", "go", "to", "the", "shell", "." ]
def redirectStdout(self, redirect=True): """If redirect is true then sys.stdout will go to the shell.""" if redirect: sys.stdout = PseudoFileOut(self.writeOut) else: sys.stdout = self.stdout
[ "def", "redirectStdout", "(", "self", ",", "redirect", "=", "True", ")", ":", "if", "redirect", ":", "sys", ".", "stdout", "=", "PseudoFileOut", "(", "self", ".", "writeOut", ")", "else", ":", "sys", ".", "stdout", "=", "self", ".", "stdout" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/py/shell.py#L1256-L1261
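The same redirection idea in miniature, with io.StringIO standing in for the shell's PseudoFileOut (names below are mine):

import io
import sys

buf = io.StringIO()
saved = sys.stdout
sys.stdout = buf      # redirect
print("captured")
sys.stdout = saved    # restore
assert buf.getvalue() == "captured\n"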
tfwu/FaceDetection-ConvNet-3D
f9251c48eb40c5aec8fba7455115c355466555be
python/build/lib.linux-x86_64-2.7/mxnet/kvstore.py
python
KVStore._set_updater
(self, updater)
Set a push updater into the store. This function only changes the local store. Use set_optimizer for multi-machines. Parameters ---------- updater : function the updater function Examples -------- >>> def update(key, input, stored): ... print "update on key: %d" % key ... stored += input * 2 >>> kv._set_updater(update) >>> kv.pull(3, out=a) >>> print a.asnumpy() [[ 4. 4. 4.] [ 4. 4. 4.]] >>> kv.push(3, mx.nd.ones(shape)) update on key: 3 >>> kv.pull(3, out=a) >>> print a.asnumpy() [[ 6. 6. 6.] [ 6. 6. 6.]]
Set a push updater into the store.
[ "Set", "a", "push", "updater", "into", "the", "store", "." ]
def _set_updater(self, updater):
    """Set a push updater into the store.

    This function only changes the local store. Use set_optimizer for
    multi-machines.

    Parameters
    ----------
    updater : function
        the updater function

    Examples
    --------
    >>> def update(key, input, stored):
    ...     print "update on key: %d" % key
    ...     stored += input * 2
    >>> kv._set_updater(update)
    >>> kv.pull(3, out=a)
    >>> print a.asnumpy()
    [[ 4.  4.  4.]
     [ 4.  4.  4.]]
    >>> kv.push(3, mx.nd.ones(shape))
    update on key: 3
    >>> kv.pull(3, out=a)
    >>> print a.asnumpy()
    [[ 6.  6.  6.]
     [ 6.  6.  6.]]
    """
    _updater_proto = ctypes.CFUNCTYPE(
        None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
    self._updater_func = _updater_proto(_updater_wrapper(updater))
    check_call(_LIB.MXKVStoreSetUpdater(self.handle, self._updater_func, None))
[ "def", "_set_updater", "(", "self", ",", "updater", ")", ":", "_updater_proto", "=", "ctypes", ".", "CFUNCTYPE", "(", "None", ",", "ctypes", ".", "c_int", ",", "NDArrayHandle", ",", "NDArrayHandle", ",", "ctypes", ".", "c_void_p", ")", "self", ".", "_updater_func", "=", "_updater_proto", "(", "_updater_wrapper", "(", "updater", ")", ")", "check_call", "(", "_LIB", ".", "MXKVStoreSetUpdater", "(", "self", ".", "handle", ",", "self", ".", "_updater_func", ",", "None", ")", ")" ]
https://github.com/tfwu/FaceDetection-ConvNet-3D/blob/f9251c48eb40c5aec8fba7455115c355466555be/python/build/lib.linux-x86_64-2.7/mxnet/kvstore.py#L297-L328
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/blackduck_hub.py
python
ReportManager.add_report_metric
(self, comp_name: str, metric: str)
Add a column to be included in the pretty table.
Add a column to be included in the pretty table.
[ "Add", "a", "column", "to", "be", "included", "in", "the", "pretty", "table", "." ]
def add_report_metric(self, comp_name: str, metric: str):
    """Add a column to be included in the pretty table."""
    comp_name = ReportManager._get_norm_comp_name(comp_name)
    self._data.add_value(comp_name, metric)
[ "def", "add_report_metric", "(", "self", ",", "comp_name", ":", "str", ",", "metric", ":", "str", ")", ":", "comp_name", "=", "ReportManager", ".", "_get_norm_comp_name", "(", "comp_name", ")", "self", ".", "_data", ".", "add_value", "(", "comp_name", ",", "metric", ")" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/blackduck_hub.py#L810-L814
sailing-pmls/bosen
06cb58902d011fbea5f9428f10ce30e621492204
style_script/cpplint.py
python
Search
(pattern, s)
return _regexp_compile_cache[pattern].search(s)
Searches the string for the pattern, caching the compiled regexp.
Searches the string for the pattern, caching the compiled regexp.
[ "Searches", "the", "string", "for", "the", "pattern", "caching", "the", "compiled", "regexp", "." ]
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
[ "def", "Search", "(", "pattern", ",", "s", ")", ":", "if", "pattern", "not", "in", "_regexp_compile_cache", ":", "_regexp_compile_cache", "[", "pattern", "]", "=", "sre_compile", ".", "compile", "(", "pattern", ")", "return", "_regexp_compile_cache", "[", "pattern", "]", ".", "search", "(", "s", ")" ]
https://github.com/sailing-pmls/bosen/blob/06cb58902d011fbea5f9428f10ce30e621492204/style_script/cpplint.py#L585-L589
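The same memoization works with the public `re` module (cpplint reaches for the internal `sre_compile` for historical reasons). A standalone sketch; `re` keeps its own small internal cache, but an explicit dict avoids eviction and repeated key handling in a hot lint loop:

```python
import re

_regexp_compile_cache = {}

def search(pattern, s):
    """Search s for pattern, caching the compiled regexp."""
    if pattern not in _regexp_compile_cache:
        _regexp_compile_cache[pattern] = re.compile(pattern)
    return _regexp_compile_cache[pattern].search(s)

m = search(r'L(\d+)', 'line L42 here')
print(m.group(1))  # 42
```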
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/gsutil/gslib/commands/perfdiag.py
python
PerfDiagCommand._RunOperation
(self, func)
return return_val
Runs an operation with retry logic.

Args:
  func: The function to run.

Returns:
  True if the operation succeeds, False if aborted.
Runs an operation with retry logic.
[ "Runs", "an", "operation", "with", "retry", "logic", "." ]
def _RunOperation(self, func):
  """Runs an operation with retry logic.

  Args:
    func: The function to run.

  Returns:
    True if the operation succeeds, False if aborted.
  """
  # We retry on httplib exceptions that can happen if the socket was closed
  # by the remote party or the connection broke because of network issues.
  # Only the BotoServerError is counted as a 5xx error towards the retry
  # limit.
  success = False
  server_error_retried = 0
  total_retried = 0
  i = 0
  return_val = None
  while not success:
    next_sleep = min(random.random() * (2 ** i) + 1, GetMaxRetryDelay())
    try:
      return_val = func()
      self.total_requests += 1
      success = True
    except tuple(self.exceptions) as e:
      total_retried += 1
      if total_retried > self.MAX_TOTAL_RETRIES:
        self.logger.info('Reached maximum total retries. Not retrying.')
        break
      if isinstance(e, ServiceException):
        if e.status >= 500:
          self.error_responses_by_code[e.status] += 1
          self.total_requests += 1
          self.request_errors += 1
          server_error_retried += 1
          time.sleep(next_sleep)
        else:
          raise
        if server_error_retried > self.MAX_SERVER_ERROR_RETRIES:
          self.logger.info(
              'Reached maximum server error retries. Not retrying.')
          break
      else:
        self.connection_breaks += 1
  return return_val
[ "def", "_RunOperation", "(", "self", ",", "func", ")", ":", "# We retry on httplib exceptions that can happen if the socket was closed", "# by the remote party or the connection broke because of network issues.", "# Only the BotoServerError is counted as a 5xx error towards the retry", "# limit.", "success", "=", "False", "server_error_retried", "=", "0", "total_retried", "=", "0", "i", "=", "0", "return_val", "=", "None", "while", "not", "success", ":", "next_sleep", "=", "min", "(", "random", ".", "random", "(", ")", "*", "(", "2", "**", "i", ")", "+", "1", ",", "GetMaxRetryDelay", "(", ")", ")", "try", ":", "return_val", "=", "func", "(", ")", "self", ".", "total_requests", "+=", "1", "success", "=", "True", "except", "tuple", "(", "self", ".", "exceptions", ")", "as", "e", ":", "total_retried", "+=", "1", "if", "total_retried", ">", "self", ".", "MAX_TOTAL_RETRIES", ":", "self", ".", "logger", ".", "info", "(", "'Reached maximum total retries. Not retrying.'", ")", "break", "if", "isinstance", "(", "e", ",", "ServiceException", ")", ":", "if", "e", ".", "status", ">=", "500", ":", "self", ".", "error_responses_by_code", "[", "e", ".", "status", "]", "+=", "1", "self", ".", "total_requests", "+=", "1", "self", ".", "request_errors", "+=", "1", "server_error_retried", "+=", "1", "time", ".", "sleep", "(", "next_sleep", ")", "else", ":", "raise", "if", "server_error_retried", ">", "self", ".", "MAX_SERVER_ERROR_RETRIES", ":", "self", ".", "logger", ".", "info", "(", "'Reached maximum server error retries. Not retrying.'", ")", "break", "else", ":", "self", ".", "connection_breaks", "+=", "1", "return", "return_val" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/gsutil/gslib/commands/perfdiag.py#L642-L686
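Stripped of the gsutil bookkeeping, `_RunOperation` is a capped, jittered retry loop. One quirk worth noting: in the code shown, `i` is initialized but never incremented, so the sleep stays roughly in [1, 2) seconds. A self-contained sketch of the intended exponential-backoff shape, with invented caps and exception types:

```python
import random
import time

MAX_TOTAL_RETRIES = 5   # hypothetical cap
MAX_RETRY_DELAY = 32.0  # hypothetical cap, seconds

def run_with_retries(func, retryable=(ConnectionError, TimeoutError)):
    retried = 0
    while True:
        try:
            return func()
        except retryable:
            retried += 1
            if retried > MAX_TOTAL_RETRIES:
                raise
            # jittered exponential backoff, capped at MAX_RETRY_DELAY
            time.sleep(min(random.random() * (2 ** retried) + 1,
                           MAX_RETRY_DELAY))
```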
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/ops/check_ops.py
python
assert_integer_v2
(x, message=None, name=None)
Assert that `x` is of integer dtype.

If `x` has a non-integer type, `message`, as well as the dtype of `x` are
printed, and `InvalidArgumentError` is raised.

This can always be checked statically, so this method returns nothing.

Args:
  x: A `Tensor`.
  message: A string to prefix to the default message.
  name: A name for this operation (optional). Defaults to "assert_integer".

Raises:
  TypeError: If `x.dtype` is not a non-quantized integer type.
Assert that `x` is of integer dtype.
[ "Assert", "that", "x", "is", "of", "integer", "dtype", "." ]
def assert_integer_v2(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  If `x` has a non-integer type, `message`, as well as the dtype of `x` are
  printed, and `InvalidArgumentError` is raised.

  This can always be checked statically, so this method returns nothing.

  Args:
    x: A `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is not a non-quantized integer type.
  """
  assert_integer(x=x, message=message, name=name)
[ "def", "assert_integer_v2", "(", "x", ",", "message", "=", "None", ",", "name", "=", "None", ")", ":", "assert_integer", "(", "x", "=", "x", ",", "message", "=", "message", ",", "name", "=", "name", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/ops/check_ops.py#L1510-L1526
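`assert_integer_v2` is a purely static check that delegates to the TF1 `assert_integer` and adds nothing to the graph. The idea, not the TF implementation, in a rough NumPy analogue:

```python
import numpy as np

def assert_integer(x, message=None):
    """Raise TypeError unless x has an integer dtype."""
    dtype = np.asarray(x).dtype
    if not np.issubdtype(dtype, np.integer):
        prefix = message + "  " if message else ""
        raise TypeError(prefix + "Expected integer dtype, got %s" % dtype)

assert_integer(np.arange(3))  # passes silently
# assert_integer(np.ones(3))  # would raise: Expected integer dtype, got float64
```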
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Node/Python.py
python
ValueNodeInfo.__getstate__
(self)
return state
Return all fields that shall be pickled. Walk the slots in the class hierarchy and add those to the state dictionary. If a '__dict__' slot is available, copy all entries to the dictionary. Also include the version id, which is fixed for all instances of a class.
Return all fields that shall be pickled. Walk the slots in the class hierarchy and add those to the state dictionary. If a '__dict__' slot is available, copy all entries to the dictionary. Also include the version id, which is fixed for all instances of a class.
[ "Return", "all", "fields", "that", "shall", "be", "pickled", ".", "Walk", "the", "slots", "in", "the", "class", "hierarchy", "and", "add", "those", "to", "the", "state", "dictionary", ".", "If", "a", "__dict__", "slot", "is", "available", "copy", "all", "entries", "to", "the", "dictionary", ".", "Also", "include", "the", "version", "id", "which", "is", "fixed", "for", "all", "instances", "of", "a", "class", "." ]
def __getstate__(self):
    """
    Return all fields that shall be pickled. Walk the slots in the class
    hierarchy and add those to the state dictionary. If a '__dict__' slot
    is available, copy all entries to the dictionary. Also include the
    version id, which is fixed for all instances of a class.
    """
    state = getattr(self, '__dict__', {}).copy()
    for obj in type(self).mro():
        for name in getattr(obj, '__slots__', ()):
            if hasattr(self, name):
                state[name] = getattr(self, name)

    state['_version_id'] = self.current_version_id
    try:
        del state['__weakref__']
    except KeyError:
        pass
    return state
[ "def", "__getstate__", "(", "self", ")", ":", "state", "=", "getattr", "(", "self", ",", "'__dict__'", ",", "{", "}", ")", ".", "copy", "(", ")", "for", "obj", "in", "type", "(", "self", ")", ".", "mro", "(", ")", ":", "for", "name", "in", "getattr", "(", "obj", ",", "'__slots__'", ",", "(", ")", ")", ":", "if", "hasattr", "(", "self", ",", "name", ")", ":", "state", "[", "name", "]", "=", "getattr", "(", "self", ",", "name", ")", "state", "[", "'_version_id'", "]", "=", "self", ".", "current_version_id", "try", ":", "del", "state", "[", "'__weakref__'", "]", "except", "KeyError", ":", "pass", "return", "state" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Node/Python.py#L43-L62
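The MRO walk matters because `__slots__` on a subclass does not include the parents' slots. A small demonstration of the same pattern (class names invented; a matching `__setstate__` is needed for a full pickle round-trip, which the SCons original provides elsewhere):

```python
class Base:
    __slots__ = ('a',)

class Child(Base):
    __slots__ = ('b',)

    def __getstate__(self):
        state = {}
        # Walk the class hierarchy so inherited slots are captured too.
        for klass in type(self).mro():
            for name in getattr(klass, '__slots__', ()):
                if hasattr(self, name):
                    state[name] = getattr(self, name)
        return state

c = Child()
c.a, c.b = 1, 2
print(c.__getstate__())  # {'b': 2, 'a': 1} -- order follows the MRO walk
```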
ideawu/ssdb-rocks
a3cbb322cafb2f493252829c608e2239df98c9ac
deps/cpy/antlr3/tree.py
python
TreeAdaptor.deleteChild
(self, t, i)
Remove ith child and shift children down from right.
Remove ith child and shift children down from right.
[ "Remove", "ith", "child", "and", "shift", "children", "down", "from", "right", "." ]
def deleteChild(self, t, i):
    """Remove ith child and shift children down from right."""
    raise NotImplementedError
[ "def", "deleteChild", "(", "self", ",", "t", ",", "i", ")", ":", "raise", "NotImplementedError" ]
https://github.com/ideawu/ssdb-rocks/blob/a3cbb322cafb2f493252829c608e2239df98c9ac/deps/cpy/antlr3/tree.py#L509-L512
google/or-tools
2cb85b4eead4c38e1c54b48044f92087cf165bce
ortools/constraint_solver/samples/cvrptw.py
python
create_distance_evaluator
(data)
return distance_evaluator
Creates callback to return distance between points.
Creates callback to return distance between points.
[ "Creates", "callback", "to", "return", "distance", "between", "points", "." ]
def create_distance_evaluator(data):
    """Creates callback to return distance between points."""
    _distances = {}
    # precompute distance between location to have distance callback in O(1)
    for from_node in range(data['num_locations']):
        _distances[from_node] = {}
        for to_node in range(data['num_locations']):
            if from_node == to_node:
                _distances[from_node][to_node] = 0
            else:
                _distances[from_node][to_node] = (manhattan_distance(
                    data['locations'][from_node], data['locations'][to_node]))

    def distance_evaluator(manager, from_node, to_node):
        """Returns the manhattan distance between the two nodes"""
        return _distances[manager.IndexToNode(from_node)][manager.IndexToNode(
            to_node)]

    return distance_evaluator
[ "def", "create_distance_evaluator", "(", "data", ")", ":", "_distances", "=", "{", "}", "# precompute distance between location to have distance callback in O(1)", "for", "from_node", "in", "range", "(", "data", "[", "'num_locations'", "]", ")", ":", "_distances", "[", "from_node", "]", "=", "{", "}", "for", "to_node", "in", "range", "(", "data", "[", "'num_locations'", "]", ")", ":", "if", "from_node", "==", "to_node", ":", "_distances", "[", "from_node", "]", "[", "to_node", "]", "=", "0", "else", ":", "_distances", "[", "from_node", "]", "[", "to_node", "]", "=", "(", "manhattan_distance", "(", "data", "[", "'locations'", "]", "[", "from_node", "]", ",", "data", "[", "'locations'", "]", "[", "to_node", "]", ")", ")", "def", "distance_evaluator", "(", "manager", ",", "from_node", ",", "to_node", ")", ":", "\"\"\"Returns the manhattan distance between the two nodes\"\"\"", "return", "_distances", "[", "manager", ".", "IndexToNode", "(", "from_node", ")", "]", "[", "manager", ".", "IndexToNode", "(", "to_node", ")", "]", "return", "distance_evaluator" ]
https://github.com/google/or-tools/blob/2cb85b4eead4c38e1c54b48044f92087cf165bce/ortools/constraint_solver/samples/cvrptw.py#L93-L111
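The closure-over-a-precomputed-table pattern stands on its own outside OR-Tools (the real evaluator additionally maps routing indices to nodes via `manager.IndexToNode`). A stripped-down sketch with made-up coordinates:

```python
def manhattan_distance(p, q):
    return abs(p[0] - q[0]) + abs(p[1] - q[1])

def make_distance_evaluator(locations):
    # Precompute all pairwise distances so each lookup is O(1) in the hot loop.
    n = len(locations)
    dist = {i: {j: 0 if i == j else manhattan_distance(locations[i], locations[j])
                for j in range(n)}
            for i in range(n)}

    def evaluator(i, j):
        return dist[i][j]
    return evaluator

ev = make_distance_evaluator([(0, 0), (3, 4), (6, 1)])  # hypothetical stops
print(ev(0, 1), ev(1, 2))  # 7 6
```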
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/nntplib.py
python
_NNTPBase.getwelcome
(self)
return self.welcome
Get the welcome message from the server (this is read and squirreled away by __init__()). If the response code is 200, posting is allowed; if it 201, posting is not allowed.
Get the welcome message from the server (this is read and squirreled away by __init__()). If the response code is 200, posting is allowed; if it 201, posting is not allowed.
[ "Get", "the", "welcome", "message", "from", "the", "server", "(", "this", "is", "read", "and", "squirreled", "away", "by", "__init__", "()", ")", ".", "If", "the", "response", "code", "is", "200", "posting", "is", "allowed", ";", "if", "it", "201", "posting", "is", "not", "allowed", "." ]
def getwelcome(self):
    """Get the welcome message from the server
    (this is read and squirreled away by __init__()).
    If the response code is 200, posting is allowed;
    if it 201, posting is not allowed."""

    if self.debugging: print('*welcome*', repr(self.welcome))
    return self.welcome
[ "def", "getwelcome", "(", "self", ")", ":", "if", "self", ".", "debugging", ":", "print", "(", "'*welcome*'", ",", "repr", "(", "self", ".", "welcome", ")", ")", "return", "self", ".", "welcome" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/nntplib.py#L373-L380
apache/mesos
97d9a4063332aae3825d78de71611657e05cf5e2
support/verify-reviews.py
python
main
()
Main function to verify the submitted reviews.
Main function to verify the submitted reviews.
[ "Main", "function", "to", "verify", "the", "submitted", "reviews", "." ]
def main():
    """Main function to verify the submitted reviews."""
    review_requests_url = \
        "%s/api/review-requests/%s" % (REVIEWBOARD_URL, QUERY_PARAMS)

    review_requests = api(review_requests_url)
    review_ids = []
    for review_request in reversed(review_requests["review_requests"]):
        if (NUM_REVIEWS == -1 or len(review_ids) < NUM_REVIEWS) and \
                needs_verification(review_request):
            if not SKIP_VERIFY:
                verify_review(review_request)
            review_ids.append(str(review_request["id"]))

    write_review_ids(review_ids)
[ "def", "main", "(", ")", ":", "review_requests_url", "=", "\"%s/api/review-requests/%s\"", "%", "(", "REVIEWBOARD_URL", ",", "QUERY_PARAMS", ")", "review_requests", "=", "api", "(", "review_requests_url", ")", "review_ids", "=", "[", "]", "for", "review_request", "in", "reversed", "(", "review_requests", "[", "\"review_requests\"", "]", ")", ":", "if", "(", "NUM_REVIEWS", "==", "-", "1", "or", "len", "(", "review_ids", ")", "<", "NUM_REVIEWS", ")", "and", "needs_verification", "(", "review_request", ")", ":", "if", "not", "SKIP_VERIFY", ":", "verify_review", "(", "review_request", ")", "review_ids", ".", "append", "(", "str", "(", "review_request", "[", "\"id\"", "]", ")", ")", "write_review_ids", "(", "review_ids", ")" ]
https://github.com/apache/mesos/blob/97d9a4063332aae3825d78de71611657e05cf5e2/support/verify-reviews.py#L388-L402
hughperkins/tf-coriander
970d3df6c11400ad68405f22b0c42a52374e94ca
tensorflow/python/ops/nn_ops.py
python
_calc_bias_add_flops
(graph, node)
return ops.OpStats("flops", input_count)
Calculates the computing needed for BiasAdd.
Calculates the computing needed for BiasAdd.
[ "Calculates", "the", "computing", "needed", "for", "BiasAdd", "." ]
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                           node.input[0])
  input_shape.assert_is_fully_defined()
  input_count = np.prod(input_shape.as_list())
  return ops.OpStats("flops", input_count)
[ "def", "_calc_bias_add_flops", "(", "graph", ",", "node", ")", ":", "input_shape", "=", "graph_util", ".", "tensor_shape_from_node_def_name", "(", "graph", ",", "node", ".", "input", "[", "0", "]", ")", "input_shape", ".", "assert_is_fully_defined", "(", ")", "input_count", "=", "np", ".", "prod", "(", "input_shape", ".", "as_list", "(", ")", ")", "return", "ops", ".", "OpStats", "(", "\"flops\"", ",", "input_count", ")" ]
https://github.com/hughperkins/tf-coriander/blob/970d3df6c11400ad68405f22b0c42a52374e94ca/tensorflow/python/ops/nn_ops.py#L1640-L1645
mongodb/mongo
d8ff665343ad29cf286ee2cf4a1960d29371937b
buildscripts/mongosymb.py
python
PathDbgFileResolver.__init__
(self, bin_path_guess)
Initialize PathDbgFileResolver.
Initialize PathDbgFileResolver.
[ "Initialize", "PathDbgFileResolver", "." ]
def __init__(self, bin_path_guess):
    """Initialize PathDbgFileResolver."""
    self._bin_path_guess = os.path.realpath(bin_path_guess)
    self.mci_build_dir = None
[ "def", "__init__", "(", "self", ",", "bin_path_guess", ")", ":", "self", ".", "_bin_path_guess", "=", "os", ".", "path", ".", "realpath", "(", "bin_path_guess", ")", "self", ".", "mci_build_dir", "=", "None" ]
https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/mongosymb.py#L43-L46
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/python/framework/tensor_shape.py
python
Dimension.__gt__
(self, other)
Returns True if `self` is known to be greater than `other`.

Dimensions are compared as follows:

  Dimension(m)    > Dimension(n)    == m > n
  Dimension(m)    > Dimension(None) == None
  Dimension(None) > Dimension(n)    == None
  Dimension(None) > Dimension(None) == None

Args:
  other: Another Dimension.

Returns:
  The value of `self.value > other.value` if both are known, otherwise
  None.
Returns True if `self` is known to be greater than `other`.
[ "Returns", "True", "if", "self", "is", "known", "to", "be", "greater", "than", "other", "." ]
def __gt__(self, other):
  """Returns True if `self` is known to be greater than `other`.

  Dimensions are compared as follows:

    Dimension(m)    > Dimension(n)    == m > n
    Dimension(m)    > Dimension(None) == None
    Dimension(None) > Dimension(n)    == None
    Dimension(None) > Dimension(None) == None

  Args:
    other: Another Dimension.

  Returns:
    The value of `self.value > other.value` if both are known, otherwise
    None.
  """
  other = as_dimension(other)
  if self._value is None or other.value is None:
    return None
  else:
    return self._value > other.value
[ "def", "__gt__", "(", "self", ",", "other", ")", ":", "other", "=", "as_dimension", "(", "other", ")", "if", "self", ".", "_value", "is", "None", "or", "other", ".", "value", "is", "None", ":", "return", "None", "else", ":", "return", "self", ".", "_value", ">", "other", ".", "value" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/python/framework/tensor_shape.py#L311-L332
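`Dimension.__gt__` implements three-valued logic: comparison against an unknown dimension yields `None`, not `False`. A tiny standalone model of the rule, and the hazard it creates in boolean context:

```python
class Dim:
    def __init__(self, value=None):
        self.value = value  # None means "unknown"

    def __gt__(self, other):
        if self.value is None or other.value is None:
            return None  # unknown, deliberately not False
        return self.value > other.value

print(Dim(3) > Dim(2))     # True
print(Dim(3) > Dim(None))  # None -- so `if d1 > d2:` silently treats unknown as False
```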
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Editor/Python/windows/Lib/site-packages/pip/download.py
python
unpack_file_url
(link, location, download_dir=None)
Unpack link into location. If download_dir is provided and link points to a file, make a copy of the link file inside download_dir.
Unpack link into location. If download_dir is provided and link points to a file, make a copy of the link file inside download_dir.
[ "Unpack", "link", "into", "location", ".", "If", "download_dir", "is", "provided", "and", "link", "points", "to", "a", "file", "make", "a", "copy", "of", "the", "link", "file", "inside", "download_dir", "." ]
def unpack_file_url(link, location, download_dir=None):
    """Unpack link into location.
    If download_dir is provided and link points to a file, make a copy
    of the link file inside download_dir."""

    link_path = url_to_path(link.url_without_fragment)

    # If it's a url to a local directory
    if os.path.isdir(link_path):
        if os.path.isdir(location):
            rmtree(location)
        shutil.copytree(link_path, location, symlinks=True)
        if download_dir:
            logger.info('Link is a directory, ignoring download_dir')
        return

    # if link has a hash, let's confirm it matches
    if link.hash:
        link_path_hash = _get_hash_from_file(link_path, link)
        _check_hash(link_path_hash, link)

    # If a download dir is specified, is the file already there and valid?
    already_downloaded_path = None
    if download_dir:
        already_downloaded_path = _check_download_dir(link, download_dir)

    if already_downloaded_path:
        from_path = already_downloaded_path
    else:
        from_path = link_path

    content_type = mimetypes.guess_type(from_path)[0]

    # unpack the archive to the build dir location. even when only downloading
    # archives, they have to be unpacked to parse dependencies
    unpack_file(from_path, location, content_type, link)

    # a download dir is specified and not already downloaded
    if download_dir and not already_downloaded_path:
        _copy_file(from_path, download_dir, content_type, link)
[ "def", "unpack_file_url", "(", "link", ",", "location", ",", "download_dir", "=", "None", ")", ":", "link_path", "=", "url_to_path", "(", "link", ".", "url_without_fragment", ")", "# If it's a url to a local directory", "if", "os", ".", "path", ".", "isdir", "(", "link_path", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "location", ")", ":", "rmtree", "(", "location", ")", "shutil", ".", "copytree", "(", "link_path", ",", "location", ",", "symlinks", "=", "True", ")", "if", "download_dir", ":", "logger", ".", "info", "(", "'Link is a directory, ignoring download_dir'", ")", "return", "# if link has a hash, let's confirm it matches", "if", "link", ".", "hash", ":", "link_path_hash", "=", "_get_hash_from_file", "(", "link_path", ",", "link", ")", "_check_hash", "(", "link_path_hash", ",", "link", ")", "# If a download dir is specified, is the file already there and valid?", "already_downloaded_path", "=", "None", "if", "download_dir", ":", "already_downloaded_path", "=", "_check_download_dir", "(", "link", ",", "download_dir", ")", "if", "already_downloaded_path", ":", "from_path", "=", "already_downloaded_path", "else", ":", "from_path", "=", "link_path", "content_type", "=", "mimetypes", ".", "guess_type", "(", "from_path", ")", "[", "0", "]", "# unpack the archive to the build dir location. even when only downloading", "# archives, they have to be unpacked to parse dependencies", "unpack_file", "(", "from_path", ",", "location", ",", "content_type", ",", "link", ")", "# a download dir is specified and not already downloaded", "if", "download_dir", "and", "not", "already_downloaded_path", ":", "_copy_file", "(", "from_path", ",", "download_dir", ",", "content_type", ",", "link", ")" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/pip/download.py#L688-L727
TimoSaemann/caffe-segnet-cudnn5
abcf30dca449245e101bf4ced519f716177f0885
scripts/cpp_lint.py
python
_ShouldPrintError
(category, confidence, linenum)
return True
If confidence >= verbose, category passes filter and is not suppressed.
If confidence >= verbose, category passes filter and is not suppressed.
[ "If", "confidence", ">", "=", "verbose", "category", "passes", "filter", "and", "is", "not", "suppressed", "." ]
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""

  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False

  is_filtered = False
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      if category.startswith(one_filter[1:]):
        is_filtered = True
    elif one_filter.startswith('+'):
      if category.startswith(one_filter[1:]):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  if is_filtered:
    return False

  return True
[ "def", "_ShouldPrintError", "(", "category", ",", "confidence", ",", "linenum", ")", ":", "# There are three ways we might decide not to print an error message:", "# a \"NOLINT(category)\" comment appears in the source,", "# the verbosity level isn't high enough, or the filters filter it out.", "if", "IsErrorSuppressedByNolint", "(", "category", ",", "linenum", ")", ":", "return", "False", "if", "confidence", "<", "_cpplint_state", ".", "verbose_level", ":", "return", "False", "is_filtered", "=", "False", "for", "one_filter", "in", "_Filters", "(", ")", ":", "if", "one_filter", ".", "startswith", "(", "'-'", ")", ":", "if", "category", ".", "startswith", "(", "one_filter", "[", "1", ":", "]", ")", ":", "is_filtered", "=", "True", "elif", "one_filter", ".", "startswith", "(", "'+'", ")", ":", "if", "category", ".", "startswith", "(", "one_filter", "[", "1", ":", "]", ")", ":", "is_filtered", "=", "False", "else", ":", "assert", "False", "# should have been checked for in SetFilter.", "if", "is_filtered", ":", "return", "False", "return", "True" ]
https://github.com/TimoSaemann/caffe-segnet-cudnn5/blob/abcf30dca449245e101bf4ced519f716177f0885/scripts/cpp_lint.py#L961-L985
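The `+`/`-` filter walk is order-sensitive: the last matching prefix wins. A minimal standalone model of just that part, with an invented filter list:

```python
def is_filtered(category, filters):
    """Last matching +/- prefix wins; categories pass by default."""
    filtered = False
    for f in filters:
        if f.startswith('-') and category.startswith(f[1:]):
            filtered = True
        elif f.startswith('+') and category.startswith(f[1:]):
            filtered = False
    return filtered

flt = ['-whitespace', '+whitespace/indent']  # drop whitespace checks except indent
print(is_filtered('whitespace/tab', flt))     # True  (suppressed)
print(is_filtered('whitespace/indent', flt))  # False (re-enabled)
```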
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
tools/idl_parser/idl_parser.py
python
IDLParser.p_TypeSuffix
(self, p)
TypeSuffix : '[' integer ']' TypeSuffix
           | '[' ']' TypeSuffix
           | '?' TypeSuffixStartingWithArray
           |
TypeSuffix : '[' integer ']' TypeSuffix | '[' ']' TypeSuffix | '?' TypeSuffixStartingWithArray |
[ "TypeSuffix", ":", "[", "integer", "]", "TypeSuffix", "|", "[", "]", "TypeSuffix", "|", "?", "TypeSuffixStartingWithArray", "|" ]
def p_TypeSuffix(self, p): """TypeSuffix : '[' integer ']' TypeSuffix | '[' ']' TypeSuffix | '?' TypeSuffixStartingWithArray | """ if len(p) == 5: p[0] = self.BuildNamed('Array', p, 2, p[4]) if len(p) == 4: p[0] = self.BuildProduction('Array', p, 1, p[3]) if len(p) == 3: p[0] = ListFromConcat(self.BuildTrue('NULLABLE'), p[2])
[ "def", "p_TypeSuffix", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "5", ":", "p", "[", "0", "]", "=", "self", ".", "BuildNamed", "(", "'Array'", ",", "p", ",", "2", ",", "p", "[", "4", "]", ")", "if", "len", "(", "p", ")", "==", "4", ":", "p", "[", "0", "]", "=", "self", ".", "BuildProduction", "(", "'Array'", ",", "p", ",", "1", ",", "p", "[", "3", "]", ")", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "ListFromConcat", "(", "self", ".", "BuildTrue", "(", "'NULLABLE'", ")", ",", "p", "[", "2", "]", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/idl_parser/idl_parser.py#L772-L784
omnisci/omniscidb
b9c95f1bd602b4ffc8b0edf18bfad61031e08d86
python/omnisci/thrift/OmniSci.py
python
Client.get_license_claims
(self, session, nonce)
return self.recv_get_license_claims()
Parameters: - session - nonce
Parameters: - session - nonce
[ "Parameters", ":", "-", "session", "-", "nonce" ]
def get_license_claims(self, session, nonce):
    """
    Parameters:
     - session
     - nonce

    """
    self.send_get_license_claims(session, nonce)
    return self.recv_get_license_claims()
[ "def", "get_license_claims", "(", "self", ",", "session", ",", "nonce", ")", ":", "self", ".", "send_get_license_claims", "(", "session", ",", "nonce", ")", "return", "self", ".", "recv_get_license_claims", "(", ")" ]
https://github.com/omnisci/omniscidb/blob/b9c95f1bd602b4ffc8b0edf18bfad61031e08d86/python/omnisci/thrift/OmniSci.py#L4297-L4305
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/contexts/fitting_contexts/basic_fitting_context.py
python
BasicFittingContext.chi_squared
(self)
return self._chi_squared
Returns all of the chi squared values.
Returns all of the chi squared values.
[ "Returns", "all", "of", "the", "chi", "squared", "values", "." ]
def chi_squared(self) -> list:
    """Returns all of the chi squared values."""
    return self._chi_squared
[ "def", "chi_squared", "(", "self", ")", "->", "list", ":", "return", "self", ".", "_chi_squared" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/contexts/fitting_contexts/basic_fitting_context.py#L184-L186
oracle/graaljs
36a56e8e993d45fc40939a3a4d9c0c24990720f1
graal-nodejs/tools/gyp/pylib/gyp/MSVSUserFile.py
python
Writer.AddConfig
(self, name)
Adds a configuration to the project.

Args:
  name: Configuration name.
Adds a configuration to the project.
[ "Adds", "a", "configuration", "to", "the", "project", "." ]
def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ["Configuration", {"Name": name}]
[ "def", "AddConfig", "(", "self", ",", "name", ")", ":", "self", ".", "configurations", "[", "name", "]", "=", "[", "\"Configuration\"", ",", "{", "\"Name\"", ":", "name", "}", "]" ]
https://github.com/oracle/graaljs/blob/36a56e8e993d45fc40939a3a4d9c0c24990720f1/graal-nodejs/tools/gyp/pylib/gyp/MSVSUserFile.py#L72-L78
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Image.InsertHandler
(*args, **kwargs)
return _core_.Image_InsertHandler(*args, **kwargs)
InsertHandler(ImageHandler handler)
InsertHandler(ImageHandler handler)
[ "InsertHandler", "(", "ImageHandler", "handler", ")" ]
def InsertHandler(*args, **kwargs):
    """InsertHandler(ImageHandler handler)"""
    return _core_.Image_InsertHandler(*args, **kwargs)
[ "def", "InsertHandler", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Image_InsertHandler", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L3618-L3620
hpi-xnor/BMXNet
ed0b201da6667887222b8e4b5f997c4f6b61943d
python/mxnet/ndarray/ndarray.py
python
NDArray.__gt__
(self, other)
return greater(self, other)
x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y)
x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y)
[ "x", ".", "__gt__", "(", "y", ")", "<", "=", ">", "x", ">", "y", "<", "=", ">", "mx", ".", "nd", ".", "greater", "(", "x", "y", ")" ]
def __gt__(self, other):
    """x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y) """
    return greater(self, other)
[ "def", "__gt__", "(", "self", ",", "other", ")", ":", "return", "greater", "(", "self", ",", "other", ")" ]
https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/ndarray/ndarray.py#L322-L324
stitchEm/stitchEm
0f399501d41ab77933677f2907f41f80ceb704d7
lib/bindings/samples/server/glfw.py
python
_GLFWimage.unwrap
(self)
return self.width, self.height, pixels
Returns a nested python sequence.
Returns a nested python sequence.
[ "Returns", "a", "nested", "python", "sequence", "." ]
def unwrap(self):
    """
    Returns a nested python sequence.
    """
    pixels = [[[int(c) for c in p] for p in l] for l in self.pixels_array]
    return self.width, self.height, pixels
[ "def", "unwrap", "(", "self", ")", ":", "pixels", "=", "[", "[", "[", "int", "(", "c", ")", "for", "c", "in", "p", "]", "for", "p", "in", "l", "]", "for", "l", "in", "self", ".", "pixels_array", "]", "return", "self", ".", "width", ",", "self", ".", "height", ",", "pixels" ]
https://github.com/stitchEm/stitchEm/blob/0f399501d41ab77933677f2907f41f80ceb704d7/lib/bindings/samples/server/glfw.py#L323-L328
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/linalg/python/ops/linear_operator_util.py
python
matrix_adjoint
(a, name="matrix_adjoint")
Transposes last two dimensions of tensor `a`, and takes complex conjugate.

If `a` is real valued, the result is equivalent to `matrix_transpose`.

For example:

```python
# Matrix with no batch dimension.
# 'x' is [[1 2 3j]
#         [4 5 -6j]]
tf.matrix_adjoint(x) ==> [[1 4]
                          [2 5]
                          [-3j 6j]]

# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.matrix_adjoint(x) is shape [1, 2, 4, 3]
```

Note that `tf.matmul` provides kwargs allowing for adjoint of arguments. This
is done with minimal cost, and is preferable to using this function. E.g.

```
# Good! Adjoint is taken at minimal additional cost.
tf.matmul(matrix, b, adjoint_b=True)

# Inefficient!
tf.matmul(matrix, tf.matrix_adjoint(b))
```

Args:
  a: A `Tensor` with `rank >= 2`.
  name: A name for the operation (optional).

Returns:
  A batch matrix `Tensor` with same `dtype` as `a`.

Raises:
  ValueError: If `a` is determined statically to have `rank < 2`.
Transposes last two dimensions of tensor `a`, and takes complex conjugate.
[ "Transposes", "last", "two", "dimensions", "of", "tensor", "a", "and", "takes", "complex", "conjugate", "." ]
def matrix_adjoint(a, name="matrix_adjoint"): """Transposes last two dimensions of tensor `a`, and takes complex conjugate. If `a` is real valued, the result is equivalent to `matrix_transpose`. For example: ```python # Matrix with no batch dimension. # 'x' is [[1 2 3j] # [4 5 -6j]] tf.matrix_adjoint(x) ==> [[1 4] [2 5] [-3j 6j]] # Matrix with two batch dimensions. # x.shape is [1, 2, 3, 4] # tf.matrix_adjoint(x) is shape [1, 2, 4, 3] ``` Note that `tf.matmul` provides kwargs allowing for adjoint of arguments. This is done with minimal cost, and is preferable to using this function. E.g. ``` # Good! Adjoint is taken at minimal additional cost. tf.matmul(matrix, b, adjoint_b=True) # Inefficient! tf.matmul(matrix, tf.matrix_adjoint(b)) ``` Args: a: A `Tensor` with `rank >= 2`. name: A name for the operation (optional). Returns: A batch matrix `Tensor` with same `dtype` as `a`. Raises: ValueError: If `a` is determined statically to have `rank < 2`. """ with ops.name_scope(name, values=[a]): a = ops.convert_to_tensor(a, name="a") a_transpose = array_ops.matrix_transpose(a) return math_ops.conj(a_transpose)
[ "def", "matrix_adjoint", "(", "a", ",", "name", "=", "\"matrix_adjoint\"", ")", ":", "with", "ops", ".", "name_scope", "(", "name", ",", "values", "=", "[", "a", "]", ")", ":", "a", "=", "ops", ".", "convert_to_tensor", "(", "a", ",", "name", "=", "\"a\"", ")", "a_transpose", "=", "array_ops", ".", "matrix_transpose", "(", "a", ")", "return", "math_ops", ".", "conj", "(", "a_transpose", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/linalg/python/ops/linear_operator_util.py#L292-L336
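The batched conjugate transpose is easy to sanity-check in NumPy, where the equivalent operation is a swap of the last two axes plus `conj()`:

```python
import numpy as np

def matrix_adjoint(a):
    """Conjugate-transpose the last two dimensions of a (batched) array."""
    return np.conj(np.swapaxes(a, -1, -2))

x = np.array([[1, 2, 3j],
              [4, 5, -6j]])
print(matrix_adjoint(x).shape)  # (3, 2); the 3j entry is conjugated to -3j

batch = np.zeros((1, 2, 3, 4))
print(matrix_adjoint(batch).shape)  # (1, 2, 4, 3)
```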
grpc/grpc
27bc6fe7797e43298dc931b96dc57322d0852a9f
examples/python/cancellation/search.py
python
_bytestrings_of_length
(length)
Generates a stream containing all bytestrings of a given length.

Args:
  length: A positive integer length.

Yields:
  All bytestrings of length `length`.
Generates a stream containing all bytestrings of a given length.
[ "Generates", "a", "stream", "containing", "all", "bytestrings", "of", "a", "given", "length", "." ]
def _bytestrings_of_length(length):
    """Generates a stream containing all bytestrings of a given length.

    Args:
      length: A positive integer length.

    Yields:
      All bytestrings of length `length`.
    """
    for digits in itertools.product(range(_BYTE_MAX), repeat=length):
        yield b''.join(struct.pack('B', i) for i in digits)
[ "def", "_bytestrings_of_length", "(", "length", ")", ":", "for", "digits", "in", "itertools", ".", "product", "(", "range", "(", "_BYTE_MAX", ")", ",", "repeat", "=", "length", ")", ":", "yield", "b''", ".", "join", "(", "struct", ".", "pack", "(", "'B'", ",", "i", ")", "for", "i", "in", "digits", ")" ]
https://github.com/grpc/grpc/blob/27bc6fe7797e43298dc931b96dc57322d0852a9f/examples/python/cancellation/search.py#L73-L83
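On Python 3 the inner join collapses to `bytes(digits)`; `struct.pack('B', i)` is the 2-and-3 compatible spelling. A quick check, assuming `_BYTE_MAX` is 256 (one past the largest byte value; not confirmed from the source):

```python
import itertools
import struct

_BYTE_MAX = 256  # assumed value

def bytestrings_of_length(length):
    for digits in itertools.product(range(_BYTE_MAX), repeat=length):
        yield b''.join(struct.pack('B', i) for i in digits)
        # Python-3-only equivalent: yield bytes(digits)

gen = bytestrings_of_length(2)
print(next(gen), next(gen))  # b'\x00\x00' b'\x00\x01'
```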
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/learn/python/learn/estimators/run_config.py
python
ClusterConfig.get_task_id
()
return int(task_index) if task_index else 0
Returns task index from `TF_CONFIG` environmental variable.

If you have a ClusterConfig instance, you can just access its task_id
property instead of calling this function and re-parsing the environmental
variable.

Returns:
  `TF_CONFIG['task']['index']`. Defaults to 0.
Returns task index from `TF_CONFIG` environmental variable.
[ "Returns", "task", "index", "from", "TF_CONFIG", "environmental", "variable", "." ]
def get_task_id():
  """Returns task index from `TF_CONFIG` environmental variable.

  If you have a ClusterConfig instance, you can just access its task_id
  property instead of calling this function and re-parsing the environmental
  variable.

  Returns:
    `TF_CONFIG['task']['index']`. Defaults to 0.
  """
  config = json.loads(os.environ.get('TF_CONFIG') or '{}')
  task_env = config.get('task', {})
  task_index = task_env.get('index')
  return int(task_index) if task_index else 0
[ "def", "get_task_id", "(", ")", ":", "config", "=", "json", ".", "loads", "(", "os", ".", "environ", ".", "get", "(", "'TF_CONFIG'", ")", "or", "'{}'", ")", "task_env", "=", "config", ".", "get", "(", "'task'", ",", "{", "}", ")", "task_index", "=", "task_env", ".", "get", "(", "'index'", ")", "return", "int", "(", "task_index", ")", "if", "task_index", "else", "0" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/learn/python/learn/estimators/run_config.py#L195-L208
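`TF_CONFIG` is a JSON blob in the environment, so the parse is easy to exercise directly; the cluster spec below is a made-up example. Note the truthiness check also happens to return the right default when the index is 0:

```python
import json
import os

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {'worker': ['host0:2222', 'host1:2222']},  # hypothetical hosts
    'task': {'type': 'worker', 'index': 1},
})

config = json.loads(os.environ.get('TF_CONFIG') or '{}')
task_index = config.get('task', {}).get('index')
print(int(task_index) if task_index else 0)  # 1
```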
libornovax/master_thesis_code
6eca474ed3cae673afde010caef338cf7349f839
scripts/nets/macc_net_generator.py
python
MACCNetGenerator._add_layer
(self, line, outfile, deploy)
Adds one layer to the PROTOTXT file specified by the line.

Input:
    line:    string with layer description (one line from the config file)
    outfile: File handle into which we will write the layer
    deploy:  True/False
Adds one layer to the PROTOTXT file specified by the line.
[ "Adds", "one", "layer", "to", "the", "PROTOTXT", "file", "specified", "by", "the", "line", "." ]
def _add_layer(self, line, outfile, deploy):
    """
    Adds one layer to the PROTOTXT file specified by the line.

    Input:
        line:    string with layer description (one line from the config file)
        outfile: File handle into which we will write the layer
        deploy:  True/False
    """
    layer_type = line[:4]

    if layer_type == 'conv':
        # Convolutional layer
        outfile.write(self._layer_conv(line, deploy))
        outfile.write(self._layer_relu())
    elif layer_type == 'pool':
        # Pooling layer
        outfile.write(self._layer_pool())
    elif layer_type == 'macc':
        # Multiscale accumulator - this is also a convolutional layer, but with
        # 1 output channel
        outfile.write(self._layer_macc(line, deploy))
[ "def", "_add_layer", "(", "self", ",", "line", ",", "outfile", ",", "deploy", ")", ":", "layer_type", "=", "line", "[", ":", "4", "]", "if", "layer_type", "==", "'conv'", ":", "# Convolutional layer", "outfile", ".", "write", "(", "self", ".", "_layer_conv", "(", "line", ",", "deploy", ")", ")", "outfile", ".", "write", "(", "self", ".", "_layer_relu", "(", ")", ")", "elif", "layer_type", "==", "'pool'", ":", "# Pooling layer", "outfile", ".", "write", "(", "self", ".", "_layer_pool", "(", ")", ")", "elif", "layer_type", "==", "'macc'", ":", "# Multiscale accumulator - this is also a convolutional layer, but with", "# 1 output channel", "outfile", ".", "write", "(", "self", ".", "_layer_macc", "(", "line", ",", "deploy", ")", ")" ]
https://github.com/libornovax/master_thesis_code/blob/6eca474ed3cae673afde010caef338cf7349f839/scripts/nets/macc_net_generator.py#L163-L184
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqtinterfaces/mantidqtinterfaces/drill/view/DrillTableWidget.py
python
DrillTableWidget.eraseRow
(self, position)
Erase the contents of a whole row (if it exists).

Args:
    position (int): row index
Erase the contents of a whole row (if it exists).
[ "Erase", "the", "contents", "of", "a", "whole", "row", "(", "if", "it", "exists", ")", "." ]
def eraseRow(self, position):
    """
    Erase the contents of a whole row (if it exists).

    Args:
        position (int): row index
    """
    if self._disabled:
        return
    n_rows = self.rowCount()
    if ((position < 0) or (position >= n_rows)):
        return
    for column in range(self.columnCount()):
        item = self.item(position, column)
        if item:
            item.setData(Qt.EditRole, None)
[ "def", "eraseRow", "(", "self", ",", "position", ")", ":", "if", "self", ".", "_disabled", ":", "return", "n_rows", "=", "self", ".", "rowCount", "(", ")", "if", "(", "(", "position", "<", "0", ")", "or", "(", "position", ">=", "n_rows", ")", ")", ":", "return", "for", "column", "in", "range", "(", "self", ".", "columnCount", "(", ")", ")", ":", "item", "=", "self", ".", "item", "(", "position", ",", "column", ")", "if", "item", ":", "item", ".", "setData", "(", "Qt", ".", "EditRole", ",", "None", ")" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/drill/view/DrillTableWidget.py#L162-L177