Dataset schema (one line per field in each record below; "string" fields list their min–max value lengths, "1 class" means a single distinct value):

field               type        lengths
nwo                 string      5 – 86
sha                 string      40 – 40
path                string      4 – 189
language            string      1 class
identifier          string      1 – 94
parameters          string      2 – 4.03k
argument_list       string      1 class
return_statement    string      0 – 11.5k
docstring           string      1 – 33.2k
docstring_summary   string      0 – 5.15k
docstring_tokens    sequence
function            string      34 – 151k
function_tokens     sequence
url                 string      90 – 278
SFTtech/openage
d6a08c53c48dc1e157807471df92197f6ca9e04d
openage/convert/processor/conversion/aoc/upgrade_resource_subprocessor.py
python
AoCUpgradeResourceSubprocessor.reveal_enemy_upgrade
(converter_group, value, operator, team=False)
return patches
Creates a patch for the reveal enemy modify effect (ID: 183). :param converter_group: Tech/Civ that gets the patch. :type converter_group: ...dataformat.converter_object.ConverterObjectGroup :param value: Value used for patching the member. :type value: MemberOperator :param operator: Operator used for patching the member. :type operator: MemberOperator :returns: The forward references for the generated patches. :rtype: list
Creates a patch for the reveal enemy modify effect (ID: 183).
[ "Creates", "a", "patch", "for", "the", "reveal", "enemy", "modify", "effect", "(", "ID", ":", "183", ")", "." ]
def reveal_enemy_upgrade(converter_group, value, operator, team=False):
    """
    Creates a patch for the reveal enemy modify effect (ID: 183).

    :param converter_group: Tech/Civ that gets the patch.
    :type converter_group: ...dataformat.converter_object.ConverterObjectGroup
    :param value: Value used for patching the member.
    :type value: MemberOperator
    :param operator: Operator used for patching the member.
    :type operator: MemberOperator
    :returns: The forward references for the generated patches.
    :rtype: list
    """
    patches = []

    # TODO: Implement

    return patches
[ "def", "reveal_enemy_upgrade", "(", "converter_group", ",", "value", ",", "operator", ",", "team", "=", "False", ")", ":", "patches", "=", "[", "]", "# TODO: Implement", "return", "patches" ]
https://github.com/SFTtech/openage/blob/d6a08c53c48dc1e157807471df92197f6ca9e04d/openage/convert/processor/conversion/aoc/upgrade_resource_subprocessor.py#L1004-L1021
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
setup/snap.py
python
TNEANet.GetEI
(self, *args)
return _snap.TNEANet_GetEI(self, *args)
GetEI(TNEANet self, int const & SrcNId, int const & DstNId) -> TNEANet::TEdgeI Parameters: SrcNId: int const & DstNId: int const &
GetEI(TNEANet self, int const & SrcNId, int const & DstNId) -> TNEANet::TEdgeI
[ "GetEI", "(", "TNEANet", "self", "int", "const", "&", "SrcNId", "int", "const", "&", "DstNId", ")", "-", ">", "TNEANet", "::", "TEdgeI" ]
def GetEI(self, *args):
    """
    GetEI(TNEANet self, int const & SrcNId, int const & DstNId) -> TNEANet::TEdgeI

    Parameters:
        SrcNId: int const &
        DstNId: int const &
    """
    return _snap.TNEANet_GetEI(self, *args)
[ "def", "GetEI", "(", "self", ",", "*", "args", ")", ":", "return", "_snap", ".", "TNEANet_GetEI", "(", "self", ",", "*", "args", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L21873-L21882
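A hedged usage sketch for this SWIG wrapper, assuming the documented snap-python graph API (TNEANet.New, AddNode, AddEdge, and the TEdgeI accessors):

import snap

G = snap.TNEANet.New()
G.AddNode(1)
G.AddNode(2)
G.AddEdge(1, 2)                        # directed edge 1 -> 2
EI = G.GetEI(1, 2)                     # TEdgeI positioned at that edge
print(EI.GetSrcNId(), EI.GetDstNId())  # 1 2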
MTG/gaia
0f7214dbdec6f9b651ca34211824841ffba0bc77
src/doc/doxy2swig.py
python
Doxy2SWIG.start_new_paragraph
(self)
Make sure to create an empty line. This is overridden, if the previous text ends with the special marker ''. In that case, nothing is done.
Make sure to create an empty line. This is overridden, if the previous text ends with the special marker ''. In that case, nothing is done.
[ "Make", "sure", "to", "create", "an", "empty", "line", ".", "This", "is", "overridden", "if", "the", "previous", "text", "ends", "with", "the", "special", "marker", ".", "In", "that", "case", "nothing", "is", "done", "." ]
def start_new_paragraph(self):
    """Make sure to create an empty line. This is overridden, if the previous
    text ends with the special marker ''. In that case, nothing is done.
    """
    if self.pieces[-1:] == ['']:  # respect special marker
        return
    elif self.pieces == []:  # first paragraph, add '\n', override with ''
        self.pieces = ['\n']
    elif self.pieces[-1][-1:] != '\n':  # previous line not ended
        self.pieces.extend([' \n', '\n'])
    else:  # default
        self.pieces.append('\n')
[ "def", "start_new_paragraph", "(", "self", ")", ":", "if", "self", ".", "pieces", "[", "-", "1", ":", "]", "==", "[", "''", "]", ":", "# respect special marker", "return", "elif", "self", ".", "pieces", "==", "[", "]", ":", "# first paragraph, add '\\n', override with ''", "self", ".", "pieces", "=", "[", "'\\n'", "]", "elif", "self", ".", "pieces", "[", "-", "1", "]", "[", "-", "1", ":", "]", "!=", "'\\n'", ":", "# previous line not ended", "self", ".", "pieces", ".", "extend", "(", "[", "' \\n'", ",", "'\\n'", "]", ")", "else", ":", "#default", "self", ".", "pieces", ".", "append", "(", "'\\n'", ")" ]
https://github.com/MTG/gaia/blob/0f7214dbdec6f9b651ca34211824841ffba0bc77/src/doc/doxy2swig.py#L293-L304
microsoft/TSS.MSR
0f2516fca2cd9929c31d5450e39301c9bde43688
TSS.Py/src/TpmTypes.py
python
TPM2_ObjectChangeAuth_REQUEST.toTpm
(self, buf)
TpmMarshaller method
TpmMarshaller method
[ "TpmMarshaller", "method" ]
def toTpm(self, buf):
    """TpmMarshaller method"""
    buf.writeSizedByteBuf(self.newAuth)
[ "def", "toTpm", "(", "self", ",", "buf", ")", ":", "buf", ".", "writeSizedByteBuf", "(", "self", ".", "newAuth", ")" ]
https://github.com/microsoft/TSS.MSR/blob/0f2516fca2cd9929c31d5450e39301c9bde43688/TSS.Py/src/TpmTypes.py#L10094-L10096
generalized-intelligence/GAAS
29ab17d3e8a4ba18edef3a57c36d8db6329fac73
deprecated/algorithms/sfm/OpenSfM/opensfm/commands/match_features.py
python
match
(args)
Compute all matches for a single image
Compute all matches for a single image
[ "Compute", "all", "matches", "for", "a", "single", "image" ]
def match(args):
    """Compute all matches for a single image"""
    log.setup()

    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    im1_matches = {}

    for im2 in candidates:
        # preemptive matching
        if preemptive_threshold > 0:
            t = timer()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(
                ctx.f_pre[im1], ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), timer() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based of preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if config['matcher_type'] == 'FLANN':
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
        else:
            i1 = None
            i2 = None

        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            timer() - t_robust_matching))

        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))

    ctx.data.save_matches(im1, im1_matches)
[ "def", "match", "(", "args", ")", ":", "log", ".", "setup", "(", ")", "im1", ",", "candidates", ",", "i", ",", "n", ",", "ctx", "=", "args", "logger", ".", "info", "(", "'Matching {} - {} / {}'", ".", "format", "(", "im1", ",", "i", "+", "1", ",", "n", ")", ")", "config", "=", "ctx", ".", "data", ".", "config", "robust_matching_min_match", "=", "config", "[", "'robust_matching_min_match'", "]", "preemptive_threshold", "=", "config", "[", "'preemptive_threshold'", "]", "lowes_ratio", "=", "config", "[", "'lowes_ratio'", "]", "preemptive_lowes_ratio", "=", "config", "[", "'preemptive_lowes_ratio'", "]", "im1_matches", "=", "{", "}", "for", "im2", "in", "candidates", ":", "# preemptive matching", "if", "preemptive_threshold", ">", "0", ":", "t", "=", "timer", "(", ")", "config", "[", "'lowes_ratio'", "]", "=", "preemptive_lowes_ratio", "matches_pre", "=", "matching", ".", "match_lowe_bf", "(", "ctx", ".", "f_pre", "[", "im1", "]", ",", "ctx", ".", "f_pre", "[", "im2", "]", ",", "config", ")", "config", "[", "'lowes_ratio'", "]", "=", "lowes_ratio", "logger", ".", "debug", "(", "\"Preemptive matching {0}, time: {1}s\"", ".", "format", "(", "len", "(", "matches_pre", ")", ",", "timer", "(", ")", "-", "t", ")", ")", "if", "len", "(", "matches_pre", ")", "<", "preemptive_threshold", ":", "logger", ".", "debug", "(", "\"Discarding based of preemptive matches {0} < {1}\"", ".", "format", "(", "len", "(", "matches_pre", ")", ",", "preemptive_threshold", ")", ")", "continue", "# symmetric matching", "t", "=", "timer", "(", ")", "p1", ",", "f1", ",", "c1", "=", "ctx", ".", "data", ".", "load_features", "(", "im1", ")", "p2", ",", "f2", ",", "c2", "=", "ctx", ".", "data", ".", "load_features", "(", "im2", ")", "if", "config", "[", "'matcher_type'", "]", "==", "'FLANN'", ":", "i1", "=", "ctx", ".", "data", ".", "load_feature_index", "(", "im1", ",", "f1", ")", "i2", "=", "ctx", ".", "data", ".", "load_feature_index", "(", "im2", ",", "f2", ")", "else", ":", "i1", "=", "None", "i2", "=", "None", "matches", "=", "matching", ".", "match_symmetric", "(", "f1", ",", "i1", ",", "f2", ",", "i2", ",", "config", ")", "logger", ".", "debug", "(", "'{} - {} has {} candidate matches'", ".", "format", "(", "im1", ",", "im2", ",", "len", "(", "matches", ")", ")", ")", "if", "len", "(", "matches", ")", "<", "robust_matching_min_match", ":", "im1_matches", "[", "im2", "]", "=", "[", "]", "continue", "# robust matching", "t_robust_matching", "=", "timer", "(", ")", "camera1", "=", "ctx", ".", "cameras", "[", "ctx", ".", "exifs", "[", "im1", "]", "[", "'camera'", "]", "]", "camera2", "=", "ctx", ".", "cameras", "[", "ctx", ".", "exifs", "[", "im2", "]", "[", "'camera'", "]", "]", "rmatches", "=", "matching", ".", "robust_match", "(", "p1", ",", "p2", ",", "camera1", ",", "camera2", ",", "matches", ",", "config", ")", "if", "len", "(", "rmatches", ")", "<", "robust_matching_min_match", ":", "im1_matches", "[", "im2", "]", "=", "[", "]", "continue", "im1_matches", "[", "im2", "]", "=", "rmatches", "logger", ".", "debug", "(", "'Robust matching time : {0}s'", ".", "format", "(", "timer", "(", ")", "-", "t_robust_matching", ")", ")", "logger", ".", "debug", "(", "\"Full matching {0} / {1}, time: {2}s\"", ".", "format", "(", "len", "(", "rmatches", ")", ",", "len", "(", "matches", ")", ",", "timer", "(", ")", "-", "t", ")", ")", "ctx", ".", "data", ".", "save_matches", "(", "im1", ",", "im1_matches", ")" ]
https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/deprecated/algorithms/sfm/OpenSfM/opensfm/commands/match_features.py#L211-L278
adobe/chromium
cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7
tools/python/google/platform_utils_win.py
python
PlatformUtility.GetAbsolutePath
(self, path, force=False)
return _cygpath_proc.stdout.readline().rstrip()
Returns an absolute windows path. If platform is cygwin, converts it to windows style using cygpath. For performance reasons, we use a single cygpath process, shared among all instances of this class. Otherwise Python can run out of file handles.
Returns an absolute windows path. If platform is cygwin, converts it to windows style using cygpath.
[ "Returns", "an", "absolute", "windows", "path", ".", "If", "platform", "is", "cygwin", "converts", "it", "to", "windows", "style", "using", "cygpath", "." ]
def GetAbsolutePath(self, path, force=False):
    """Returns an absolute windows path. If platform is cygwin, converts it
    to windows style using cygpath.

    For performance reasons, we use a single cygpath process, shared among
    all instances of this class. Otherwise Python can run out of file handles.
    """
    if not force and sys.platform != "cygwin":
        return os.path.abspath(path)
    global _cygpath_proc
    if not _cygpath_proc:
        cygpath_command = [self._PathToExecutable("cygpath.exe"),
                           "-a", "-m", "-f", "-"]
        _cygpath_proc = subprocess.Popen(cygpath_command,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE)
    _cygpath_proc.stdin.write(path + "\n")
    return _cygpath_proc.stdout.readline().rstrip()
[ "def", "GetAbsolutePath", "(", "self", ",", "path", ",", "force", "=", "False", ")", ":", "if", "not", "force", "and", "sys", ".", "platform", "!=", "\"cygwin\"", ":", "return", "os", ".", "path", ".", "abspath", "(", "path", ")", "global", "_cygpath_proc", "if", "not", "_cygpath_proc", ":", "cygpath_command", "=", "[", "self", ".", "_PathToExecutable", "(", "\"cygpath.exe\"", ")", ",", "\"-a\"", ",", "\"-m\"", ",", "\"-f\"", ",", "\"-\"", "]", "_cygpath_proc", "=", "subprocess", ".", "Popen", "(", "cygpath_command", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "_cygpath_proc", ".", "stdin", ".", "write", "(", "path", "+", "\"\\n\"", ")", "return", "_cygpath_proc", ".", "stdout", ".", "readline", "(", ")", ".", "rstrip", "(", ")" ]
https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/tools/python/google/platform_utils_win.py#L37-L54
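The docstring's point is the shared long-lived helper process; a minimal sketch of that pattern with a Python child standing in for cygpath (the child command is hypothetical, any line-oriented filter works):

import subprocess
import sys

_proc = None  # one shared child, mirroring the module-global _cygpath_proc

def upper(line):
    """Send one line to the shared child and read one line back."""
    global _proc
    if not _proc:
        child_src = "import sys\nfor l in sys.stdin: sys.stdout.write(l.upper())"
        _proc = subprocess.Popen([sys.executable, "-u", "-c", child_src],
                                 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                 text=True)
    _proc.stdin.write(line + "\n")
    _proc.stdin.flush()
    return _proc.stdout.readline().rstrip()

print(upper("hello"))  # HELLO -- later calls reuse the same child process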
generalized-intelligence/GAAS
29ab17d3e8a4ba18edef3a57c36d8db6329fac73
deprecated/algorithms/sfm/OpenSfM/opensfm/types.py
python
Pose.translation
(self)
return self._translation
Translation vector.
Translation vector.
[ "Translation", "vector", "." ]
def translation(self):
    """Translation vector."""
    return self._translation
[ "def", "translation", "(", "self", ")", ":", "return", "self", ".", "_translation" ]
https://github.com/generalized-intelligence/GAAS/blob/29ab17d3e8a4ba18edef3a57c36d8db6329fac73/deprecated/algorithms/sfm/OpenSfM/opensfm/types.py#L34-L36
deepmind/open_spiel
4ca53bea32bb2875c7385d215424048ae92f78c8
open_spiel/python/pytorch/rcfr.py
python
relu
(v)
return np.maximum(v, 0)
Returns the element-wise maximum between `v` and 0.
Returns the element-wise maximum between `v` and 0.
[ "Returns", "the", "element", "-", "wise", "maximum", "between", "v", "and", "0", "." ]
def relu(v):
    """Returns the element-wise maximum between `v` and 0."""
    return np.maximum(v, 0)
[ "def", "relu", "(", "v", ")", ":", "return", "np", ".", "maximum", "(", "v", ",", "0", ")" ]
https://github.com/deepmind/open_spiel/blob/4ca53bea32bb2875c7385d215424048ae92f78c8/open_spiel/python/pytorch/rcfr.py#L416-L418
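A quick check of the element-wise clamp described above (a minimal sketch; assumes numpy is installed):

import numpy as np

v = np.array([-1.5, 0.0, 2.0])
print(np.maximum(v, 0))  # [0. 0. 2.] -- negatives are clamped to zero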
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pathlib.py
python
PurePath.as_posix
(self)
return str(self).replace(f.sep, '/')
Return the string representation of the path with forward (/) slashes.
Return the string representation of the path with forward (/) slashes.
[ "Return", "the", "string", "representation", "of", "the", "path", "with", "forward", "(", "/", ")", "slashes", "." ]
def as_posix(self):
    """Return the string representation of the path with forward (/)
    slashes."""
    f = self._flavour
    return str(self).replace(f.sep, '/')
[ "def", "as_posix", "(", "self", ")", ":", "f", "=", "self", ".", "_flavour", "return", "str", "(", "self", ")", ".", "replace", "(", "f", ".", "sep", ",", "'/'", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pathlib.py#L722-L726
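A usage sketch with the public pathlib API, where the flavour separator is the backslash:

from pathlib import PureWindowsPath

p = PureWindowsPath(r"C:\Users\dev\project")
print(p.as_posix())  # C:/Users/dev/project -- separators swapped for '/'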
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py2/pandas/core/arrays/period.py
python
PeriodArray._from_datetime64
(cls, data, freq, tz=None)
return cls(data, freq=freq)
Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq]
Construct a PeriodArray from a datetime64 array
[ "Construct", "a", "PeriodArray", "from", "a", "datetime64", "array" ]
def _from_datetime64(cls, data, freq, tz=None):
    """
    Construct a PeriodArray from a datetime64 array

    Parameters
    ----------
    data : ndarray[datetime64[ns], datetime64[ns, tz]]
    freq : str or Tick
    tz : tzinfo, optional

    Returns
    -------
    PeriodArray[freq]
    """
    data, freq = dt64arr_to_periodarr(data, freq, tz)
    return cls(data, freq=freq)
[ "def", "_from_datetime64", "(", "cls", ",", "data", ",", "freq", ",", "tz", "=", "None", ")", ":", "data", ",", "freq", "=", "dt64arr_to_periodarr", "(", "data", ",", "freq", ",", "tz", ")", "return", "cls", "(", "data", ",", "freq", "=", "freq", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py2/pandas/core/arrays/period.py#L204-L219
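_from_datetime64 is a private constructor; a hedged sketch of the equivalent public route, assuming an ordinary pandas install:

import pandas as pd

dti = pd.date_range("2000-01-01", periods=3, freq="D")  # datetime64[ns] index
pi = dti.to_period("D")  # DatetimeIndex -> PeriodIndex
print(pi.array)          # the backing PeriodArray, as _from_datetime64 builds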
GJDuck/LowFat
ecf6a0f0fa1b73a27a626cf493cc39e477b6faea
llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/cached/genk-timing.py
python
KScriptGenerator.setCallWeighting
(self, weight)
Sets the probability of generating a function call
Sets the probability of generating a function call
[ "Sets", "the", "probability", "of", "generating", "a", "function", "call" ]
def setCallWeighting(self, weight):
    """ Sets the probability of generating a function call"""
    self.callWeighting = weight
[ "def", "setCallWeighting", "(", "self", ",", "weight", ")", ":", "self", ".", "callWeighting", "=", "weight" ]
https://github.com/GJDuck/LowFat/blob/ecf6a0f0fa1b73a27a626cf493cc39e477b6faea/llvm-4.0.0.src/examples/Kaleidoscope/MCJIT/cached/genk-timing.py#L80-L82
infinit/elle
a8154593c42743f45b9df09daf62b44630c24a02
drake/src/drake/ocaml/ocamllex.py
python
Ocamllex.source
(self)
return self.__source
The ocamllex source file.
The ocamllex source file.
[ "The", "ocamllex", "source", "file", "." ]
def source(self):
    '''The ocamllex source file.'''
    return self.__source
[ "def", "source", "(", "self", ")", ":", "return", "self", ".", "__source" ]
https://github.com/infinit/elle/blob/a8154593c42743f45b9df09daf62b44630c24a02/drake/src/drake/ocaml/ocamllex.py#L42-L44
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/misc_util.py
python
is_local_src_dir
(directory)
return os.path.isdir(new_dir)
Return true if directory is local directory.
Return true if directory is local directory.
[ "Return", "true", "if", "directory", "is", "local", "directory", "." ]
def is_local_src_dir(directory):
    """Return true if directory is local directory.
    """
    if not is_string(directory):
        return False
    abs_dir = os.path.abspath(directory)
    c = os.path.commonprefix([os.getcwd(), abs_dir])
    new_dir = abs_dir[len(c):].split(os.sep)
    if new_dir and not new_dir[0]:
        new_dir = new_dir[1:]
    if new_dir and new_dir[0] == 'build':
        return False
    new_dir = os.sep.join(new_dir)
    return os.path.isdir(new_dir)
[ "def", "is_local_src_dir", "(", "directory", ")", ":", "if", "not", "is_string", "(", "directory", ")", ":", "return", "False", "abs_dir", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "c", "=", "os", ".", "path", ".", "commonprefix", "(", "[", "os", ".", "getcwd", "(", ")", ",", "abs_dir", "]", ")", "new_dir", "=", "abs_dir", "[", "len", "(", "c", ")", ":", "]", ".", "split", "(", "os", ".", "sep", ")", "if", "new_dir", "and", "not", "new_dir", "[", "0", "]", ":", "new_dir", "=", "new_dir", "[", "1", ":", "]", "if", "new_dir", "and", "new_dir", "[", "0", "]", "==", "'build'", ":", "return", "False", "new_dir", "=", "os", ".", "sep", ".", "join", "(", "new_dir", ")", "return", "os", ".", "path", ".", "isdir", "(", "new_dir", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/numpy/distutils/misc_util.py#L566-L579
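The check is easiest to see relative to the current working directory; a sketch with hypothetical paths, assuming the numpy.distutils import below:

import os
from numpy.distutils.misc_util import is_local_src_dir

os.makedirs("pkg/src", exist_ok=True)  # hypothetical source dir under the cwd
print(is_local_src_dir("pkg/src"))     # True: an existing dir below the cwd
print(is_local_src_dir("/usr/lib"))    # typically False: resolved relative to the cwd
print(is_local_src_dir("build/temp"))  # False: anything under build/ is rejected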
martinmoene/variant-lite
f1af3518e4c28f12b09839b9d2ee37984cbf137a
script/upload-conan.py
python
versionFrom
( filename )
return version
Obtain version from conanfile.py
Obtain version from conanfile.py
[ "Obtain", "version", "from", "conanfile", ".", "py" ]
def versionFrom( filename ):
    """Obtain version from conanfile.py"""
    with open( filename ) as f:
        content = f.read()
        version = re.search(r'version\s=\s"(.*)"', content).group(1)
    return version
[ "def", "versionFrom", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "content", "=", "f", ".", "read", "(", ")", "version", "=", "re", ".", "search", "(", "r'version\\s=\\s\"(.*)\"'", ",", "content", ")", ".", "group", "(", "1", ")", "return", "version" ]
https://github.com/martinmoene/variant-lite/blob/f1af3518e4c28f12b09839b9d2ee37984cbf137a/script/upload-conan.py#L31-L36
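A round-trip sketch of the regex against a hypothetical conanfile.py body (the pattern expects a literal `version = "..."` assignment with single spaces):

import re

content = 'class VariantLiteConan:\n    version = "2.0.0"\n'
print(re.search(r'version\s=\s"(.*)"', content).group(1))  # 2.0.0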
etotheipi/BitcoinArmory
2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98
armorycolors.py
python
luminance
(qcolor)
return int(0.2*r + 0.6*g + 0.2*b)
Gives the pseudo-equivalent greyscale value of this color
Gives the pseudo-equivalent greyscale value of this color
[ "Gives", "the", "pseudo", "-", "equivalent", "greyscale", "value", "of", "this", "color" ]
def luminance(qcolor):
    """ Gives the pseudo-equivalent greyscale value of this color """
    r, g, b = qcolor.red(), qcolor.green(), qcolor.blue()
    return int(0.2*r + 0.6*g + 0.2*b)
[ "def", "luminance", "(", "qcolor", ")", ":", "r", ",", "g", ",", "b", "=", "qcolor", ".", "red", "(", ")", ",", "qcolor", ".", "green", "(", ")", ",", "qcolor", ".", "blue", "(", ")", "return", "int", "(", "0.2", "*", "r", "+", "0.6", "*", "g", "+", "0.2", "*", "b", ")" ]
https://github.com/etotheipi/BitcoinArmory/blob/2a6fc5355bb0c6fe26e387ccba30a5baafe8cd98/armorycolors.py#L79-L82
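The weights approximate perceived brightness (green counts most); a Qt-free sketch on plain RGB values, keeping the file's own 0.2/0.6/0.2 weighting (note these differ from the Rec. 601 luma coefficients):

def luminance_rgb(r, g, b):
    # same weighting as luminance() above, minus the QColor accessors
    return int(0.2 * r + 0.6 * g + 0.2 * b)

print(luminance_rgb(255, 0, 0))  # 51  -- pure red maps to a dark grey
print(luminance_rgb(0, 255, 0))  # 153 -- pure green maps much brighter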
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_core.py
python
Window.GetChildren
(*args, **kwargs)
return _core_.Window_GetChildren(*args, **kwargs)
GetChildren(self) -> WindowList Returns an object containing a list of the window's children. The object provides a Python sequence-like interface over the internal list maintained by the window.
GetChildren(self) -> WindowList
[ "GetChildren", "(", "self", ")", "-", ">", "WindowList" ]
def GetChildren(*args, **kwargs):
    """
    GetChildren(self) -> WindowList

    Returns an object containing a list of the window's children.  The
    object provides a Python sequence-like interface over the internal
    list maintained by the window.
    """
    return _core_.Window_GetChildren(*args, **kwargs)
[ "def", "GetChildren", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "Window_GetChildren", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L10247-L10255
epiqc/ScaffCC
66a79944ee4cd116b27bc1a69137276885461db8
llvm/utils/benchmark/mingw.py
python
repository
(urls = urls, log = EmptyLogger())
return versions
Downloads mingw-builds repository files and parses them
Downloads mingw-builds repository files and parses them
[ "Downloads", "mingw", "-", "builds", "repository", "files", "and", "parses", "them" ]
def repository(urls = urls, log = EmptyLogger()):
    '''
    Downloads mingw-builds repository files and parses them
    '''
    log.info('getting mingw-builds repository')
    versions = {}
    re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
    re_sub = r'http://downloads.sourceforge.net/project/\1'
    for url in urls:
        log.debug(' - requesting: %s', url)
        socket = request.urlopen(url)
        repo = socket.read()
        if not isinstance(repo, str):
            repo = repo.decode()
        socket.close()
        for entry in repo.split('\n')[:-1]:
            value = entry.split('|')
            version = tuple([int(n) for n in value[0].strip().split('.')])
            version = versions.setdefault(version, {})
            arch = value[1].strip()
            if arch == 'x32':
                arch = 'i686'
            elif arch == 'x64':
                arch = 'x86_64'
            arch = version.setdefault(arch, {})
            threading = arch.setdefault(value[2].strip(), {})
            exceptions = threading.setdefault(value[3].strip(), {})
            revision = exceptions.setdefault(int(value[4].strip()[3:]),
                re_sourceforge.sub(re_sub, value[5].strip()))
    return versions
[ "def", "repository", "(", "urls", "=", "urls", ",", "log", "=", "EmptyLogger", "(", ")", ")", ":", "log", ".", "info", "(", "'getting mingw-builds repository'", ")", "versions", "=", "{", "}", "re_sourceforge", "=", "re", ".", "compile", "(", "r'http://sourceforge.net/projects/([^/]+)/files'", ")", "re_sub", "=", "r'http://downloads.sourceforge.net/project/\\1'", "for", "url", "in", "urls", ":", "log", ".", "debug", "(", "' - requesting: %s'", ",", "url", ")", "socket", "=", "request", ".", "urlopen", "(", "url", ")", "repo", "=", "socket", ".", "read", "(", ")", "if", "not", "isinstance", "(", "repo", ",", "str", ")", ":", "repo", "=", "repo", ".", "decode", "(", ")", "socket", ".", "close", "(", ")", "for", "entry", "in", "repo", ".", "split", "(", "'\\n'", ")", "[", ":", "-", "1", "]", ":", "value", "=", "entry", ".", "split", "(", "'|'", ")", "version", "=", "tuple", "(", "[", "int", "(", "n", ")", "for", "n", "in", "value", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "'.'", ")", "]", ")", "version", "=", "versions", ".", "setdefault", "(", "version", ",", "{", "}", ")", "arch", "=", "value", "[", "1", "]", ".", "strip", "(", ")", "if", "arch", "==", "'x32'", ":", "arch", "=", "'i686'", "elif", "arch", "==", "'x64'", ":", "arch", "=", "'x86_64'", "arch", "=", "version", ".", "setdefault", "(", "arch", ",", "{", "}", ")", "threading", "=", "arch", ".", "setdefault", "(", "value", "[", "2", "]", ".", "strip", "(", ")", ",", "{", "}", ")", "exceptions", "=", "threading", ".", "setdefault", "(", "value", "[", "3", "]", ".", "strip", "(", ")", ",", "{", "}", ")", "revision", "=", "exceptions", ".", "setdefault", "(", "int", "(", "value", "[", "4", "]", ".", "strip", "(", ")", "[", "3", ":", "]", ")", ",", "re_sourceforge", ".", "sub", "(", "re_sub", ",", "value", "[", "5", "]", ".", "strip", "(", ")", ")", ")", "return", "versions" ]
https://github.com/epiqc/ScaffCC/blob/66a79944ee4cd116b27bc1a69137276885461db8/llvm/utils/benchmark/mingw.py#L55-L84
ChromiumWebApps/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
gpu/command_buffer/build_gles2_cmd_buffer.py
python
Function.MakeTypedInitString
(self, prefix, add_comma = False)
return self.__MaybePrependComma(arg_string, add_comma)
Gets a typed list of arguments as they need to be for cmd Init/Set.
Gets a typed list of arguments as they need to be for cmd Init/Set.
[ "Gets", "a", "typed", "list", "of", "arguments", "as", "they", "need", "to", "be", "for", "cmd", "Init", "/", "Set", "." ]
def MakeTypedInitString(self, prefix, add_comma = False):
    """Gets a typed list of arguments as they need to be for cmd Init/Set."""
    args = self.GetInitArgs()
    arg_string = ", ".join(
        ["%s %s%s" % (arg.type, prefix, arg.name) for arg in args])
    return self.__MaybePrependComma(arg_string, add_comma)
[ "def", "MakeTypedInitString", "(", "self", ",", "prefix", ",", "add_comma", "=", "False", ")", ":", "args", "=", "self", ".", "GetInitArgs", "(", ")", "arg_string", "=", "\", \"", ".", "join", "(", "[", "\"%s %s%s\"", "%", "(", "arg", ".", "type", ",", "prefix", ",", "arg", ".", "name", ")", "for", "arg", "in", "args", "]", ")", "return", "self", ".", "__MaybePrependComma", "(", "arg_string", ",", "add_comma", ")" ]
https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/gpu/command_buffer/build_gles2_cmd_buffer.py#L6476-L6481
tensorflow/tensorflow
419e3a6b650ea4bd1b0cba23c4348f8a69f3272e
tensorflow/python/debug/lib/grpc_debug_server.py
python
EventListenerBaseServicer.request_unwatch
(self, node_name, output_slot, debug_op)
Request disabling a debug tensor watchpoint or breakpoint. This is the opposite of `request_watch()`. Args: node_name: (`str`) name of the node that the to-be-watched tensor belongs to, e.g., "hidden/Weights". output_slot: (`int`) output slot index of the tensor to watch. debug_op: (`str`) name of the debug op to enable. This should not include any attribute substrings.
Request disabling a debug tensor watchpoint or breakpoint.
[ "Request", "disabling", "a", "debug", "tensor", "watchpoint", "or", "breakpoint", "." ]
def request_unwatch(self, node_name, output_slot, debug_op):
    """Request disabling a debug tensor watchpoint or breakpoint.

    This is the opposite of `request_watch()`.

    Args:
      node_name: (`str`) name of the node that the to-be-watched tensor
        belongs to, e.g., "hidden/Weights".
      output_slot: (`int`) output slot index of the tensor to watch.
      debug_op: (`str`) name of the debug op to enable. This should not
        include any attribute substrings.
    """
    self._debug_ops_state_change_queue.put(
        _state_change(
            debug_service_pb2.EventReply.DebugOpStateChange.DISABLED,
            node_name, output_slot, debug_op))
[ "def", "request_unwatch", "(", "self", ",", "node_name", ",", "output_slot", ",", "debug_op", ")", ":", "self", ".", "_debug_ops_state_change_queue", ".", "put", "(", "_state_change", "(", "debug_service_pb2", ".", "EventReply", ".", "DebugOpStateChange", ".", "DISABLED", ",", "node_name", ",", "output_slot", ",", "debug_op", ")", ")" ]
https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/python/debug/lib/grpc_debug_server.py#L419-L434
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scipy/py3/scipy/linalg/decomp_svd.py
python
orth
(A, rcond=None)
return Q
Construct an orthonormal basis for the range of A using SVD Parameters ---------- A : (M, N) array_like Input array rcond : float, optional Relative condition number. Singular values ``s`` smaller than ``rcond * max(s)`` are considered zero. Default: floating point eps * max(M,N). Returns ------- Q : (M, K) ndarray Orthonormal basis for the range of A. K = effective rank of A, as determined by rcond See also -------- svd : Singular value decomposition of a matrix null_space : Matrix null space Examples -------- >>> from scipy.linalg import orth >>> A = np.array([[2, 0, 0], [0, 5, 0]]) # rank 2 array >>> orth(A) array([[0., 1.], [1., 0.]]) >>> orth(A.T) array([[0., 1.], [1., 0.], [0., 0.]])
Construct an orthonormal basis for the range of A using SVD
[ "Construct", "an", "orthonormal", "basis", "for", "the", "range", "of", "A", "using", "SVD" ]
def orth(A, rcond=None):
    """
    Construct an orthonormal basis for the range of A using SVD

    Parameters
    ----------
    A : (M, N) array_like
        Input array
    rcond : float, optional
        Relative condition number. Singular values ``s`` smaller than
        ``rcond * max(s)`` are considered zero.
        Default: floating point eps * max(M,N).

    Returns
    -------
    Q : (M, K) ndarray
        Orthonormal basis for the range of A.
        K = effective rank of A, as determined by rcond

    See also
    --------
    svd : Singular value decomposition of a matrix
    null_space : Matrix null space

    Examples
    --------
    >>> from scipy.linalg import orth
    >>> A = np.array([[2, 0, 0], [0, 5, 0]])  # rank 2 array
    >>> orth(A)
    array([[0., 1.],
           [1., 0.]])
    >>> orth(A.T)
    array([[0., 1.],
           [1., 0.],
           [0., 0.]])
    """
    u, s, vh = svd(A, full_matrices=False)
    M, N = u.shape[0], vh.shape[1]
    if rcond is None:
        rcond = numpy.finfo(s.dtype).eps * max(M, N)
    tol = numpy.amax(s) * rcond
    num = numpy.sum(s > tol, dtype=int)
    Q = u[:, :num]
    return Q
[ "def", "orth", "(", "A", ",", "rcond", "=", "None", ")", ":", "u", ",", "s", ",", "vh", "=", "svd", "(", "A", ",", "full_matrices", "=", "False", ")", "M", ",", "N", "=", "u", ".", "shape", "[", "0", "]", ",", "vh", ".", "shape", "[", "1", "]", "if", "rcond", "is", "None", ":", "rcond", "=", "numpy", ".", "finfo", "(", "s", ".", "dtype", ")", ".", "eps", "*", "max", "(", "M", ",", "N", ")", "tol", "=", "numpy", ".", "amax", "(", "s", ")", "*", "rcond", "num", "=", "numpy", ".", "sum", "(", "s", ">", "tol", ",", "dtype", "=", "int", ")", "Q", "=", "u", "[", ":", ",", ":", "num", "]", "return", "Q" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/py3/scipy/linalg/decomp_svd.py#L286-L330
pmq20/node-packer
12c46c6e44fbc14d9ee645ebd17d5296b324f7e0
current/tools/gyp/pylib/gyp/xcode_emulation.py
python
XcodeArchsDefault._VariableMapping
(self, sdkroot)
Returns the dictionary of variable mapping depending on the SDKROOT.
Returns the dictionary of variable mapping depending on the SDKROOT.
[ "Returns", "the", "dictionary", "of", "variable", "mapping", "depending", "on", "the", "SDKROOT", "." ]
def _VariableMapping(self, sdkroot):
    """Returns the dictionary of variable mapping depending on the SDKROOT."""
    sdkroot = sdkroot.lower()
    if 'iphoneos' in sdkroot:
        return self._archs['ios']
    elif 'iphonesimulator' in sdkroot:
        return self._archs['iossim']
    else:
        return self._archs['mac']
[ "def", "_VariableMapping", "(", "self", ",", "sdkroot", ")", ":", "sdkroot", "=", "sdkroot", ".", "lower", "(", ")", "if", "'iphoneos'", "in", "sdkroot", ":", "return", "self", ".", "_archs", "[", "'ios'", "]", "elif", "'iphonesimulator'", "in", "sdkroot", ":", "return", "self", ".", "_archs", "[", "'iossim'", "]", "else", ":", "return", "self", ".", "_archs", "[", "'mac'", "]" ]
https://github.com/pmq20/node-packer/blob/12c46c6e44fbc14d9ee645ebd17d5296b324f7e0/current/tools/gyp/pylib/gyp/xcode_emulation.py#L57-L65
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/tensorflow/python/client/timeline.py
python
_TensorTracker.add_unref
(self, timestamp)
Adds an unref to this tensor with the specified timestamp. Args: timestamp: Timestamp of object unreference as an integer.
Adds an unref to this tensor with the specified timestamp.
[ "Adds", "an", "unref", "to", "this", "tensor", "with", "the", "specified", "timestamp", "." ]
def add_unref(self, timestamp):
    """Adds an unref to this tensor with the specified timestamp.

    Args:
      timestamp: Timestamp of object unreference as an integer.
    """
    self._unref_times.append(timestamp)
[ "def", "add_unref", "(", "self", ",", "timestamp", ")", ":", "self", ".", "_unref_times", ".", "append", "(", "timestamp", ")" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/python/client/timeline.py#L338-L344
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
python
WrappedSocket._custom_validate
(self, verify, trust_bundle)
Called when we have set custom validation. We do this in two cases: first, when cert validation is entirely disabled; and second, when using a custom trust DB. Raises an SSLError if the connection is not trusted.
Called when we have set custom validation. We do this in two cases: first, when cert validation is entirely disabled; and second, when using a custom trust DB. Raises an SSLError if the connection is not trusted.
[ "Called", "when", "we", "have", "set", "custom", "validation", ".", "We", "do", "this", "in", "two", "cases", ":", "first", "when", "cert", "validation", "is", "entirely", "disabled", ";", "and", "second", "when", "using", "a", "custom", "trust", "DB", ".", "Raises", "an", "SSLError", "if", "the", "connection", "is", "not", "trusted", "." ]
def _custom_validate(self, verify, trust_bundle):
    """
    Called when we have set custom validation. We do this in two cases:
    first, when cert validation is entirely disabled; and second, when
    using a custom trust DB.
    Raises an SSLError if the connection is not trusted.
    """
    # If we disabled cert validation, just say: cool.
    if not verify:
        return

    successes = (
        SecurityConst.kSecTrustResultUnspecified,
        SecurityConst.kSecTrustResultProceed,
    )
    try:
        trust_result = self._evaluate_trust(trust_bundle)
        if trust_result in successes:
            return
        reason = "error code: %d" % (trust_result,)
    except Exception as e:
        # Do not trust on error
        reason = "exception: %r" % (e,)

    # SecureTransport does not send an alert nor shut down the connection.
    rec = _build_tls_unknown_ca_alert(self.version())
    self.socket.sendall(rec)
    # close the connection immediately
    # l_onoff = 1, activate linger
    # l_linger = 0, linger for 0 seconds
    opts = struct.pack("ii", 1, 0)
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
    self.close()
    raise ssl.SSLError("certificate verify failed, %s" % reason)
[ "def", "_custom_validate", "(", "self", ",", "verify", ",", "trust_bundle", ")", ":", "# If we disabled cert validation, just say: cool.", "if", "not", "verify", ":", "return", "successes", "=", "(", "SecurityConst", ".", "kSecTrustResultUnspecified", ",", "SecurityConst", ".", "kSecTrustResultProceed", ",", ")", "try", ":", "trust_result", "=", "self", ".", "_evaluate_trust", "(", "trust_bundle", ")", "if", "trust_result", "in", "successes", ":", "return", "reason", "=", "\"error code: %d\"", "%", "(", "trust_result", ",", ")", "except", "Exception", "as", "e", ":", "# Do not trust on error", "reason", "=", "\"exception: %r\"", "%", "(", "e", ",", ")", "# SecureTransport does not send an alert nor shuts down the connection.", "rec", "=", "_build_tls_unknown_ca_alert", "(", "self", ".", "version", "(", ")", ")", "self", ".", "socket", ".", "sendall", "(", "rec", ")", "# close the connection immediately", "# l_onoff = 1, activate linger", "# l_linger = 0, linger for 0 seoncds", "opts", "=", "struct", ".", "pack", "(", "\"ii\"", ",", "1", ",", "0", ")", "self", ".", "socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_LINGER", ",", "opts", ")", "self", ".", "close", "(", ")", "raise", "ssl", ".", "SSLError", "(", "\"certificate verify failed, %s\"", "%", "reason", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py#L397-L430
telefonicaid/fiware-orion
27c3202b9ddcfb9e3635a0af8d373f76e89b1d24
scripts/managedb/upgrade-2.2.0/check_metadata_id.py
python
extract_attr_with_md_id
(attrs)
return r
Given a key-value of attributes, returns a dictionary with all attributes with metadata ID. The key in the dictionary is the attribute name and the value a list of its IDs. If attrs doesn't have any metadata ID, then an empty dictionary will be returned :param attrs: key-value of attribute to process :return: a dictionary as described above
Given a key-value of attributes, returns a dictionary with all attributes with metadata ID. The key in the dictionary is the attribute name and the value a list of its IDs. If attrs doesn't have any metadata ID, then an empty dictionary will be returned
[ "Given", "a", "key", "-", "value", "of", "attributes", "returns", "a", "dictionary", "with", "all", "attributes", "with", "metadata", "ID", ".", "The", "key", "in", "the", "dictionary", "is", "the", "attribute", "name", "and", "the", "value", "a", "list", "of", "its", "IDs", ".", "If", "attrs", "doesn", "t", "have", "any", "metadata", "ID", "then", "an", "empty", "dictionary", "will", "be", "returned" ]
def extract_attr_with_md_id(attrs):
    """
    Given a key-value of attributes, returns a dictionary with all attributes
    with metadata ID. The key in the dictionary is the attribute name and the
    value a list of its IDs. If attrs doesn't have any metadata ID, then an
    empty dictionary will be returned

    :param attrs: key-value of attribute to process
    :return: a dictionary as described above
    """
    r = {}
    for attr in attrs.keys():
        # We cannot use split here, as the metadata part could have ()
        separator = attr.find('()')
        if separator > 0:
            name = attr[:separator]
            md_id = attr[separator+2:]
            if name in r.keys():
                r[name].append(md_id)
            else:
                r[name] = [md_id]
    return r
[ "def", "extract_attr_with_md_id", "(", "attrs", ")", ":", "r", "=", "{", "}", "for", "attr", "in", "attrs", ".", "keys", "(", ")", ":", "# We cannot use split here, as the metadata part could have ()", "separator", "=", "attr", ".", "find", "(", "'()'", ")", "if", "separator", ">", "0", ":", "name", "=", "attr", "[", ":", "separator", "]", "md_id", "=", "attr", "[", "separator", "+", "2", ":", "]", "if", "name", "in", "r", ".", "keys", "(", ")", ":", "r", "[", "name", "]", ".", "append", "(", "md_id", ")", "else", ":", "r", "[", "name", "]", "=", "[", "md_id", "]", "return", "r" ]
https://github.com/telefonicaid/fiware-orion/blob/27c3202b9ddcfb9e3635a0af8d373f76e89b1d24/scripts/managedb/upgrade-2.2.0/check_metadata_id.py#L84-L105
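An input/output sketch with hypothetical attribute names; '()' is the separator between attribute name and metadata ID:

attrs = {
    "temperature()id1": 1,
    "temperature()id2": 2,
    "pressure": 3,  # no '()' separator, so no metadata ID: skipped
}
print(extract_attr_with_md_id(attrs))  # {'temperature': ['id1', 'id2']}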
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/pandas/py3/pandas/io/formats/excel.py
python
CSSToExcelConverter.__call__
(self, declarations_str: str)
return self.build_xlstyle(properties)
Convert CSS declarations to ExcelWriter style. Parameters ---------- declarations_str : str List of CSS declarations. e.g. "font-weight: bold; background: blue" Returns ------- xlstyle : dict A style as interpreted by ExcelWriter when found in ExcelCell.style.
Convert CSS declarations to ExcelWriter style.
[ "Convert", "CSS", "declarations", "to", "ExcelWriter", "style", "." ]
def __call__(self, declarations_str: str) -> dict[str, dict[str, str]]:
    """
    Convert CSS declarations to ExcelWriter style.

    Parameters
    ----------
    declarations_str : str
        List of CSS declarations.
        e.g. "font-weight: bold; background: blue"

    Returns
    -------
    xlstyle : dict
        A style as interpreted by ExcelWriter when found in
        ExcelCell.style.
    """
    # TODO: memoize?
    properties = self.compute_css(declarations_str, self.inherited)
    return self.build_xlstyle(properties)
[ "def", "__call__", "(", "self", ",", "declarations_str", ":", "str", ")", "->", "dict", "[", "str", ",", "dict", "[", "str", ",", "str", "]", "]", ":", "# TODO: memoize?", "properties", "=", "self", ".", "compute_css", "(", "declarations_str", ",", "self", ".", "inherited", ")", "return", "self", ".", "build_xlstyle", "(", "properties", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/pandas/py3/pandas/io/formats/excel.py#L146-L164
zhaoweicai/mscnn
534bcac5710a579d60827f192035f7eef6d8c585
scripts/download_model_binary.py
python
reporthook
(count, block_size, total_size)
From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
[ "From", "http", ":", "//", "blog", ".", "moleculea", ".", "com", "/", "2012", "/", "10", "/", "04", "/", "urlretrieve", "-", "progres", "-", "indicator", "/" ]
def reporthook(count, block_size, total_size):
    """
    From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    duration = (time.time() - start_time) or 0.01
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
[ "def", "reporthook", "(", "count", ",", "block_size", ",", "total_size", ")", ":", "global", "start_time", "if", "count", "==", "0", ":", "start_time", "=", "time", ".", "time", "(", ")", "return", "duration", "=", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", "or", "0.01", "progress_size", "=", "int", "(", "count", "*", "block_size", ")", "speed", "=", "int", "(", "progress_size", "/", "(", "1024", "*", "duration", ")", ")", "percent", "=", "int", "(", "count", "*", "block_size", "*", "100", "/", "total_size", ")", "sys", ".", "stdout", ".", "write", "(", "\"\\r...%d%%, %d MB, %d KB/s, %d seconds passed\"", "%", "(", "percent", ",", "progress_size", "/", "(", "1024", "*", "1024", ")", ",", "speed", ",", "duration", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")" ]
https://github.com/zhaoweicai/mscnn/blob/534bcac5710a579d60827f192035f7eef6d8c585/scripts/download_model_binary.py#L13-L27
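The signature matches the callback that urllib's urlretrieve invokes as reporthook(block_count, block_size, total_size); a usage sketch with the Python 3 form of the call (the URL is hypothetical):

from urllib.request import urlretrieve

url = "http://example.com/model.caffemodel"  # hypothetical download URL
urlretrieve(url, "model.caffemodel", reporthook)  # prints rolling progress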
ablab/quast
5f6709528129a6ad266a6b24ef3f40b88f0fe04b
quast_libs/site_packages/joblib2/numpy_pickle.py
python
load
(filename, mmap_mode=None)
return obj
Reconstruct a Python object from a file persisted with joblib.load. Parameters ----------- filename: string The name of the file from which to load the object mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional If not None, the arrays are memory-mapped from the disk. This mode has no effect for compressed files. Note that in this case the reconstructed object might no longer match exactly the originally pickled object. Returns ------- result: any Python object The object stored in the file. See Also -------- joblib.dump : function to save an object Notes ----- This function can load numpy array files saved separately during the dump. If the mmap_mode argument is given, it is passed to np.load and arrays are loaded as memmaps. As a consequence, the reconstructed object might not match the original pickled object. Note that if the file was saved with compression, the arrays cannot be memmapped.
Reconstruct a Python object from a file persisted with joblib.load.
[ "Reconstruct", "a", "Python", "object", "from", "a", "file", "persisted", "with", "joblib", ".", "load", "." ]
def load(filename, mmap_mode=None):
    """Reconstruct a Python object from a file persisted with joblib.load.

    Parameters
    ----------
    filename: string
        The name of the file from which to load the object
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, the arrays are memory-mapped from the disk. This
        mode has no effect for compressed files. Note that in this
        case the reconstructed object might no longer match exactly
        the originally pickled object.

    Returns
    -------
    result: any Python object
        The object stored in the file.

    See Also
    --------
    joblib.dump : function to save an object

    Notes
    -----
    This function can load numpy array files saved separately during the
    dump. If the mmap_mode argument is given, it is passed to np.load and
    arrays are loaded as memmaps. As a consequence, the reconstructed
    object might not match the original pickled object. Note that if the
    file was saved with compression, the arrays cannot be memmapped.
    """
    file_handle = open(filename, 'rb')
    # We are careful to open the file handle early and keep it open to
    # avoid race-conditions on renames. That said, if data are stored in
    # companion files, moving the directory will create a race when
    # joblib tries to access the companion files.
    if _read_magic(file_handle) == _ZFILE_PREFIX:
        if mmap_mode is not None:
            warnings.warn('file "%(filename)s" appears to be a zip, '
                          'ignoring mmap_mode "%(mmap_mode)s" flag passed'
                          % locals(), Warning, stacklevel=2)
        unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
    else:
        unpickler = NumpyUnpickler(filename, file_handle=file_handle,
                                   mmap_mode=mmap_mode)

    try:
        obj = unpickler.load()
    finally:
        if hasattr(unpickler, 'file_handle'):
            unpickler.file_handle.close()
    return obj
[ "def", "load", "(", "filename", ",", "mmap_mode", "=", "None", ")", ":", "file_handle", "=", "open", "(", "filename", ",", "'rb'", ")", "# We are careful to open the file hanlde early and keep it open to", "# avoid race-conditions on renames. That said, if data are stored in", "# companion files, moving the directory will create a race when", "# joblib tries to access the companion files.", "if", "_read_magic", "(", "file_handle", ")", "==", "_ZFILE_PREFIX", ":", "if", "mmap_mode", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'file \"%(filename)s\" appears to be a zip, '", "'ignoring mmap_mode \"%(mmap_mode)s\" flag passed'", "%", "locals", "(", ")", ",", "Warning", ",", "stacklevel", "=", "2", ")", "unpickler", "=", "ZipNumpyUnpickler", "(", "filename", ",", "file_handle", "=", "file_handle", ")", "else", ":", "unpickler", "=", "NumpyUnpickler", "(", "filename", ",", "file_handle", "=", "file_handle", ",", "mmap_mode", "=", "mmap_mode", ")", "try", ":", "obj", "=", "unpickler", ".", "load", "(", ")", "finally", ":", "if", "hasattr", "(", "unpickler", ",", "'file_handle'", ")", ":", "unpickler", ".", "file_handle", ".", "close", "(", ")", "return", "obj" ]
https://github.com/ablab/quast/blob/5f6709528129a6ad266a6b24ef3f40b88f0fe04b/quast_libs/site_packages/joblib2/numpy_pickle.py#L370-L422
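A dump/load round-trip sketch against the public joblib API (this vendored joblib2 mirrors it; the memmap behavior is assumed per the docstring above):

import numpy as np
import joblib

joblib.dump({"weights": np.arange(5)}, "obj.pkl")
restored = joblib.load("obj.pkl")               # ordinary in-memory load
mapped = joblib.load("obj.pkl", mmap_mode="r")  # arrays reload as read-only memmaps
print(restored["weights"], mapped["weights"])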
rodeofx/OpenWalter
6116fbe3f04f1146c854afbfbdbe944feaee647e
walter/maya/scripts/walterPanel/walterOutliner.py
python
TreeViewDelegate.drawArrowDragLock
(self, painter, rect, index)
Draw the expansion arrow on the nodes that want it.
Draw the expansion arrow on the nodes that want it.
[ "Draw", "the", "expansion", "arrow", "on", "the", "nodes", "that", "want", "it", "." ]
def drawArrowDragLock(self, painter, rect, index):
    """Draw the expansion arrow on the nodes that want it."""
    painter.save()

    arrow = None

    if index.data(CHILD_COUNT):
        center = index.data(QtCore.Qt.SizeHintRole).height() / 2
        painter.translate(rect.left(), rect.top() + center)

        # Draw the arrow
        if self.treeView().isExpanded(index):
            arrow = self.EXPANDED_ARROW
        else:
            arrow = self.COLLAPSED_ARROW

        painter.setBrush(self.ARROW_COLOR)
        painter.setPen(QtCore.Qt.NoPen)
        painter.drawPolygon(arrow)

        cursorPosition = self.treeView().mapFromGlobal(QtGui.QCursor.pos())
        if rect.contains(cursorPosition):
            x = cursorPosition.x()
            arrowPoints = [p.x() for p in arrow]
            minX = min(arrowPoints) + rect.left() - EXPAND_SENSITIVITY
            maxX = max(arrowPoints) + rect.left() + EXPAND_SENSITIVITY
            if x >= minX and x <= maxX:
                # Save the action to expand
                self.lastHitAction = TreeItem.ACTION_EXPAND

    painter.restore()
[ "def", "drawArrowDragLock", "(", "self", ",", "painter", ",", "rect", ",", "index", ")", ":", "painter", ".", "save", "(", ")", "arrow", "=", "None", "if", "index", ".", "data", "(", "CHILD_COUNT", ")", ":", "center", "=", "index", ".", "data", "(", "QtCore", ".", "Qt", ".", "SizeHintRole", ")", ".", "height", "(", ")", "/", "2", "painter", ".", "translate", "(", "rect", ".", "left", "(", ")", ",", "rect", ".", "top", "(", ")", "+", "center", ")", "# Draw the arrow", "if", "self", ".", "treeView", "(", ")", ".", "isExpanded", "(", "index", ")", ":", "arrow", "=", "self", ".", "EXPANDED_ARROW", "else", ":", "arrow", "=", "self", ".", "COLLAPSED_ARROW", "painter", ".", "setBrush", "(", "self", ".", "ARROW_COLOR", ")", "painter", ".", "setPen", "(", "QtCore", ".", "Qt", ".", "NoPen", ")", "painter", ".", "drawPolygon", "(", "arrow", ")", "cursorPosition", "=", "self", ".", "treeView", "(", ")", ".", "mapFromGlobal", "(", "QtGui", ".", "QCursor", ".", "pos", "(", ")", ")", "if", "rect", ".", "contains", "(", "cursorPosition", ")", ":", "x", "=", "cursorPosition", ".", "x", "(", ")", "arrowPoints", "=", "[", "p", ".", "x", "(", ")", "for", "p", "in", "arrow", "]", "minX", "=", "min", "(", "arrowPoints", ")", "+", "rect", ".", "left", "(", ")", "-", "EXPAND_SENSITIVITY", "maxX", "=", "max", "(", "arrowPoints", ")", "+", "rect", ".", "left", "(", ")", "+", "EXPAND_SENSITIVITY", "if", "x", ">=", "minX", "and", "x", "<=", "maxX", ":", "# Save the action to expand", "self", ".", "lastHitAction", "=", "TreeItem", ".", "ACTION_EXPAND", "painter", ".", "restore", "(", ")" ]
https://github.com/rodeofx/OpenWalter/blob/6116fbe3f04f1146c854afbfbdbe944feaee647e/walter/maya/scripts/walterPanel/walterOutliner.py#L1627-L1655
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Node/FS.py
python
Dir.srcnode
(self)
return Base.srcnode(self)
Dir has a special need for srcnode()...if we have a srcdir attribute set, then that *is* our srcnode.
Dir has a special need for srcnode()...if we have a srcdir attribute set, then that *is* our srcnode.
[ "Dir", "has", "a", "special", "need", "for", "srcnode", "()", "...", "if", "we", "have", "a", "srcdir", "attribute", "set", "then", "that", "*", "is", "*", "our", "srcnode", "." ]
def srcnode(self):
    """Dir has a special need for srcnode()...if we have a srcdir
    attribute set, then that *is* our srcnode."""
    if self.srcdir:
        return self.srcdir
    return Base.srcnode(self)
[ "def", "srcnode", "(", "self", ")", ":", "if", "self", ".", "srcdir", ":", "return", "self", ".", "srcdir", "return", "Base", ".", "srcnode", "(", "self", ")" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Node/FS.py#L1899-L1904
arangodb/arangodb
0d658689c7d1b721b314fa3ca27d38303e1570c8
3rdParty/boost/1.78.0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py
python
fix_input_files
(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'], seqType='both', verbose=False)
Fixes source- and header-files used as input when pre-processing MPL-containers.
Fixes source- and header-files used as input when pre-processing MPL-containers.
[ "Fixes", "source", "-", "and", "header", "-", "files", "used", "as", "input", "when", "pre", "-", "processing", "MPL", "-", "containers", "." ]
def fix_input_files(headerDir, sourceDir, containers=['vector', 'list', 'set', 'map'], seqType='both', verbose=False):
    """Fixes source- and header-files used as input when pre-processing MPL-containers."""
    # The new modification time.
    timestamp = datetime.datetime.now()
    # Fix the input files for containers in their variadic form.
    if seqType == "both" or seqType == "variadic":
        if verbose:
            print "Fix input files for pre-processing Boost.MPL variadic containers."
        fix_input_files_for_variadic_seq(headerDir, sourceDir, timestamp)
    # Fix the input files for containers in their numbered form.
    if seqType == "both" or seqType == "numbered":
        if verbose:
            print "Fix input files for pre-processing Boost.MPL numbered containers."
        fix_input_files_for_numbered_seq(headerDir, ".hpp", timestamp, containers)
        fix_input_files_for_numbered_seq(sourceDir, ".cpp", timestamp, containers)
[ "def", "fix_input_files", "(", "headerDir", ",", "sourceDir", ",", "containers", "=", "[", "'vector'", ",", "'list'", ",", "'set'", ",", "'map'", "]", ",", "seqType", "=", "'both'", ",", "verbose", "=", "False", ")", ":", "# The new modification time.", "timestamp", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "# Fix the input files for containers in their variadic form.", "if", "seqType", "==", "\"both\"", "or", "seqType", "==", "\"variadic\"", ":", "if", "verbose", ":", "print", "\"Fix input files for pre-processing Boost.MPL variadic containers.\"", "fix_input_files_for_variadic_seq", "(", "headerDir", ",", "sourceDir", ",", "timestamp", ")", "# Fix the input files for containers in their numbered form.", "if", "seqType", "==", "\"both\"", "or", "seqType", "==", "\"numbered\"", ":", "if", "verbose", ":", "print", "\"Fix input files for pre-processing Boost.MPL numbered containers.\"", "fix_input_files_for_numbered_seq", "(", "headerDir", ",", "\".hpp\"", ",", "timestamp", ",", "containers", ")", "fix_input_files_for_numbered_seq", "(", "sourceDir", ",", "\".cpp\"", ",", "timestamp", ",", "containers", ")" ]
https://github.com/arangodb/arangodb/blob/0d658689c7d1b721b314fa3ca27d38303e1570c8/3rdParty/boost/1.78.0/libs/mpl/preprocessed/fix_boost_mpl_preprocess.py#L123-L138
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/pydoc.py
python
pager
(text)
The first time this is called, determine what kind of pager to use.
The first time this is called, determine what kind of pager to use.
[ "The", "first", "time", "this", "is", "called", "determine", "what", "kind", "of", "pager", "to", "use", "." ]
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    global pager
    pager = getpager()
    pager(text)
[ "def", "pager", "(", "text", ")", ":", "global", "pager", "pager", "=", "getpager", "(", ")", "pager", "(", "text", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/pydoc.py#L1444-L1448
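pager rebinds its own global name on first call, so getpager() runs once and later calls go straight to the detected pager. A generic sketch of that self-replacing lazy-initialization idiom (greet and its setup stand-in are hypothetical):

def greet(name):
    """On first call, do one-time setup, then rebind the global name."""
    global greet
    prefix = "Hello"  # stand-in for getpager()'s one-time detection work

    def greet(name):  # replacement bound over the module-level name
        print(prefix, name)

    greet(name)  # dispatch this first call to the replacement

greet("world")  # does setup, then prints: Hello world
greet("again")  # setup skipped: the rebound function runs directly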
nasa/fprime
595cf3682d8365943d86c1a6fe7c78f0a116acf0
Autocoders/Python/src/fprime_ac/utils/pyparsing.py
python
Regex.__init__
(self, pattern, flags=0)
The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.
The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags.
[ "The", "parameters", "pattern", "and", "flags", "are", "passed", "to", "the", "re", ".", "compile", "()", "function", "as", "-", "is", ".", "See", "the", "Python", "re", "module", "for", "an", "explanation", "of", "the", "acceptable", "patterns", "and", "flags", "." ]
def __init__(self, pattern, flags=0):
    """The parameters pattern and flags are passed to the re.compile()
    function as-is. See the Python re module for an explanation of the
    acceptable patterns and flags."""
    super().__init__()

    if len(pattern) == 0:
        warnings.warn(
            "null string passed to Regex; use Empty() instead",
            SyntaxWarning,
            stacklevel=2,
        )

    self.pattern = pattern
    self.flags = flags

    try:
        self.re = re.compile(self.pattern, self.flags)
        self.reString = self.pattern
    except sre_constants.error as e:
        warnings.warn(
            "invalid pattern (%s) passed to Regex" % pattern,
            SyntaxWarning,
            stacklevel=2,
        )
        raise

    self.name = _ustr(self)
    self.errmsg = "Expected " + self.name
    self.myException.msg = self.errmsg
    self.mayIndexError = False
    self.mayReturnEmpty = True
[ "def", "__init__", "(", "self", ",", "pattern", ",", "flags", "=", "0", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "if", "len", "(", "pattern", ")", "==", "0", ":", "warnings", ".", "warn", "(", "\"null string passed to Regex; use Empty() instead\"", ",", "SyntaxWarning", ",", "stacklevel", "=", "2", ",", ")", "self", ".", "pattern", "=", "pattern", "self", ".", "flags", "=", "flags", "try", ":", "self", ".", "re", "=", "re", ".", "compile", "(", "self", ".", "pattern", ",", "self", ".", "flags", ")", "self", ".", "reString", "=", "self", ".", "pattern", "except", "sre_constants", ".", "error", "as", "e", ":", "warnings", ".", "warn", "(", "\"invalid pattern (%s) passed to Regex\"", "%", "pattern", ",", "SyntaxWarning", ",", "stacklevel", "=", "2", ",", ")", "raise", "self", ".", "name", "=", "_ustr", "(", "self", ")", "self", ".", "errmsg", "=", "\"Expected \"", "+", "self", ".", "name", "self", ".", "myException", ".", "msg", "=", "self", ".", "errmsg", "self", ".", "mayIndexError", "=", "False", "self", ".", "mayReturnEmpty", "=", "True" ]
https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/utils/pyparsing.py#L1501-L1530
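A minimal usage sketch, assuming a standard pyparsing install rather than fprime's vendored copy; it shows the pattern and flags being handed straight to re.compile, as the docstring above describes:

import re
from pyparsing import Regex

word = Regex(r"[a-z]+", flags=re.IGNORECASE)  # pattern/flags forwarded to re.compile
print(word.parseString("Hello world"))        # -> ['Hello']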
apache/incubator-mxnet
f03fb23f1d103fec9541b5ae59ee06b1734a51d9
python/mxnet/gluon/probability/block/stochastic_block.py
python
StochasticBlock.collectLoss
(func)
return inner
To accumulate loss during the forward phase, one could first decorate forward with `StochasticBlock.collectLoss`, and then collect the loss tensor `x` by calling self.add_loss(x). For example, in the following forward function, we generate samples from a Gaussian parameterized by `loc` and `scale` and accumulate the KL-divergence between it and its prior into the block's loss storage: @StochasticBlock.collectLoss def forward(self, loc, scale): qz = mgp.Normal(loc, scale) # prior pz = mgp.Normal(np.zeros_like(loc), np.ones_like(scale)) self.add_loss(mgp.kl_divergence(qz, pz)) return qz.sample()
To accumulate loss during the forward phase, one could first decorate forward with `StochasticBlock.collectLoss`, and then collect the loss tensor `x` by calling self.add_loss(x). For example, in the following forward function, we generate samples from a Gaussian parameterized by `loc` and `scale` and accumulate the KL-divergence between it and its prior into the block's loss storage:
[ "To", "accumulate", "loss", "during", "the", "forward", "phase", "one", "could", "first", "decorate", "forward", "with", "StochasticBlock", ".", "collectLoss", "and", "then", "collect", "the", "loss", "tensor", "x", "by", "calling", "self", ".", "add_loss", "(", "x", ")", ".", "For", "example", "in", "the", "following", "forward", "function", "we", "generate", "samples", "from", "a", "Gaussian", "parameterized", "by", "loc", "and", "scale", "and", "accumulate", "the", "KL", "-", "divergence", "between", "it", "and", "its", "prior", "into", "the", "block", "s", "loss", "storage", ".", ":" ]
def collectLoss(func):
    """To accumulate loss during the forward phase, one could first decorate
    forward with `StochasticBlock.collectLoss`, and then collect the loss
    tensor `x` by calling self.add_loss(x). For example, in the following
    forward function, we generate samples from a Gaussian parameterized by
    `loc` and `scale` and accumulate the KL-divergence between it and its
    prior into the block's loss storage:

    @StochasticBlock.collectLoss
    def forward(self, loc, scale):
        qz = mgp.Normal(loc, scale)
        # prior
        pz = mgp.Normal(np.zeros_like(loc), np.ones_like(scale))
        self.add_loss(mgp.kl_divergence(qz, pz))
        return qz.sample()
    """
    @wraps(func)
    def inner(self, *args, **kwargs):
        # Loss from forward
        func_out = func(self, *args, **kwargs)
        collected_loss = self._losscache
        self._losscache = []
        self._flag = True
        return (func_out, collected_loss)
    return inner
[ "def", "collectLoss", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Loss from forward", "func_out", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "collected_loss", "=", "self", ".", "_losscache", "self", ".", "_losscache", "=", "[", "]", "self", ".", "_flag", "=", "True", "return", "(", "func_out", ",", "collected_loss", ")", "return", "inner" ]
https://github.com/apache/incubator-mxnet/blob/f03fb23f1d103fec9541b5ae59ee06b1734a51d9/python/mxnet/gluon/probability/block/stochastic_block.py#L46-L70
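The decorator above returns a (forward_output, collected_losses) pair and resets the cache for the next call. A self-contained toy re-implementation of that pattern (the class and names here are hypothetical illustrations, not MXNet's API):

from functools import wraps

class TinyStochastic:
    """Hypothetical stand-in for StochasticBlock; only the loss cache is modeled."""
    def __init__(self):
        self._losscache = []

    def add_loss(self, x):
        self._losscache.append(x)

def collect_loss(func):
    @wraps(func)
    def inner(self, *args, **kwargs):
        out = func(self, *args, **kwargs)
        collected, self._losscache = self._losscache, []  # hand back and reset
        return out, collected
    return inner

class Demo(TinyStochastic):
    @collect_loss
    def forward(self, x):
        self.add_loss(0.5)  # e.g. a KL penalty collected during forward
        return x * 2

out, losses = Demo().forward(3)
print(out, losses)  # 6 [0.5]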
balint256/gr-baz
937834ce3520b730277328d8e0cdebb3f2b1aafc
python/waterfall_window.py
python
control_panel.__init__
(self, parent)
Create a new control panel. Args: parent: the wx parent window
Create a new control panel. Args: parent: the wx parent window
[ "Create", "a", "new", "control", "panel", ".", "Args", ":", "parent", ":", "the", "wx", "parent", "window" ]
def __init__(self, parent):
	"""
	Create a new control panel.

	Args:
	    parent: the wx parent window
	"""
	self.parent = parent
	wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
	parent[SHOW_CONTROL_PANEL_KEY] = True
	parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
	control_box = wx.BoxSizer(wx.VERTICAL)
	control_box.AddStretchSpacer()
	options_box = forms.static_box_sizer(
		parent=self, sizer=control_box, label='Options',
		bold=True, orient=wx.VERTICAL,
	)
	#average
	#forms.check_box(
	#	sizer=options_box, parent=self, label='Average',
	#	ps=parent, key=AVERAGE_KEY,
	#)
	#avg_alpha_text = forms.static_text(
	#	sizer=options_box, parent=self, label='Avg Alpha',
	#	converter=forms.float_converter(lambda x: '%.4f'%x),
	#	ps=parent, key=AVG_ALPHA_KEY, width=50,
	#)
	#avg_alpha_slider = forms.log_slider(
	#	sizer=options_box, parent=self,
	#	min_exp=AVG_ALPHA_MIN_EXP,
	#	max_exp=AVG_ALPHA_MAX_EXP,
	#	num_steps=SLIDER_STEPS,
	#	ps=parent, key=AVG_ALPHA_KEY,
	#)
	#for widget in (avg_alpha_text, avg_alpha_slider):
	#	parent.subscribe(AVERAGE_KEY, widget.Enable)
	#	widget.Enable(parent[AVERAGE_KEY])
	#begin axes box
	control_box.AddStretchSpacer()
	axes_box = forms.static_box_sizer(
		parent=self, sizer=control_box, label='Axes Options',
		bold=True, orient=wx.VERTICAL,
	)
	#num lines buttons
	#forms.incr_decr_buttons(
	#	parent=self, sizer=axes_box, label='Time Scale',
	#	on_incr=self._on_incr_time_scale, on_decr=self._on_decr_time_scale,
	#)
	#dynamic range buttons
	forms.incr_decr_buttons(
		parent=self, sizer=axes_box, label='Dyn Range',
		on_incr=self._on_incr_dynamic_range, on_decr=self._on_decr_dynamic_range,
	)
	#ref lvl buttons
	forms.incr_decr_buttons(
		parent=self, sizer=axes_box, label='Ref Level',
		on_incr=self._on_incr_ref_level, on_decr=self._on_decr_ref_level,
	)
	#color mode
	forms.drop_down(
		parent=self, sizer=axes_box, width=100,
		ps=parent, key=COLOR_MODE_KEY, label='Color',
		choices=map(lambda x: x[1], COLOR_MODES),
		labels=map(lambda x: x[0], COLOR_MODES),
	)
	#autoscale
	forms.single_button(
		parent=self, sizer=axes_box, label='Autoscale',
		callback=self.parent.autoscale,
	)
	#clear
	control_box.AddStretchSpacer()
	forms.single_button(
		parent=self, sizer=control_box, label='Clear',
		callback=self._on_clear_button,
	)
	#run/stop
	forms.toggle_button(
		sizer=control_box, parent=self,
		true_label='Stop', false_label='Run',
		ps=parent, key=RUNNING_KEY,
	)
	#set sizer
	self.SetSizerAndFit(control_box)
[ "def", "__init__", "(", "self", ",", "parent", ")", ":", "self", ".", "parent", "=", "parent", "wx", ".", "Panel", ".", "__init__", "(", "self", ",", "parent", ",", "style", "=", "wx", ".", "SUNKEN_BORDER", ")", "parent", "[", "SHOW_CONTROL_PANEL_KEY", "]", "=", "True", "parent", ".", "subscribe", "(", "SHOW_CONTROL_PANEL_KEY", ",", "self", ".", "Show", ")", "control_box", "=", "wx", ".", "BoxSizer", "(", "wx", ".", "VERTICAL", ")", "control_box", ".", "AddStretchSpacer", "(", ")", "options_box", "=", "forms", ".", "static_box_sizer", "(", "parent", "=", "self", ",", "sizer", "=", "control_box", ",", "label", "=", "'Options'", ",", "bold", "=", "True", ",", "orient", "=", "wx", ".", "VERTICAL", ",", ")", "#average", "#forms.check_box(", "#\tsizer=options_box, parent=self, label='Average',", "#\tps=parent, key=AVERAGE_KEY,", "#)", "#avg_alpha_text = forms.static_text(", "#\tsizer=options_box, parent=self, label='Avg Alpha',", "#\tconverter=forms.float_converter(lambda x: '%.4f'%x),", "#\tps=parent, key=AVG_ALPHA_KEY, width=50,", "#)", "#avg_alpha_slider = forms.log_slider(", "#\tsizer=options_box, parent=self,", "#\tmin_exp=AVG_ALPHA_MIN_EXP,", "#\tmax_exp=AVG_ALPHA_MAX_EXP,", "#\tnum_steps=SLIDER_STEPS,", "#\tps=parent, key=AVG_ALPHA_KEY,", "#)", "#for widget in (avg_alpha_text, avg_alpha_slider):", "#\tparent.subscribe(AVERAGE_KEY, widget.Enable)", "#\twidget.Enable(parent[AVERAGE_KEY])", "#begin axes box", "control_box", ".", "AddStretchSpacer", "(", ")", "axes_box", "=", "forms", ".", "static_box_sizer", "(", "parent", "=", "self", ",", "sizer", "=", "control_box", ",", "label", "=", "'Axes Options'", ",", "bold", "=", "True", ",", "orient", "=", "wx", ".", "VERTICAL", ",", ")", "#num lines buttons", "#forms.incr_decr_buttons(", "#\tparent=self, sizer=axes_box, label='Time Scale',", "#\ton_incr=self._on_incr_time_scale, on_decr=self._on_decr_time_scale,", "#)", "#dyanmic range buttons", "forms", ".", "incr_decr_buttons", "(", "parent", "=", "self", ",", "sizer", "=", "axes_box", ",", "label", "=", "'Dyn Range'", ",", "on_incr", "=", "self", ".", "_on_incr_dynamic_range", ",", "on_decr", "=", "self", ".", "_on_decr_dynamic_range", ",", ")", "#ref lvl buttons", "forms", ".", "incr_decr_buttons", "(", "parent", "=", "self", ",", "sizer", "=", "axes_box", ",", "label", "=", "'Ref Level'", ",", "on_incr", "=", "self", ".", "_on_incr_ref_level", ",", "on_decr", "=", "self", ".", "_on_decr_ref_level", ",", ")", "#color mode", "forms", ".", "drop_down", "(", "parent", "=", "self", ",", "sizer", "=", "axes_box", ",", "width", "=", "100", ",", "ps", "=", "parent", ",", "key", "=", "COLOR_MODE_KEY", ",", "label", "=", "'Color'", ",", "choices", "=", "map", "(", "lambda", "x", ":", "x", "[", "1", "]", ",", "COLOR_MODES", ")", ",", "labels", "=", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "COLOR_MODES", ")", ",", ")", "#autoscale", "forms", ".", "single_button", "(", "parent", "=", "self", ",", "sizer", "=", "axes_box", ",", "label", "=", "'Autoscale'", ",", "callback", "=", "self", ".", "parent", ".", "autoscale", ",", ")", "#clear", "control_box", ".", "AddStretchSpacer", "(", ")", "forms", ".", "single_button", "(", "parent", "=", "self", ",", "sizer", "=", "control_box", ",", "label", "=", "'Clear'", ",", "callback", "=", "self", ".", "_on_clear_button", ",", ")", "#run/stop", "forms", ".", "toggle_button", "(", "sizer", "=", "control_box", ",", "parent", "=", "self", ",", "true_label", "=", "'Stop'", ",", "false_label", "=", "'Run'", ",", "ps", "=", "parent", ",", "key", "=", 
"RUNNING_KEY", ",", ")", "#set sizer", "self", ".", "SetSizerAndFit", "(", "control_box", ")" ]
https://github.com/balint256/gr-baz/blob/937834ce3520b730277328d8e0cdebb3f2b1aafc/python/waterfall_window.py#L59-L142
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
direct/src/directtools/DirectSelection.py
python
SelectedNodePaths.getSelectedDict
(self, id)
Search selectedDict for node path, try to repair broken node paths.
Search selectedDict for node path, try to repair broken node paths.
[ "Search", "selectedDict", "for", "node", "path", "try", "to", "repair", "broken", "node", "paths", "." ]
def getSelectedDict(self, id): """ Search selectedDict for node path, try to repair broken node paths. """ dnp = self.selectedDict.get(id, None) if dnp: return dnp else: # Not in selected dictionary return None
[ "def", "getSelectedDict", "(", "self", ",", "id", ")", ":", "dnp", "=", "self", ".", "selectedDict", ".", "get", "(", "id", ",", "None", ")", "if", "dnp", ":", "return", "dnp", "else", ":", "# Not in selected dictionary", "return", "None" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/directtools/DirectSelection.py#L152-L161
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AzCodeGenerator/bin/osx/az_code_gen/clang_cpp.py
python
build_tree_from_string
(formatted_data)
return result
Recursively builds a dictionary tree out of a string using the following rules: Parameter lists: Token(Arguments...) become Dictionary entries: Token:Arguments... Any parameter list that is a string or list of strings gets saved as such. This will return a dictionary (even if given a simple string), unless given a list of strings. The is_simple_string and is_list detection functions and convert_key_to_string and convert_keys_to_list functions can be used to process the results further if necessary. @param formatted_data - A string of data formatted by the AZCodeGenerator C++ Parser @return A dictionary containing all the data parsed out of the incoming string.
Recursively builds a dictionary tree out of a string using the following rules: Parameter lists: Token(Arguments...) become Dictionary entries: Token:Arguments... Any parameter list that is a string or list of strings gets saved as such. This will return a dictionary (even if given a simple string), unless given a list of strings. The is_simple_string and is_list detection functions and convert_key_to_string and convert_keys_to_list functions can be used to process the results further if necessary.
[ "Recursively", "builds", "a", "dictionary", "tree", "out", "of", "a", "string", "using", "the", "following", "rules", ":", "Parameter", "lists", ":", "Token", "(", "Arguments", "...", ")", "become", "Dictionary", "entries", ":", "Token", ":", "Arguments", "...", "Any", "parameter", "list", "that", "is", "a", "string", "or", "list", "of", "strings", "gets", "saved", "as", "such", ".", "This", "will", "return", "a", "dictionary", "(", "even", "if", "given", "a", "simple", "string", ")", "unless", "given", "a", "list", "of", "strings", ".", "The", "is_simple_string", "and", "is_list", "detection", "functions", "and", "convert_key_to_string", "and", "convert_keys_to_list", "functions", "can", "be", "used", "to", "process", "the", "results", "further", "if", "necessary", "." ]
def build_tree_from_string(formatted_data): """ Recursively builds a dictionary tree out of a string using the following rules: Parameter lists: Token(Arguments...) become Dictionary entries: Token:Arguments... Any parameter list that is a string or list of strings gets saved as such. This will return a dictionary (even if given a simple string), unless given a list of strings. The is_simple_string and is_list detection functions and convert_key_to_string and convert_keys_to_list functions can be used to process the results further if necessary. @param formatted_data - A string of data formatted by the AZCodeGenerator C++ Parser @return A dictionary containing all the data parsed out of the incoming string. """ # must maintain key order, in case we convert the keys to a list result = OrderedDict() # The AZCodeGen system can produce empty parameter lists # early detection will prevent python crashes if formatted_data is None: return None # This seems to happen when someone adds multiple of the same tag if not isinstance(formatted_data, str) and not isinstance(formatted_data, unicode): raise TypeError('Expecting string value, got {} ({})\nDid you add multiple of the same tag to an annotation?'.format(type(formatted_data), formatted_data)) # remove external whitespace formatted_data = formatted_data.strip() # strip surrounding {}s used in initializer_list<>s if formatted_data and formatted_data[0] == '{': formatted_data = re.sub(r'^\{(.+)\}$', r'\1', formatted_data) # remove internal whitespace formatted_data = formatted_data.strip() # The AZCodeGen system can produce empty strings as parameter lists # Early detection will speed up execution if not formatted_data: return None # AZCodeGen allows spaces, commas, and tabs to be used as major # delimiters # in lists delimiters = " ,\t" # terminal case (Simple Token) if is_simple_token(formatted_data, delimiters): result[formatted_data] = None # recursive case (New Parameter) elif is_token_and_params(formatted_data, delimiters): tag = extract_tag(formatted_data, delimiters) params = build_tree_from_string(extract_parameters(formatted_data)) template_params = extract_template_params(formatted_data) # Determine if we can flatten the parameter list into a string # or a list of strings if not isinstance(params, list): if is_simple_string(params): params = convert_key_to_string(params) elif is_list(params): params = convert_keys_to_list(params) if template_params: result[tag] = { 'params': params , 'template_params': template_params, } else: result[tag] = params # There are multiple tokens At This Level. # Separate and parse them individually. else: args = split_on_major_delimiter(formatted_data, delimiters) trees = [] for arg in args: tree = build_tree_from_string(arg) trees.append(tree) # intercept simple lists immediately (this is necessary to prevent duplicate list items from being coalesced # into a dictionary) if all(is_simple_string(tree) for tree in trees): return [convert_key_to_string(tree) for tree in trees] # pre-format the result dict, if there are multiple values at a key # ensure that the result value will be a list for tree in trees: for tag, value in tree.items(): if tag in result and not isinstance(result[tag], list): result[tag] = [] elif tag not in result: result[tag] = None # coalesce value trees into the result for tree in trees: for tag, value in tree.items(): if isinstance(result[tag], list): result[tag].append(value) else: result[tag] = value return result
[ "def", "build_tree_from_string", "(", "formatted_data", ")", ":", "# must maintain key order, in case we convert the keys to a list", "result", "=", "OrderedDict", "(", ")", "# The AZCodeGen system can produce empty parameter lists", "# early detection will prevent python crashes", "if", "formatted_data", "is", "None", ":", "return", "None", "# This seems to happen when someone adds multiple of the same tag", "if", "not", "isinstance", "(", "formatted_data", ",", "str", ")", "and", "not", "isinstance", "(", "formatted_data", ",", "unicode", ")", ":", "raise", "TypeError", "(", "'Expecting string value, got {} ({})\\nDid you add multiple of the same tag to an annotation?'", ".", "format", "(", "type", "(", "formatted_data", ")", ",", "formatted_data", ")", ")", "# remove external whitespace", "formatted_data", "=", "formatted_data", ".", "strip", "(", ")", "# strip surrounding {}s used in initializer_list<>s", "if", "formatted_data", "and", "formatted_data", "[", "0", "]", "==", "'{'", ":", "formatted_data", "=", "re", ".", "sub", "(", "r'^\\{(.+)\\}$'", ",", "r'\\1'", ",", "formatted_data", ")", "# remove internal whitespace", "formatted_data", "=", "formatted_data", ".", "strip", "(", ")", "# The AZCodeGen system can produce empty strings as parameter lists", "# Early detection will speed up execution", "if", "not", "formatted_data", ":", "return", "None", "# AZCodeGen allows spaces, commas, and tabs to be used as major", "# delimiters", "# in lists", "delimiters", "=", "\" ,\\t\"", "# terminal case (Simple Token)", "if", "is_simple_token", "(", "formatted_data", ",", "delimiters", ")", ":", "result", "[", "formatted_data", "]", "=", "None", "# recursive case (New Parameter)", "elif", "is_token_and_params", "(", "formatted_data", ",", "delimiters", ")", ":", "tag", "=", "extract_tag", "(", "formatted_data", ",", "delimiters", ")", "params", "=", "build_tree_from_string", "(", "extract_parameters", "(", "formatted_data", ")", ")", "template_params", "=", "extract_template_params", "(", "formatted_data", ")", "# Determine if we can flatten the parameter list into a string", "# or a list of strings", "if", "not", "isinstance", "(", "params", ",", "list", ")", ":", "if", "is_simple_string", "(", "params", ")", ":", "params", "=", "convert_key_to_string", "(", "params", ")", "elif", "is_list", "(", "params", ")", ":", "params", "=", "convert_keys_to_list", "(", "params", ")", "if", "template_params", ":", "result", "[", "tag", "]", "=", "{", "'params'", ":", "params", ",", "'template_params'", ":", "template_params", ",", "}", "else", ":", "result", "[", "tag", "]", "=", "params", "# There are multiple tokens At This Level.", "# Separate and parse them individually.", "else", ":", "args", "=", "split_on_major_delimiter", "(", "formatted_data", ",", "delimiters", ")", "trees", "=", "[", "]", "for", "arg", "in", "args", ":", "tree", "=", "build_tree_from_string", "(", "arg", ")", "trees", ".", "append", "(", "tree", ")", "# intercept simple lists immediately (this is necessary to prevent duplicate list items from being coalesced", "# into a dictionary)", "if", "all", "(", "is_simple_string", "(", "tree", ")", "for", "tree", "in", "trees", ")", ":", "return", "[", "convert_key_to_string", "(", "tree", ")", "for", "tree", "in", "trees", "]", "# pre-format the result dict, if there are multiple values at a key", "# ensure that the result value will be a list", "for", "tree", "in", "trees", ":", "for", "tag", ",", "value", "in", "tree", ".", "items", "(", ")", ":", "if", "tag", "in", "result", 
"and", "not", "isinstance", "(", "result", "[", "tag", "]", ",", "list", ")", ":", "result", "[", "tag", "]", "=", "[", "]", "elif", "tag", "not", "in", "result", ":", "result", "[", "tag", "]", "=", "None", "# coalesce value trees into the result", "for", "tree", "in", "trees", ":", "for", "tag", ",", "value", "in", "tree", ".", "items", "(", ")", ":", "if", "isinstance", "(", "result", "[", "tag", "]", ",", "list", ")", ":", "result", "[", "tag", "]", ".", "append", "(", "value", ")", "else", ":", "result", "[", "tag", "]", "=", "value", "return", "result" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AzCodeGenerator/bin/osx/az_code_gen/clang_cpp.py#L236-L337
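To illustrate the documented rules in isolation, here is a deliberately simplified toy parser (not the AZCodeGenerator implementation; it ignores sibling parameter lists, template arguments, and the flattening helpers):

import re
from collections import OrderedDict

def mini_tree(s):
    # Token(Arguments...) -> {Token: parsed(Arguments)}; bare tokens stay strings;
    # several bare tokens separated by " ,\t" become a list of strings.
    s = s.strip()
    m = re.match(r'^(\w+)\((.*)\)$', s)
    if m:
        return OrderedDict([(m.group(1), mini_tree(m.group(2)))])
    parts = [p for p in re.split(r'[ ,\t]+', s) if p]
    if len(parts) > 1:
        return [mini_tree(p) for p in parts]
    return s

print(mini_tree("Serialize(Field(name, type))"))
# OrderedDict([('Serialize', OrderedDict([('Field', ['name', 'type'])]))])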
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/gtk/_misc.py
python
MutexGuiLeave
(*args)
return _misc_.MutexGuiLeave(*args)
MutexGuiLeave()
MutexGuiLeave()
[ "MutexGuiLeave", "()" ]
def MutexGuiLeave(*args): """MutexGuiLeave()""" return _misc_.MutexGuiLeave(*args)
[ "def", "MutexGuiLeave", "(", "*", "args", ")", ":", "return", "_misc_", ".", "MutexGuiLeave", "(", "*", "args", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/_misc.py#L635-L637
microsoft/onnxruntime
f92e47e95b13a240e37caf7b36577983544f98fc
orttraining/orttraining/python/training/ortmodule/_torch_module_ort.py
python
TorchModuleORT.parameters
(self, recurse: bool = True)
Override original method to delegate execution to the original PyTorch user module
Override original method to delegate execution to the original PyTorch user module
[ "Override", "original", "method", "to", "delegate", "execution", "to", "the", "original", "PyTorch", "user", "module" ]
def parameters(self, recurse: bool = True) -> Iterator[torch.nn.Parameter]: """Override original method to delegate execution to the original PyTorch user module""" yield from self._original_module.parameters(recurse=recurse)
[ "def", "parameters", "(", "self", ",", "recurse", ":", "bool", "=", "True", ")", "->", "Iterator", "[", "torch", ".", "nn", ".", "Parameter", "]", ":", "yield", "from", "self", ".", "_original_module", ".", "parameters", "(", "recurse", "=", "recurse", ")" ]
https://github.com/microsoft/onnxruntime/blob/f92e47e95b13a240e37caf7b36577983544f98fc/orttraining/orttraining/python/training/ortmodule/_torch_module_ort.py#L91-L94
mapnik/mapnik
f3da900c355e1d15059c4a91b00203dcc9d9f0ef
scons/scons-local-4.1.0/SCons/Variables/__init__.py
python
Variables.Save
(self, filename, env)
Saves all the options in the given file. This file can then be used to load the options next run. This can be used to create an option cache file. filename - Name of the file to save into env - the environment to get the option values from
Saves all the options in the given file. This file can then be used to load the options next run. This can be used to create an option cache file.
[ "Saves", "all", "the", "options", "in", "the", "given", "file", ".", "This", "file", "can", "then", "be", "used", "to", "load", "the", "options", "next", "run", ".", "This", "can", "be", "used", "to", "create", "an", "option", "cache", "file", "." ]
def Save(self, filename, env):
    """
    Saves all the options in the given file.  This file can
    then be used to load the options next run.  This can be used
    to create an option cache file.

    filename - Name of the file to save into
    env - the environment to get the option values from
    """
    # Create the file and write out the header
    try:
        fh = open(filename, 'w')
        try:
            # Make an assignment in the file for each option
            # within the environment that was assigned a value
            # other than the default.
            for option in self.options:
                try:
                    value = env[option.key]
                    try:
                        prepare = value.prepare_to_store
                    except AttributeError:
                        try:
                            eval(repr(value))
                        except KeyboardInterrupt:
                            raise
                        except:
                            # Convert stuff that has a repr() that
                            # cannot be evaluated into a string
                            value = SCons.Util.to_String(value)
                    else:
                        value = prepare()

                    defaultVal = env.subst(SCons.Util.to_String(option.default))
                    if option.converter:
                        defaultVal = option.converter(defaultVal)
                    if str(env.subst('${%s}' % option.key)) != str(defaultVal):
                        fh.write('%s = %s\n' % (option.key, repr(value)))
                except KeyError:
                    pass
        finally:
            fh.close()
    except IOError as x:
        raise SCons.Errors.UserError('Error writing options to file: %s\n%s' % (filename, x))
[ "def", "Save", "(", "self", ",", "filename", ",", "env", ")", ":", "# Create the file and write out the header", "try", ":", "fh", "=", "open", "(", "filename", ",", "'w'", ")", "try", ":", "# Make an assignment in the file for each option", "# within the environment that was assigned a value", "# other than the default.", "for", "option", "in", "self", ".", "options", ":", "try", ":", "value", "=", "env", "[", "option", ".", "key", "]", "try", ":", "prepare", "=", "value", ".", "prepare_to_store", "except", "AttributeError", ":", "try", ":", "eval", "(", "repr", "(", "value", ")", ")", "except", "KeyboardInterrupt", ":", "raise", "except", ":", "# Convert stuff that has a repr() that", "# cannot be evaluated into a string", "value", "=", "SCons", ".", "Util", ".", "to_String", "(", "value", ")", "else", ":", "value", "=", "prepare", "(", ")", "defaultVal", "=", "env", ".", "subst", "(", "SCons", ".", "Util", ".", "to_String", "(", "option", ".", "default", ")", ")", "if", "option", ".", "converter", ":", "defaultVal", "=", "option", ".", "converter", "(", "defaultVal", ")", "if", "str", "(", "env", ".", "subst", "(", "'${%s}'", "%", "option", ".", "key", ")", ")", "!=", "str", "(", "defaultVal", ")", ":", "fh", ".", "write", "(", "'%s = %s\\n'", "%", "(", "option", ".", "key", ",", "repr", "(", "value", ")", ")", ")", "except", "KeyError", ":", "pass", "finally", ":", "fh", ".", "close", "(", ")", "except", "IOError", "as", "x", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "'Error writing options to file: %s\\n%s'", "%", "(", "filename", ",", "x", ")", ")" ]
https://github.com/mapnik/mapnik/blob/f3da900c355e1d15059c4a91b00203dcc9d9f0ef/scons/scons-local-4.1.0/SCons/Variables/__init__.py#L234-L281
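Typical use from an SConstruct file, sketched from the documented Variables workflow (Variables and Environment are injected into SConstruct files by SCons itself, so no imports are needed there):

# SConstruct (sketch)
vars = Variables('custom.py')               # loads values saved by a previous run, if any
vars.Add('CC', 'C compiler to use', 'gcc')
env = Environment(variables=vars)
vars.Save('custom.py', env)                 # writes back only values that differ from defaults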
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pytz/__init__.py
python
UTC.localize
(self, dt, is_dst=False)
return dt.replace(tzinfo=self)
Convert naive time to local time
Convert naive time to local time
[ "Convert", "naive", "time", "to", "local", "time" ]
def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self)
[ "def", "localize", "(", "self", ",", "dt", ",", "is_dst", "=", "False", ")", ":", "if", "dt", ".", "tzinfo", "is", "not", "None", ":", "raise", "ValueError", "(", "'Not naive datetime (tzinfo is already set)'", ")", "return", "dt", ".", "replace", "(", "tzinfo", "=", "self", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/pytz/__init__.py#L235-L239
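A runnable example against the real pytz package, matching the guard in the function above:

from datetime import datetime
import pytz

naive = datetime(2024, 1, 1, 12, 0)
aware = pytz.utc.localize(naive)   # attaches tzinfo=UTC
print(aware.isoformat())           # 2024-01-01T12:00:00+00:00
# pytz.utc.localize(aware) would raise ValueError, since the datetime is no longer naive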
mindspore-ai/mindspore
fb8fd3338605bb34fa5cea054e535a8b1d753fab
mindspore/python/mindspore/ops/composite/multitype_ops/setitem_impl.py
python
_tensor_setitem_by_tensor_with_tuple
(data, index, value)
return compile_utils.tensor_setitem_by_tensor_with_sequence(data, index, value)
Tensor assignment. Inputs: data (Tensor): Assigned tensor. index (Tensor): Tensor of bool type. value (Tuple): Assignment value. Outputs: Tensor, element type and shape are the same as data.
Tensor assignment.
[ "Tensor", "assignment", "." ]
def _tensor_setitem_by_tensor_with_tuple(data, index, value):
    """
    Tensor assignment.

    Inputs:
        data (Tensor): Assigned tensor.
        index (Tensor): Tensor of bool type.
        value (Tuple): Assignment value.

    Outputs:
        Tensor, element type and shape are the same as data.
    """
    return compile_utils.tensor_setitem_by_tensor_with_sequence(data, index, value)
[ "def", "_tensor_setitem_by_tensor_with_tuple", "(", "data", ",", "index", ",", "value", ")", ":", "return", "compile_utils", ".", "tensor_setitem_by_tensor_with_sequence", "(", "data", ",", "index", ",", "value", ")" ]
https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/ops/composite/multitype_ops/setitem_impl.py#L286-L298
Polidea/SiriusObfuscator
b0e590d8130e97856afe578869b83a209e2b19be
SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py
python
SBExpressionOptions.SetTimeoutInMicroSeconds
(self, timeout = 0)
return _lldb.SBExpressionOptions_SetTimeoutInMicroSeconds(self, timeout)
SetTimeoutInMicroSeconds(self, uint32_t timeout = 0) SetTimeoutInMicroSeconds(self) Sets the timeout in microseconds to run the expression for. If try all threads is set to true and the expression doesn't complete within the specified timeout, all threads will be resumed for the same timeout to see if the expression will finish.
SetTimeoutInMicroSeconds(self, uint32_t timeout = 0) SetTimeoutInMicroSeconds(self)
[ "SetTimeoutInMicroSeconds", "(", "self", "uint32_t", "timeout", "=", "0", ")", "SetTimeoutInMicroSeconds", "(", "self", ")" ]
def SetTimeoutInMicroSeconds(self, timeout = 0):
    """
    SetTimeoutInMicroSeconds(self, uint32_t timeout = 0)
    SetTimeoutInMicroSeconds(self)

    Sets the timeout in microseconds to run the expression for. If try all threads is set to true
    and the expression doesn't complete within the specified timeout, all threads will be resumed
    for the same timeout to see if the expression will finish.
    """
    return _lldb.SBExpressionOptions_SetTimeoutInMicroSeconds(self, timeout)
[ "def", "SetTimeoutInMicroSeconds", "(", "self", ",", "timeout", "=", "0", ")", ":", "return", "_lldb", ".", "SBExpressionOptions_SetTimeoutInMicroSeconds", "(", "self", ",", "timeout", ")" ]
https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L4119-L4126
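A short sketch assuming lldb's Python bindings are importable; SetTryAllThreads pairs with the timeout exactly as the docstring describes:

import lldb

opts = lldb.SBExpressionOptions()
opts.SetTimeoutInMicroSeconds(500000)  # interrupt the expression after 0.5 s
opts.SetTryAllThreads(True)            # on timeout, retry once with all threads resumed
# Typical use (target is an SBTarget from a debug session):
# result = target.EvaluateExpression("compute()", opts)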
kungfu-origin/kungfu
90c84b2b590855654cb9a6395ed050e0f7763512
core/deps/SQLiteCpp-2.3.0/cpplint.py
python
CheckForMultilineCommentsAndStrings
(filename, clean_lines, linenum, error)
Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
Logs an error if we see /* ... */ or "..." that extend past one line.
[ "Logs", "an", "error", "if", "we", "see", "/", "*", "...", "*", "/", "or", "...", "that", "extend", "past", "one", "line", "." ]
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. ' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.')
[ "def", "CheckForMultilineCommentsAndStrings", "(", "filename", ",", "clean_lines", ",", "linenum", ",", "error", ")", ":", "line", "=", "clean_lines", ".", "elided", "[", "linenum", "]", "# Remove all \\\\ (escaped backslashes) from the line. They are OK, and the", "# second (escaped) slash may trigger later \\\" detection erroneously.", "line", "=", "line", ".", "replace", "(", "'\\\\\\\\'", ",", "''", ")", "if", "line", ".", "count", "(", "'/*'", ")", ">", "line", ".", "count", "(", "'*/'", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/multiline_comment'", ",", "5", ",", "'Complex multi-line /*...*/-style comment found. '", "'Lint may give bogus warnings. '", "'Consider replacing these with //-style comments, '", "'with #if 0...#endif, '", "'or with more clearly structured multi-line comments.'", ")", "if", "(", "line", ".", "count", "(", "'\"'", ")", "-", "line", ".", "count", "(", "'\\\\\"'", ")", ")", "%", "2", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/multiline_string'", ",", "5", ",", "'Multi-line string (\"...\") found. This lint script doesn\\'t '", "'do well with such strings, and may give bogus warnings. '", "'Use C++11 raw strings or concatenation instead.'", ")" ]
https://github.com/kungfu-origin/kungfu/blob/90c84b2b590855654cb9a6395ed050e0f7763512/core/deps/SQLiteCpp-2.3.0/cpplint.py#L1555-L1590
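The quote-parity check at the end of the function can be demonstrated in isolation; escaped quotes are subtracted before testing parity:

line = 'const char* s = "unterminated'    # string literal running past the line
ok = 'printf("quote: \\" done");'         # escaped quote does not count toward parity
assert (line.count('"') - line.count('\\"')) % 2 == 1  # odd -> multiline-string warning
assert (ok.count('"') - ok.count('\\"')) % 2 == 0      # even -> no warning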
CRYTEK/CRYENGINE
232227c59a220cbbd311576f0fbeba7bb53b2a8c
Editor/Python/windows/Lib/site-packages/setuptools/package_index.py
python
distros_for_location
(location, basename, metadata=None)
return []
Yield egg or source distribution objects based on basename
Yield egg or source distribution objects based on basename
[ "Yield", "egg", "or", "source", "distribution", "objects", "based", "on", "basename" ]
def distros_for_location(location, basename, metadata=None): """Yield egg or source distribution objects based on basename""" if basename.endswith('.egg.zip'): basename = basename[:-4] # strip the .zip if basename.endswith('.egg') and '-' in basename: # only one, unambiguous interpretation return [Distribution.from_location(location, basename, metadata)] if basename.endswith('.whl') and '-' in basename: wheel = Wheel(basename) if not wheel.is_compatible(): return [] return [Distribution( location=location, project_name=wheel.project_name, version=wheel.version, # Increase priority over eggs. precedence=EGG_DIST + 1, )] if basename.endswith('.exe'): win_base, py_ver, platform = parse_bdist_wininst(basename) if win_base is not None: return interpret_distro_name( location, win_base, metadata, py_ver, BINARY_DIST, platform ) # Try source distro extensions (.zip, .tgz, etc.) # for ext in EXTENSIONS: if basename.endswith(ext): basename = basename[:-len(ext)] return interpret_distro_name(location, basename, metadata) return []
[ "def", "distros_for_location", "(", "location", ",", "basename", ",", "metadata", "=", "None", ")", ":", "if", "basename", ".", "endswith", "(", "'.egg.zip'", ")", ":", "basename", "=", "basename", "[", ":", "-", "4", "]", "# strip the .zip", "if", "basename", ".", "endswith", "(", "'.egg'", ")", "and", "'-'", "in", "basename", ":", "# only one, unambiguous interpretation", "return", "[", "Distribution", ".", "from_location", "(", "location", ",", "basename", ",", "metadata", ")", "]", "if", "basename", ".", "endswith", "(", "'.whl'", ")", "and", "'-'", "in", "basename", ":", "wheel", "=", "Wheel", "(", "basename", ")", "if", "not", "wheel", ".", "is_compatible", "(", ")", ":", "return", "[", "]", "return", "[", "Distribution", "(", "location", "=", "location", ",", "project_name", "=", "wheel", ".", "project_name", ",", "version", "=", "wheel", ".", "version", ",", "# Increase priority over eggs.", "precedence", "=", "EGG_DIST", "+", "1", ",", ")", "]", "if", "basename", ".", "endswith", "(", "'.exe'", ")", ":", "win_base", ",", "py_ver", ",", "platform", "=", "parse_bdist_wininst", "(", "basename", ")", "if", "win_base", "is", "not", "None", ":", "return", "interpret_distro_name", "(", "location", ",", "win_base", ",", "metadata", ",", "py_ver", ",", "BINARY_DIST", ",", "platform", ")", "# Try source distro extensions (.zip, .tgz, etc.)", "#", "for", "ext", "in", "EXTENSIONS", ":", "if", "basename", ".", "endswith", "(", "ext", ")", ":", "basename", "=", "basename", "[", ":", "-", "len", "(", "ext", ")", "]", "return", "interpret_distro_name", "(", "location", ",", "basename", ",", "metadata", ")", "return", "[", "]" ]
https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Editor/Python/windows/Lib/site-packages/setuptools/package_index.py#L110-L140
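A sketch, assuming a setuptools release that still ships the (long-deprecated) package_index module; the URL is a hypothetical index location:

from setuptools.package_index import distros_for_location

url = 'https://example.com/packages/'
for d in distros_for_location(url, 'demo-1.0-py3-none-any.whl'):
    print(d.project_name, d.version)   # demo 1.0 (wheel parsed, compatibility checked)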
PaddlePaddle/Paddle
1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c
python/paddle/distribution/dirichlet.py
python
Dirichlet.entropy
(self)
return (paddle.lgamma(self.concentration).sum(-1) - paddle.lgamma(concentration0) - (k - concentration0) * paddle.digamma(concentration0) - ( (self.concentration - 1.0 ) * paddle.digamma(self.concentration)).sum(-1))
Entropy of Dirichlet distribution. Returns: Entropy of distribution.
Entropy of Dirichlet distribution.
[ "Entropy", "of", "Dirichlet", "distribution", "." ]
def entropy(self): """Entropy of Dirichlet distribution. Returns: Entropy of distribution. """ concentration0 = self.concentration.sum(-1) k = self.concentration.shape[-1] return (paddle.lgamma(self.concentration).sum(-1) - paddle.lgamma(concentration0) - (k - concentration0) * paddle.digamma(concentration0) - ( (self.concentration - 1.0 ) * paddle.digamma(self.concentration)).sum(-1))
[ "def", "entropy", "(", "self", ")", ":", "concentration0", "=", "self", ".", "concentration", ".", "sum", "(", "-", "1", ")", "k", "=", "self", ".", "concentration", ".", "shape", "[", "-", "1", "]", "return", "(", "paddle", ".", "lgamma", "(", "self", ".", "concentration", ")", ".", "sum", "(", "-", "1", ")", "-", "paddle", ".", "lgamma", "(", "concentration0", ")", "-", "(", "k", "-", "concentration0", ")", "*", "paddle", ".", "digamma", "(", "concentration0", ")", "-", "(", "(", "self", ".", "concentration", "-", "1.0", ")", "*", "paddle", ".", "digamma", "(", "self", ".", "concentration", ")", ")", ".", "sum", "(", "-", "1", ")", ")" ]
https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distribution/dirichlet.py#L133-L145
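The return expression is the standard closed form H = sum(lgamma(a_i)) - lgamma(a0) + (a0 - k) * digamma(a0) - sum((a_i - 1) * digamma(a_i)), where a0 is the sum of the concentration vector and k its length. A minimal usage sketch against paddle's public distribution API:

import paddle
from paddle.distribution import Dirichlet

d = Dirichlet(paddle.to_tensor([1.0, 2.0, 3.0]))
print(d.entropy())  # 0-d Tensor holding the closed-form entropy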
google/shaka-packager
e1b0c7c45431327fd3ce193514a5407d07b39b22
packager/third_party/protobuf/python/google/protobuf/internal/containers.py
python
ScalarMap.__init__
(self, message_listener, key_checker, value_checker, entry_descriptor)
Args: message_listener: A MessageListener implementation. The ScalarMap will call this object's Modified() method when it is modified. key_checker: A type_checkers.ValueChecker instance to run on keys inserted into this container. value_checker: A type_checkers.ValueChecker instance to run on values inserted into this container. entry_descriptor: The MessageDescriptor of a map entry: key and value.
Args: message_listener: A MessageListener implementation. The ScalarMap will call this object's Modified() method when it is modified. key_checker: A type_checkers.ValueChecker instance to run on keys inserted into this container. value_checker: A type_checkers.ValueChecker instance to run on values inserted into this container. entry_descriptor: The MessageDescriptor of a map entry: key and value.
[ "Args", ":", "message_listener", ":", "A", "MessageListener", "implementation", ".", "The", "ScalarMap", "will", "call", "this", "object", "s", "Modified", "()", "method", "when", "it", "is", "modified", ".", "key_checker", ":", "A", "type_checkers", ".", "ValueChecker", "instance", "to", "run", "on", "keys", "inserted", "into", "this", "container", ".", "value_checker", ":", "A", "type_checkers", ".", "ValueChecker", "instance", "to", "run", "on", "values", "inserted", "into", "this", "container", ".", "entry_descriptor", ":", "The", "MessageDescriptor", "of", "a", "map", "entry", ":", "key", "and", "value", "." ]
def __init__(self, message_listener, key_checker, value_checker, entry_descriptor): """ Args: message_listener: A MessageListener implementation. The ScalarMap will call this object's Modified() method when it is modified. key_checker: A type_checkers.ValueChecker instance to run on keys inserted into this container. value_checker: A type_checkers.ValueChecker instance to run on values inserted into this container. entry_descriptor: The MessageDescriptor of a map entry: key and value. """ self._message_listener = message_listener self._key_checker = key_checker self._value_checker = value_checker self._entry_descriptor = entry_descriptor self._values = {}
[ "def", "__init__", "(", "self", ",", "message_listener", ",", "key_checker", ",", "value_checker", ",", "entry_descriptor", ")", ":", "self", ".", "_message_listener", "=", "message_listener", "self", ".", "_key_checker", "=", "key_checker", "self", ".", "_value_checker", "=", "value_checker", "self", ".", "_entry_descriptor", "=", "entry_descriptor", "self", ".", "_values", "=", "{", "}" ]
https://github.com/google/shaka-packager/blob/e1b0c7c45431327fd3ce193514a5407d07b39b22/packager/third_party/protobuf/python/google/protobuf/internal/containers.py#L442-L459
Jittor/jittor
e9aca0444c2bdc8e2389d99122954cd0903eec46
python/jittor/linalg.py
python
eigh
(x)
return w, v
r""" calculate the eigenvalues and eigenvectors of x. :param x (...,M,M): :return:w, v. w (...,M) : the eigenvalues. v (...,M,M) : normalized eigenvectors.
r""" calculate the eigenvalues and eigenvectors of x. :param x (...,M,M): :return:w, v. w (...,M) : the eigenvalues. v (...,M,M) : normalized eigenvectors.
[ "r", "calculate", "the", "eigenvalues", "and", "eigenvectors", "of", "x", ".", ":", "param", "x", "(", "...", "M", "M", ")", ":", ":", "return", ":", "w", "v", ".", "w", "(", "...", "M", ")", ":", "the", "eigenvalues", ".", "v", "(", "...", "M", "M", ")", ":", "normalized", "eigenvectors", "." ]
def eigh(x): r""" calculate the eigenvalues and eigenvectors of x. :param x (...,M,M): :return:w, v. w (...,M) : the eigenvalues. v (...,M,M) : normalized eigenvectors. """ def forward_code(np, data): a = data["inputs"][0] w, v = data["outputs"] tw, tv = np.linalg.eigh(a, UPLO='L') np.copyto(w, tw) np.copyto(v, tv) def backward_code(np, data): def T(x): return np.swapaxes(x, -1, -2) _dot = partial(np.einsum, '...ij,...jk->...ik') dout = data["dout"] out = data["outputs"][0] inp = data["inputs"][0] out_index = data["out_index"] w, v = data["f_outputs"] k = int(inp.shape[-1]) w_repeated = np.repeat(w[..., np.newaxis], k, axis=-1) if out_index == 0: t = _dot(v * dout[..., np.newaxis, :], T(v)) np.copyto(out, t) elif out_index == 1: if np.any(dout): off_diag = np.ones((k, k)) - np.eye(k) F = off_diag / (T(w_repeated) - w_repeated + np.eye(k)) t = _dot(_dot(v, F * _dot(T(v), dout)), T(v)) np.copyto(out, t) sw = x.shape[:-2] + x.shape[-1:] sv = x.shape w, v = jt.numpy_code( [sw, sv], [x.dtype, x.dtype], [x], forward_code, [backward_code], ) return w, v
[ "def", "eigh", "(", "x", ")", ":", "def", "forward_code", "(", "np", ",", "data", ")", ":", "a", "=", "data", "[", "\"inputs\"", "]", "[", "0", "]", "w", ",", "v", "=", "data", "[", "\"outputs\"", "]", "tw", ",", "tv", "=", "np", ".", "linalg", ".", "eigh", "(", "a", ",", "UPLO", "=", "'L'", ")", "np", ".", "copyto", "(", "w", ",", "tw", ")", "np", ".", "copyto", "(", "v", ",", "tv", ")", "def", "backward_code", "(", "np", ",", "data", ")", ":", "def", "T", "(", "x", ")", ":", "return", "np", ".", "swapaxes", "(", "x", ",", "-", "1", ",", "-", "2", ")", "_dot", "=", "partial", "(", "np", ".", "einsum", ",", "'...ij,...jk->...ik'", ")", "dout", "=", "data", "[", "\"dout\"", "]", "out", "=", "data", "[", "\"outputs\"", "]", "[", "0", "]", "inp", "=", "data", "[", "\"inputs\"", "]", "[", "0", "]", "out_index", "=", "data", "[", "\"out_index\"", "]", "w", ",", "v", "=", "data", "[", "\"f_outputs\"", "]", "k", "=", "int", "(", "inp", ".", "shape", "[", "-", "1", "]", ")", "w_repeated", "=", "np", ".", "repeat", "(", "w", "[", "...", ",", "np", ".", "newaxis", "]", ",", "k", ",", "axis", "=", "-", "1", ")", "if", "out_index", "==", "0", ":", "t", "=", "_dot", "(", "v", "*", "dout", "[", "...", ",", "np", ".", "newaxis", ",", ":", "]", ",", "T", "(", "v", ")", ")", "np", ".", "copyto", "(", "out", ",", "t", ")", "elif", "out_index", "==", "1", ":", "if", "np", ".", "any", "(", "dout", ")", ":", "off_diag", "=", "np", ".", "ones", "(", "(", "k", ",", "k", ")", ")", "-", "np", ".", "eye", "(", "k", ")", "F", "=", "off_diag", "/", "(", "T", "(", "w_repeated", ")", "-", "w_repeated", "+", "np", ".", "eye", "(", "k", ")", ")", "t", "=", "_dot", "(", "_dot", "(", "v", ",", "F", "*", "_dot", "(", "T", "(", "v", ")", ",", "dout", ")", ")", ",", "T", "(", "v", ")", ")", "np", ".", "copyto", "(", "out", ",", "t", ")", "sw", "=", "x", ".", "shape", "[", ":", "-", "2", "]", "+", "x", ".", "shape", "[", "-", "1", ":", "]", "sv", "=", "x", ".", "shape", "w", ",", "v", "=", "jt", ".", "numpy_code", "(", "[", "sw", ",", "sv", "]", ",", "[", "x", ".", "dtype", ",", "x", ".", "dtype", "]", ",", "[", "x", "]", ",", "forward_code", ",", "[", "backward_code", "]", ",", ")", "return", "w", ",", "v" ]
https://github.com/Jittor/jittor/blob/e9aca0444c2bdc8e2389d99122954cd0903eec46/python/jittor/linalg.py#L97-L142
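A quick check of jittor.linalg.eigh against numpy, assuming jittor is installed; eigh expects a symmetric input, built here as a @ a.T:

import numpy as np
import jittor as jt
from jittor import linalg

a = np.random.rand(4, 4).astype(np.float32)
sym = a @ a.T                          # symmetric matrix for eigh
w, v = linalg.eigh(jt.array(sym))
print(np.allclose(w.numpy(), np.linalg.eigh(sym)[0], atol=1e-4))  # True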
borglab/gtsam
a5bee157efce6a0563704bce6a5d188c29817f39
python/gtsam/examples/VisualISAMExample.py
python
main
()
A structure-from-motion example with landmarks - The landmarks form a 10 meter cube - The robot rotates around the landmarks, always facing towards the cube
A structure-from-motion example with landmarks - The landmarks form a 10 meter cube - The robot rotates around the landmarks, always facing towards the cube
[ "A", "structure", "-", "from", "-", "motion", "example", "with", "landmarks", "-", "The", "landmarks", "form", "a", "10", "meter", "cube", "-", "The", "robot", "rotates", "around", "the", "landmarks", "always", "facing", "towards", "the", "cube" ]
def main(): """ A structure-from-motion example with landmarks - The landmarks form a 10 meter cube - The robot rotates around the landmarks, always facing towards the cube """ # Define the camera calibration parameters K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model camera_noise = gtsam.noiseModel.Isotropic.Sigma( 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() # Create the set of ground-truth poses poses = SFMdata.createPoses(K) # Create a NonlinearISAM object which will relinearize and reorder the variables # every "reorderInterval" updates isam = NonlinearISAM(reorderInterval=3) # Create a Factor Graph and Values to hold the new data graph = NonlinearFactorGraph() initial_estimate = Values() # Loop over the different poses, adding the observations to iSAM incrementally for i, pose in enumerate(poses): camera = PinholeCameraCal3_S2(pose, K) # Add factors for each landmark observation for j, point in enumerate(points): measurement = camera.project(point) factor = GenericProjectionFactorCal3_S2( measurement, camera_noise, X(i), L(j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20)) initial_xi = pose.compose(noise) # Add an initial guess for the current pose initial_estimate.insert(X(i), initial_xi) # If this is the first iteration, add a prior on the first pose to set the coordinate frame # and a prior on the first landmark to set the scale # Also, as iSAM solves incrementally, we must wait until each is observed at least twice before # adding it to iSAM. if i == 0: # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z pose_noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) # Add a prior on landmark l0 point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) # Add initial guesses to all observed landmarks noise = np.array([-0.25, 0.20, 0.15]) for j, point in enumerate(points): # Intentionally initialize the variables off from the ground truth initial_lj = points[j] + noise initial_estimate.insert(L(j), initial_lj) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) current_estimate = isam.estimate() print('*' * 50) print('Frame {}:'.format(i)) current_estimate.print('Current estimate: ') # Clear the factor graph and values for the next iteration graph.resize(0) initial_estimate.clear()
[ "def", "main", "(", ")", ":", "# Define the camera calibration parameters", "K", "=", "Cal3_S2", "(", "50.0", ",", "50.0", ",", "0.0", ",", "50.0", ",", "50.0", ")", "# Define the camera observation noise model", "camera_noise", "=", "gtsam", ".", "noiseModel", ".", "Isotropic", ".", "Sigma", "(", "2", ",", "1.0", ")", "# one pixel in u and v", "# Create the set of ground-truth landmarks", "points", "=", "SFMdata", ".", "createPoints", "(", ")", "# Create the set of ground-truth poses", "poses", "=", "SFMdata", ".", "createPoses", "(", "K", ")", "# Create a NonlinearISAM object which will relinearize and reorder the variables", "# every \"reorderInterval\" updates", "isam", "=", "NonlinearISAM", "(", "reorderInterval", "=", "3", ")", "# Create a Factor Graph and Values to hold the new data", "graph", "=", "NonlinearFactorGraph", "(", ")", "initial_estimate", "=", "Values", "(", ")", "# Loop over the different poses, adding the observations to iSAM incrementally", "for", "i", ",", "pose", "in", "enumerate", "(", "poses", ")", ":", "camera", "=", "PinholeCameraCal3_S2", "(", "pose", ",", "K", ")", "# Add factors for each landmark observation", "for", "j", ",", "point", "in", "enumerate", "(", "points", ")", ":", "measurement", "=", "camera", ".", "project", "(", "point", ")", "factor", "=", "GenericProjectionFactorCal3_S2", "(", "measurement", ",", "camera_noise", ",", "X", "(", "i", ")", ",", "L", "(", "j", ")", ",", "K", ")", "graph", ".", "push_back", "(", "factor", ")", "# Intentionally initialize the variables off from the ground truth", "noise", "=", "Pose3", "(", "r", "=", "Rot3", ".", "Rodrigues", "(", "-", "0.1", ",", "0.2", ",", "0.25", ")", ",", "t", "=", "Point3", "(", "0.05", ",", "-", "0.10", ",", "0.20", ")", ")", "initial_xi", "=", "pose", ".", "compose", "(", "noise", ")", "# Add an initial guess for the current pose", "initial_estimate", ".", "insert", "(", "X", "(", "i", ")", ",", "initial_xi", ")", "# If this is the first iteration, add a prior on the first pose to set the coordinate frame", "# and a prior on the first landmark to set the scale", "# Also, as iSAM solves incrementally, we must wait until each is observed at least twice before", "# adding it to iSAM.", "if", "i", "==", "0", ":", "# Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z", "pose_noise", "=", "gtsam", ".", "noiseModel", ".", "Diagonal", ".", "Sigmas", "(", "np", ".", "array", "(", "[", "0.3", ",", "0.3", ",", "0.3", ",", "0.1", ",", "0.1", ",", "0.1", "]", ")", ")", "factor", "=", "PriorFactorPose3", "(", "X", "(", "0", ")", ",", "poses", "[", "0", "]", ",", "pose_noise", ")", "graph", ".", "push_back", "(", "factor", ")", "# Add a prior on landmark l0", "point_noise", "=", "gtsam", ".", "noiseModel", ".", "Isotropic", ".", "Sigma", "(", "3", ",", "0.1", ")", "factor", "=", "PriorFactorPoint3", "(", "L", "(", "0", ")", ",", "points", "[", "0", "]", ",", "point_noise", ")", "graph", ".", "push_back", "(", "factor", ")", "# Add initial guesses to all observed landmarks", "noise", "=", "np", ".", "array", "(", "[", "-", "0.25", ",", "0.20", ",", "0.15", "]", ")", "for", "j", ",", "point", "in", "enumerate", "(", "points", ")", ":", "# Intentionally initialize the variables off from the ground truth", "initial_lj", "=", "points", "[", "j", "]", "+", "noise", "initial_estimate", ".", "insert", "(", "L", "(", "j", ")", ",", "initial_lj", ")", "else", ":", "# Update iSAM with the new factors", "isam", ".", "update", "(", "graph", ",", "initial_estimate", ")", "current_estimate", 
"=", "isam", ".", "estimate", "(", ")", "print", "(", "'*'", "*", "50", ")", "print", "(", "'Frame {}:'", ".", "format", "(", "i", ")", ")", "current_estimate", ".", "print", "(", "'Current estimate: '", ")", "# Clear the factor graph and values for the next iteration", "graph", ".", "resize", "(", "0", ")", "initial_estimate", ".", "clear", "(", ")" ]
https://github.com/borglab/gtsam/blob/a5bee157efce6a0563704bce6a5d188c29817f39/python/gtsam/examples/VisualISAMExample.py#L22-L99
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py
python
ResourceManager.set_extraction_path
(self, path)
Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.)
Set the base path where resources will be extracted to, if needed.
[ "Set", "the", "base", "path", "where", "resources", "will", "be", "extracted", "to", "if", "needed", "." ]
def set_extraction_path(self, path): """Set the base path where resources will be extracted to, if needed. If you do not call this routine before any extractions take place, the path defaults to the return value of ``get_default_cache()``. (Which is based on the ``PYTHON_EGG_CACHE`` environment variable, with various platform-specific fallbacks. See that routine's documentation for more details.) Resources are extracted to subdirectories of this path based upon information given by the ``IResourceProvider``. You may set this to a temporary directory, but then you must call ``cleanup_resources()`` to delete the extracted files when done. There is no guarantee that ``cleanup_resources()`` will be able to remove all extracted files. (Note: you may not change the extraction path for a given resource manager once resources have been extracted, unless you first call ``cleanup_resources()``.) """ if self.cached_files: raise ValueError( "Can't change extraction path, files already extracted" ) self.extraction_path = path
[ "def", "set_extraction_path", "(", "self", ",", "path", ")", ":", "if", "self", ".", "cached_files", ":", "raise", "ValueError", "(", "\"Can't change extraction path, files already extracted\"", ")", "self", ".", "extraction_path", "=", "path" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemFramework/v1/AWS/resource-manager-code/lib/pkg_resources/__init__.py#L1267-L1291
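A minimal usage sketch for the extraction-path API above, using the module-level pkg_resources helpers that delegate to the global ResourceManager; the package name "mypkg" and resource "data.bin" are hypothetical placeholders.

```python
import tempfile
import pkg_resources

cache_dir = tempfile.mkdtemp()
pkg_resources.set_extraction_path(cache_dir)  # must be called before any extraction happens

# path = pkg_resources.resource_filename("mypkg", "data.bin")  # hypothetical resource; may extract into cache_dir
# ... use the extracted file here ...

pkg_resources.cleanup_resources()  # best-effort removal of extracted files
```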
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pdb.py
python
Pdb.do_list
(self, arg)
l(ist) [first [,last] | .] List source code for the current file. Without arguments, list 11 lines around the current line or continue the previous listing. With . as argument, list 11 lines around the current line. With one argument, list 11 lines starting at that line. With two arguments, list the given range; if the second argument is less than the first, it is a count. The current line in the current frame is indicated by "->". If an exception is being debugged, the line where the exception was originally raised or propagated is indicated by ">>", if it differs from the current line.
l(ist) [first [,last] | .]
[ "l", "(", "ist", ")", "[", "first", "[", "last", "]", "|", ".", "]" ]
def do_list(self, arg): """l(ist) [first [,last] | .] List source code for the current file. Without arguments, list 11 lines around the current line or continue the previous listing. With . as argument, list 11 lines around the current line. With one argument, list 11 lines starting at that line. With two arguments, list the given range; if the second argument is less than the first, it is a count. The current line in the current frame is indicated by "->". If an exception is being debugged, the line where the exception was originally raised or propagated is indicated by ">>", if it differs from the current line. """ self.lastcmd = 'list' last = None if arg and arg != '.': try: if ',' in arg: first, last = arg.split(',') first = int(first.strip()) last = int(last.strip()) if last < first: # assume it's a count last = first + last else: first = int(arg.strip()) first = max(1, first - 5) except ValueError: self.error('Error in argument: %r' % arg) return elif self.lineno is None or arg == '.': first = max(1, self.curframe.f_lineno - 5) else: first = self.lineno + 1 if last is None: last = first + 10 filename = self.curframe.f_code.co_filename breaklist = self.get_file_breaks(filename) try: lines = linecache.getlines(filename, self.curframe.f_globals) self._print_lines(lines[first-1:last], first, breaklist, self.curframe) self.lineno = min(last, len(lines)) if len(lines) < last: self.message('[EOF]') except KeyboardInterrupt: pass
[ "def", "do_list", "(", "self", ",", "arg", ")", ":", "self", ".", "lastcmd", "=", "'list'", "last", "=", "None", "if", "arg", "and", "arg", "!=", "'.'", ":", "try", ":", "if", "','", "in", "arg", ":", "first", ",", "last", "=", "arg", ".", "split", "(", "','", ")", "first", "=", "int", "(", "first", ".", "strip", "(", ")", ")", "last", "=", "int", "(", "last", ".", "strip", "(", ")", ")", "if", "last", "<", "first", ":", "# assume it's a count", "last", "=", "first", "+", "last", "else", ":", "first", "=", "int", "(", "arg", ".", "strip", "(", ")", ")", "first", "=", "max", "(", "1", ",", "first", "-", "5", ")", "except", "ValueError", ":", "self", ".", "error", "(", "'Error in argument: %r'", "%", "arg", ")", "return", "elif", "self", ".", "lineno", "is", "None", "or", "arg", "==", "'.'", ":", "first", "=", "max", "(", "1", ",", "self", ".", "curframe", ".", "f_lineno", "-", "5", ")", "else", ":", "first", "=", "self", ".", "lineno", "+", "1", "if", "last", "is", "None", ":", "last", "=", "first", "+", "10", "filename", "=", "self", ".", "curframe", ".", "f_code", ".", "co_filename", "breaklist", "=", "self", ".", "get_file_breaks", "(", "filename", ")", "try", ":", "lines", "=", "linecache", ".", "getlines", "(", "filename", ",", "self", ".", "curframe", ".", "f_globals", ")", "self", ".", "_print_lines", "(", "lines", "[", "first", "-", "1", ":", "last", "]", ",", "first", ",", "breaklist", ",", "self", ".", "curframe", ")", "self", ".", "lineno", "=", "min", "(", "last", ",", "len", "(", "lines", ")", ")", "if", "len", "(", "lines", ")", "<", "last", ":", "self", ".", "message", "(", "'[EOF]'", ")", "except", "KeyboardInterrupt", ":", "pass" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/pdb.py#L1196-L1244
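The argument forms accepted by do_list map directly onto its parsing branches; a sketch of an interactive session, with the commands shown as comments since pdb is driven from its prompt.

```python
import pdb

# (Pdb) l            -> 11 lines around the current line, or continue the last listing
# (Pdb) l .          -> re-center: 11 lines around the current line
# (Pdb) l 42         -> first = max(1, 42 - 5), last = first + 10
# (Pdb) l 42, 60     -> lines 42 through 60
# (Pdb) l 42, 5      -> second value < first, so it is a count: lines 42 through 47
pdb.set_trace()  # drop into the prompt and try the forms above
```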
gklz1982/caffe-yolov2
ebb27029db4ddc0d40e520634633b0fa9cdcc10d
python/caffe/draw.py
python
get_pydot_graph
(caffe_net, rankdir, label_edges=True)
return pydot_graph
Create a data structure which represents the `caffe_net`. Parameters ---------- caffe_net : object rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. label_edges : boolean, optional Label the edges (default is True). Returns ------- pydot graph object
Create a data structure which represents the `caffe_net`.
[ "Create", "a", "data", "structure", "which", "represents", "the", "caffe_net", "." ]
def get_pydot_graph(caffe_net, rankdir, label_edges=True): """Create a data structure which represents the `caffe_net`. Parameters ---------- caffe_net : object rankdir : {'LR', 'TB', 'BT'} Direction of graph layout. label_edges : boolean, optional Label the edges (default is True). Returns ------- pydot graph object """ pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net', graph_type='digraph', rankdir=rankdir) pydot_nodes = {} pydot_edges = [] for layer in caffe_net.layer: node_label = get_layer_label(layer, rankdir) node_name = "%s_%s" % (layer.name, layer.type) if (len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]): # We have an in-place neuron layer. pydot_nodes[node_name] = pydot.Node(node_label, **NEURON_LAYER_STYLE) else: layer_style = LAYER_STYLE_DEFAULT layer_style['fillcolor'] = choose_color_by_layertype(layer.type) pydot_nodes[node_name] = pydot.Node(node_label, **layer_style) for bottom_blob in layer.bottom: pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob, **BLOB_STYLE) edge_label = '""' pydot_edges.append({'src': bottom_blob + '_blob', 'dst': node_name, 'label': edge_label}) for top_blob in layer.top: pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob)) if label_edges: edge_label = get_edge_label(layer) else: edge_label = '""' pydot_edges.append({'src': node_name, 'dst': top_blob + '_blob', 'label': edge_label}) # Now, add the nodes and edges to the graph. for node in pydot_nodes.values(): pydot_graph.add_node(node) for edge in pydot_edges: pydot_graph.add_edge( pydot.Edge(pydot_nodes[edge['src']], pydot_nodes[edge['dst']], label=edge['label'])) return pydot_graph
[ "def", "get_pydot_graph", "(", "caffe_net", ",", "rankdir", ",", "label_edges", "=", "True", ")", ":", "pydot_graph", "=", "pydot", ".", "Dot", "(", "caffe_net", ".", "name", "if", "caffe_net", ".", "name", "else", "'Net'", ",", "graph_type", "=", "'digraph'", ",", "rankdir", "=", "rankdir", ")", "pydot_nodes", "=", "{", "}", "pydot_edges", "=", "[", "]", "for", "layer", "in", "caffe_net", ".", "layer", ":", "node_label", "=", "get_layer_label", "(", "layer", ",", "rankdir", ")", "node_name", "=", "\"%s_%s\"", "%", "(", "layer", ".", "name", ",", "layer", ".", "type", ")", "if", "(", "len", "(", "layer", ".", "bottom", ")", "==", "1", "and", "len", "(", "layer", ".", "top", ")", "==", "1", "and", "layer", ".", "bottom", "[", "0", "]", "==", "layer", ".", "top", "[", "0", "]", ")", ":", "# We have an in-place neuron layer.", "pydot_nodes", "[", "node_name", "]", "=", "pydot", ".", "Node", "(", "node_label", ",", "*", "*", "NEURON_LAYER_STYLE", ")", "else", ":", "layer_style", "=", "LAYER_STYLE_DEFAULT", "layer_style", "[", "'fillcolor'", "]", "=", "choose_color_by_layertype", "(", "layer", ".", "type", ")", "pydot_nodes", "[", "node_name", "]", "=", "pydot", ".", "Node", "(", "node_label", ",", "*", "*", "layer_style", ")", "for", "bottom_blob", "in", "layer", ".", "bottom", ":", "pydot_nodes", "[", "bottom_blob", "+", "'_blob'", "]", "=", "pydot", ".", "Node", "(", "'%s'", "%", "bottom_blob", ",", "*", "*", "BLOB_STYLE", ")", "edge_label", "=", "'\"\"'", "pydot_edges", ".", "append", "(", "{", "'src'", ":", "bottom_blob", "+", "'_blob'", ",", "'dst'", ":", "node_name", ",", "'label'", ":", "edge_label", "}", ")", "for", "top_blob", "in", "layer", ".", "top", ":", "pydot_nodes", "[", "top_blob", "+", "'_blob'", "]", "=", "pydot", ".", "Node", "(", "'%s'", "%", "(", "top_blob", ")", ")", "if", "label_edges", ":", "edge_label", "=", "get_edge_label", "(", "layer", ")", "else", ":", "edge_label", "=", "'\"\"'", "pydot_edges", ".", "append", "(", "{", "'src'", ":", "node_name", ",", "'dst'", ":", "top_blob", "+", "'_blob'", ",", "'label'", ":", "edge_label", "}", ")", "# Now, add the nodes and edges to the graph.", "for", "node", "in", "pydot_nodes", ".", "values", "(", ")", ":", "pydot_graph", ".", "add_node", "(", "node", ")", "for", "edge", "in", "pydot_edges", ":", "pydot_graph", ".", "add_edge", "(", "pydot", ".", "Edge", "(", "pydot_nodes", "[", "edge", "[", "'src'", "]", "]", ",", "pydot_nodes", "[", "edge", "[", "'dst'", "]", "]", ",", "label", "=", "edge", "[", "'label'", "]", ")", ")", "return", "pydot_graph" ]
https://github.com/gklz1982/caffe-yolov2/blob/ebb27029db4ddc0d40e520634633b0fa9cdcc10d/python/caffe/draw.py#L130-L186
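A hedged usage sketch for get_pydot_graph: parse a NetParameter from a prototxt and render it to an image. It assumes the caffe Python bindings and graphviz are installed; "deploy.prototxt" is a placeholder path.

```python
from google.protobuf import text_format
from caffe.proto import caffe_pb2
from caffe.draw import get_pydot_graph

net = caffe_pb2.NetParameter()
with open("deploy.prototxt") as f:  # placeholder path to a net definition
    text_format.Merge(f.read(), net)

graph = get_pydot_graph(net, rankdir="LR", label_edges=True)
graph.write_png("net.png")  # pydot delegates rendering to graphviz
```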
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/pep517/wrappers.py
python
Pep517HookCaller.prepare_metadata_for_build_wheel
( self, metadata_directory, config_settings=None, _allow_fallback=True)
return self._call_hook('prepare_metadata_for_build_wheel', { 'metadata_directory': abspath(metadata_directory), 'config_settings': config_settings, '_allow_fallback': _allow_fallback, })
Prepare a ``*.dist-info`` folder with metadata for this project. Returns the name of the newly created folder. If the build backend defines a hook with this name, it will be called in a subprocess. If not, the backend will be asked to build a wheel, and the dist-info extracted from that (unless _allow_fallback is False).
Prepare a ``*.dist-info`` folder with metadata for this project.
[ "Prepare", "a", "*", ".", "dist", "-", "info", "folder", "with", "metadata", "for", "this", "project", "." ]
def prepare_metadata_for_build_wheel( self, metadata_directory, config_settings=None, _allow_fallback=True): """Prepare a ``*.dist-info`` folder with metadata for this project. Returns the name of the newly created folder. If the build backend defines a hook with this name, it will be called in a subprocess. If not, the backend will be asked to build a wheel, and the dist-info extracted from that (unless _allow_fallback is False). """ return self._call_hook('prepare_metadata_for_build_wheel', { 'metadata_directory': abspath(metadata_directory), 'config_settings': config_settings, '_allow_fallback': _allow_fallback, })
[ "def", "prepare_metadata_for_build_wheel", "(", "self", ",", "metadata_directory", ",", "config_settings", "=", "None", ",", "_allow_fallback", "=", "True", ")", ":", "return", "self", ".", "_call_hook", "(", "'prepare_metadata_for_build_wheel'", ",", "{", "'metadata_directory'", ":", "abspath", "(", "metadata_directory", ")", ",", "'config_settings'", ":", "config_settings", ",", "'_allow_fallback'", ":", "_allow_fallback", ",", "}", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/site-packages/pip/_vendor/pep517/wrappers.py#L181-L197
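A hedged sketch of driving this hook through the wrapper, assuming a source tree whose pyproject.toml declares setuptools.build_meta as its backend; both paths are placeholders.

```python
from pip._vendor.pep517.wrappers import Pep517HookCaller

hooks = Pep517HookCaller("path/to/srctree", build_backend="setuptools.build_meta")
dist_info_name = hooks.prepare_metadata_for_build_wheel("path/to/metadata_out")
print(dist_info_name)  # e.g. "mypkg-1.0.dist-info", created inside metadata_out
```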
miyosuda/TensorFlowAndroidDemo
35903e0221aa5f109ea2dbef27f20b52e317f42d
jni-build/jni/include/tensorflow/python/framework/errors.py
python
OutOfRangeError.__init__
(self, node_def, op, message)
Creates an `OutOfRangeError`.
Creates an `OutOfRangeError`.
[ "Creates", "an", "OutOfRangeError", "." ]
def __init__(self, node_def, op, message): """Creates an `OutOfRangeError`.""" super(OutOfRangeError, self).__init__(node_def, op, message, OUT_OF_RANGE)
[ "def", "__init__", "(", "self", ",", "node_def", ",", "op", ",", "message", ")", ":", "super", "(", "OutOfRangeError", ",", "self", ")", ".", "__init__", "(", "node_def", ",", "op", ",", "message", ",", "OUT_OF_RANGE", ")" ]
https://github.com/miyosuda/TensorFlowAndroidDemo/blob/35903e0221aa5f109ea2dbef27f20b52e317f42d/jni-build/jni/include/tensorflow/python/framework/errors.py#L345-L348
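OutOfRangeError is what TF1 input pipelines raise when a finite source is exhausted; a minimal sketch, assuming the TF1-era session API that this repo targets.

```python
import tensorflow as tf  # TF1-style API assumed

dataset = tf.data.Dataset.range(3)
next_element = dataset.make_one_shot_iterator().get_next()

with tf.Session() as sess:
    try:
        while True:
            print(sess.run(next_element))  # prints 0, 1, 2
    except tf.errors.OutOfRangeError:
        print("dataset exhausted")  # the fourth run raises
```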
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/linalg/linear_operator_identity.py
python
BaseLinearOperatorIdentity._check_num_rows_possibly_add_asserts
(self)
Static check of init arg `num_rows`, possibly add asserts.
Static check of init arg `num_rows`, possibly add asserts.
[ "Static", "check", "of", "init", "arg", "num_rows", "possibly", "add", "asserts", "." ]
def _check_num_rows_possibly_add_asserts(self): """Static check of init arg `num_rows`, possibly add asserts.""" # Possibly add asserts. if self._assert_proper_shapes: self._num_rows = control_flow_ops.with_dependencies([ check_ops.assert_rank( self._num_rows, 0, message="Argument num_rows must be a 0-D Tensor."), check_ops.assert_non_negative( self._num_rows, message="Argument num_rows must be non-negative."), ], self._num_rows) # Static checks. if not self._num_rows.dtype.is_integer: raise TypeError("Argument num_rows must be integer type. Found:" " %s" % self._num_rows) num_rows_static = self._num_rows_static if num_rows_static is None: return # Cannot do any other static checks. if num_rows_static.ndim != 0: raise ValueError("Argument num_rows must be a 0-D Tensor. Found:" " %s" % num_rows_static) if num_rows_static < 0: raise ValueError("Argument num_rows must be non-negative. Found:" " %s" % num_rows_static)
[ "def", "_check_num_rows_possibly_add_asserts", "(", "self", ")", ":", "# Possibly add asserts.", "if", "self", ".", "_assert_proper_shapes", ":", "self", ".", "_num_rows", "=", "control_flow_ops", ".", "with_dependencies", "(", "[", "check_ops", ".", "assert_rank", "(", "self", ".", "_num_rows", ",", "0", ",", "message", "=", "\"Argument num_rows must be a 0-D Tensor.\"", ")", ",", "check_ops", ".", "assert_non_negative", "(", "self", ".", "_num_rows", ",", "message", "=", "\"Argument num_rows must be non-negative.\"", ")", ",", "]", ",", "self", ".", "_num_rows", ")", "# Static checks.", "if", "not", "self", ".", "_num_rows", ".", "dtype", ".", "is_integer", ":", "raise", "TypeError", "(", "\"Argument num_rows must be integer type. Found:\"", "\" %s\"", "%", "self", ".", "_num_rows", ")", "num_rows_static", "=", "self", ".", "_num_rows_static", "if", "num_rows_static", "is", "None", ":", "return", "# Cannot do any other static checks.", "if", "num_rows_static", ".", "ndim", "!=", "0", ":", "raise", "ValueError", "(", "\"Argument num_rows must be a 0-D Tensor. Found:\"", "\" %s\"", "%", "num_rows_static", ")", "if", "num_rows_static", "<", "0", ":", "raise", "ValueError", "(", "\"Argument num_rows must be non-negative. Found:\"", "\" %s\"", "%", "num_rows_static", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/python/ops/linalg/linear_operator_identity.py#L45-L75
fastio/1store
64f928df0ddb665c17a9ab3aa79ddd703de374ce
dist/common/scripts/scylla_util.py
python
aws_instance.disks
(self)
return disks
Returns all disks in the system, as visible from the AWS registry
Returns all disks in the system, as visible from the AWS registry
[ "Returns", "all", "disks", "in", "the", "system", "as", "visible", "from", "the", "AWS", "registry" ]
def disks(self): """Returns all disks in the system, as visible from the AWS registry""" disks = set() for v in list(self._disks.values()): disks = disks.union([self.__disk_name(x) for x in v]) return disks
[ "def", "disks", "(", "self", ")", ":", "disks", "=", "set", "(", ")", "for", "v", "in", "list", "(", "self", ".", "_disks", ".", "values", "(", ")", ")", ":", "disks", "=", "disks", ".", "union", "(", "[", "self", ".", "__disk_name", "(", "x", ")", "for", "x", "in", "v", "]", ")", "return", "disks" ]
https://github.com/fastio/1store/blob/64f928df0ddb665c17a9ab3aa79ddd703de374ce/dist/common/scripts/scylla_util.py#L129-L134
macchina-io/macchina.io
ef24ba0e18379c3dd48fb84e6dbf991101cb8db0
platform/JS/V8/v8/tools/isolate_driver.py
python
collect_deps
(target, build_steps, dependencies_added, rules_seen)
Recursively adds all the interesting dependencies for |target| into |dependencies_added|.
Recursively adds all the interesting dependencies for |target| into |dependencies_added|.
[ "Recursively", "adds", "all", "the", "interesting", "dependencies", "for", "|target|", "into", "|dependencies_added|", "." ]
def collect_deps(target, build_steps, dependencies_added, rules_seen): """Recursively adds all the interesting dependencies for |target| into |dependencies_added|. """ if rules_seen is None: rules_seen = set() if target in rules_seen: # TODO(maruel): Figure out how it happens. logging.warning('Circular dependency for %s!', target) return rules_seen.add(target) try: dependencies = raw_build_to_deps(build_steps[target]) except KeyError: logging.info('Failed to find a build step to generate: %s', target) return logging.debug('collect_deps(%s) -> %s', target, dependencies) for dependency in dependencies: dependencies_added.add(dependency) collect_deps(dependency, build_steps, dependencies_added, rules_seen)
[ "def", "collect_deps", "(", "target", ",", "build_steps", ",", "dependencies_added", ",", "rules_seen", ")", ":", "if", "rules_seen", "is", "None", ":", "rules_seen", "=", "set", "(", ")", "if", "target", "in", "rules_seen", ":", "# TODO(maruel): Figure out how it happens.", "logging", ".", "warning", "(", "'Circular dependency for %s!'", ",", "target", ")", "return", "rules_seen", ".", "add", "(", "target", ")", "try", ":", "dependencies", "=", "raw_build_to_deps", "(", "build_steps", "[", "target", "]", ")", "except", "KeyError", ":", "logging", ".", "info", "(", "'Failed to find a build step to generate: %s'", ",", "target", ")", "return", "logging", ".", "debug", "(", "'collect_deps(%s) -> %s'", ",", "target", ",", "dependencies", ")", "for", "dependency", "in", "dependencies", ":", "dependencies_added", ".", "add", "(", "dependency", ")", "collect_deps", "(", "dependency", ",", "build_steps", ",", "dependencies_added", ",", "rules_seen", ")" ]
https://github.com/macchina-io/macchina.io/blob/ef24ba0e18379c3dd48fb84e6dbf991101cb8db0/platform/JS/V8/v8/tools/isolate_driver.py#L149-L168
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/klampt/robotsim.py
python
Widget.idle
(self)
return _robotsim.Widget_idle(self)
r"""
r"""
[ "r" ]
def idle(self) ->None: r""" """ return _robotsim.Widget_idle(self)
[ "def", "idle", "(", "self", ")", "->", "None", ":", "return", "_robotsim", ".", "Widget_idle", "(", "self", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/klampt/robotsim.py#L3355-L3358
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py3/sklearn/gaussian_process/kernels.py
python
ConstantKernel.diag
(self, X)
return np.full(_num_samples(X), self.constant_value, dtype=np.array(self.constant_value).dtype)
Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : sequence of length n_samples_X Argument to the kernel. Could either be array-like with shape = (n_samples_X, n_features) or a list of objects. Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X)
Returns the diagonal of the kernel k(X, X).
[ "Returns", "the", "diagonal", "of", "the", "kernel", "k", "(", "X", "X", ")", "." ]
def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : sequence of length n_samples_X Argument to the kernel. Could either be array-like with shape = (n_samples_X, n_features) or a list of objects. Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return np.full(_num_samples(X), self.constant_value, dtype=np.array(self.constant_value).dtype)
[ "def", "diag", "(", "self", ",", "X", ")", ":", "return", "np", ".", "full", "(", "_num_samples", "(", "X", ")", ",", "self", ".", "constant_value", ",", "dtype", "=", "np", ".", "array", "(", "self", ".", "constant_value", ")", ".", "dtype", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py3/sklearn/gaussian_process/kernels.py#L1087-L1106
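A quick check of the documented equivalence: np.diag(k(X)) versus the cheaper k.diag(X).

```python
import numpy as np
from sklearn.gaussian_process.kernels import ConstantKernel

k = ConstantKernel(constant_value=2.0)
X = np.random.rand(5, 3)

assert np.allclose(k.diag(X), np.diag(k(X)))  # both are five 2.0 entries
```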
gromacs/gromacs
7dec3a3f99993cf5687a122de3e12de31c21c399
docs/doxygen/reporter.py
python
Reporter.doc_note
(self, entity, message)
Report a potential issue in documentation.
Report a potential issue in documentation.
[ "Report", "a", "potential", "issue", "in", "documentation", "." ]
def doc_note(self, entity, message): """Report a potential issue in documentation.""" self._report(Message('note: ' + entity.get_name() + ': ' + message, location=entity.get_reporter_location()))
[ "def", "doc_note", "(", "self", ",", "entity", ",", "message", ")", ":", "self", ".", "_report", "(", "Message", "(", "'note: '", "+", "entity", ".", "get_name", "(", ")", "+", "': '", "+", "message", ",", "location", "=", "entity", ".", "get_reporter_location", "(", ")", ")", ")" ]
https://github.com/gromacs/gromacs/blob/7dec3a3f99993cf5687a122de3e12de31c21c399/docs/doxygen/reporter.py#L279-L282
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
src/python/turicreate/util/_cloudpickle/_cloudpickle_py27.py
python
CloudPickler.save_module
(self, obj)
Save a module as an import
Save a module as an import
[ "Save", "a", "module", "as", "an", "import" ]
def save_module(self, obj): """ Save a module as an import """ mod_name = obj.__name__ # If module is successfully found then it is not a dynamically created module if hasattr(obj, "__file__"): is_dynamic = False else: try: _find_module(mod_name) is_dynamic = False except ImportError: is_dynamic = True self.modules.add(obj) if is_dynamic: self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj) else: self.save_reduce(subimport, (obj.__name__,), obj=obj)
[ "def", "save_module", "(", "self", ",", "obj", ")", ":", "mod_name", "=", "obj", ".", "__name__", "# If module is successfully found then it is not a dynamically created module", "if", "hasattr", "(", "obj", ",", "\"__file__\"", ")", ":", "is_dynamic", "=", "False", "else", ":", "try", ":", "_find_module", "(", "mod_name", ")", "is_dynamic", "=", "False", "except", "ImportError", ":", "is_dynamic", "=", "True", "self", ".", "modules", ".", "add", "(", "obj", ")", "if", "is_dynamic", ":", "self", ".", "save_reduce", "(", "dynamic_subimport", ",", "(", "obj", ".", "__name__", ",", "vars", "(", "obj", ")", ")", ",", "obj", "=", "obj", ")", "else", ":", "self", ".", "save_reduce", "(", "subimport", ",", "(", "obj", ".", "__name__", ",", ")", ",", "obj", "=", "obj", ")" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/util/_cloudpickle/_cloudpickle_py27.py#L292-L311
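A small sketch of the dispatch criterion above (no pickling performed): a module created at runtime lacks __file__, which is what routes it to the dynamic_subimport branch; the module name here is hypothetical, and imp matches the py27 target of this file.

```python
import imp  # py2-compatible, matching this _cloudpickle_py27 module
import os

dyn = imp.new_module("not_on_disk")
dyn.answer = 42

assert not hasattr(dyn, "__file__")  # -> pickled by value via dynamic_subimport
assert hasattr(os, "__file__")       # -> pickled by name via subimport
```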
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/parso/py2/parso/python/diff.py
python
DiffParser._try_parse_part
(self, until_line)
return self._active_parser.parse(tokens=tokens)
Sets up a normal parser that uses a specialized tokenizer to only parse until a certain position (or a bit longer if the statement hasn't ended).
Sets up a normal parser that uses a specialized tokenizer to only parse until a certain position (or a bit longer if the statement hasn't ended).
[ "Sets", "up", "a", "normal", "parser", "that", "uses", "a", "specialized", "tokenizer", "to", "only", "parse", "until", "a", "certain", "position", "(", "or", "a", "bit", "longer", "if", "the", "statement", "hasn", "t", "ended", ")", "." ]
def _try_parse_part(self, until_line): """ Sets up a normal parser that uses a specialized tokenizer to only parse until a certain position (or a bit longer if the statement hasn't ended). """ self._parser_count += 1 # TODO speed up, shouldn't copy the whole list all the time. # memoryview? parsed_until_line = self._nodes_tree.parsed_until_line lines_after = self._parser_lines_new[parsed_until_line:] tokens = self._diff_tokenize( lines_after, until_line, line_offset=parsed_until_line ) self._active_parser = Parser( self._pgen_grammar, error_recovery=True ) return self._active_parser.parse(tokens=tokens)
[ "def", "_try_parse_part", "(", "self", ",", "until_line", ")", ":", "self", ".", "_parser_count", "+=", "1", "# TODO speed up, shouldn't copy the whole list all the time.", "# memoryview?", "parsed_until_line", "=", "self", ".", "_nodes_tree", ".", "parsed_until_line", "lines_after", "=", "self", ".", "_parser_lines_new", "[", "parsed_until_line", ":", "]", "tokens", "=", "self", ".", "_diff_tokenize", "(", "lines_after", ",", "until_line", ",", "line_offset", "=", "parsed_until_line", ")", "self", ".", "_active_parser", "=", "Parser", "(", "self", ".", "_pgen_grammar", ",", "error_recovery", "=", "True", ")", "return", "self", ".", "_active_parser", ".", "parse", "(", "tokens", "=", "tokens", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/parso/py2/parso/python/diff.py#L433-L453
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py
python
InequalitySplitHandler.__init__
(self, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, feature_column_group_id, epsilon, num_quantiles, gradient_shape, hessian_shape, multiclass_strategy, init_stamp_token=0, loss_uses_sum_reduction=False, name=None)
Initialize the internal state for this split handler. Args: l1_regularization: L1 regularization applied for this split handler. l2_regularization: L2 regularization applied for this split handler. tree_complexity_regularization: Tree complexity regularization applied for this split handler. min_node_weight: Minimum sum of weights of examples in each partition to be considered for splitting. feature_column_group_id: Feature column group index. epsilon: A float, the error bound for quantile computation. num_quantiles: An int, the number of buckets to create from the histogram. gradient_shape: A TensorShape, containing shape of gradients. hessian_shape: A TensorShape, containing shape of hessians. multiclass_strategy: Strategy describing how to treat multiclass problems. init_stamp_token: A tensor containing a scalar for initial stamp of the stamped objects. loss_uses_sum_reduction: A scalar boolean tensor that specifies whether SUM or MEAN reduction was used for the loss. name: An optional handler name.
Initialize the internal state for this split handler.
[ "Initialize", "the", "internal", "state", "for", "this", "split", "handler", "." ]
def __init__(self, l1_regularization, l2_regularization, tree_complexity_regularization, min_node_weight, feature_column_group_id, epsilon, num_quantiles, gradient_shape, hessian_shape, multiclass_strategy, init_stamp_token=0, loss_uses_sum_reduction=False, name=None): """Initialize the internal state for this split handler. Args: l1_regularization: L1 regularization applied for this split handler. l2_regularization: L2 regularization applied for this split handler. tree_complexity_regularization: Tree complexity regularization applied for this split handler. min_node_weight: Minimum sum of weights of examples in each partition to be considered for splitting. feature_column_group_id: Feature column group index. epsilon: A float, the error bound for quantile computation. num_quantiles: An int, the number of buckets to create from the histogram. gradient_shape: A TensorShape, containing shape of gradients. hessian_shape: A TensorShape, containing shape of hessians. multiclass_strategy: Strategy describing how to treat multiclass problems. init_stamp_token: A tensor containing a scalar for initial stamp of the stamped objects. loss_uses_sum_reduction: A scalar boolean tensor that specifies whether SUM or MEAN reduction was used for the loss. name: An optional handler name. """ super(InequalitySplitHandler, self).__init__( name=name, l1_regularization=l1_regularization, l2_regularization=l2_regularization, tree_complexity_regularization=tree_complexity_regularization, min_node_weight=min_node_weight, feature_column_group_id=feature_column_group_id, gradient_shape=gradient_shape, hessian_shape=hessian_shape, multiclass_strategy=multiclass_strategy, loss_uses_sum_reduction=loss_uses_sum_reduction) self._stats_accumulator = stats_accumulator_ops.StatsAccumulator( init_stamp_token, gradient_shape, hessian_shape, name="StatsAccumulator/{}".format(self._name)) # Allocate both stats accumulator and quantile accumulator on the same # device so that we can build splits with fewer RPCs. with ops.colocate_with(self._stats_accumulator.resource_handle): self._quantile_accumulator = quantile_ops.QuantileAccumulator( init_stamp_token, epsilon=epsilon, num_quantiles=num_quantiles, name="QuantileAccumulator/{}".format(self._name))
[ "def", "__init__", "(", "self", ",", "l1_regularization", ",", "l2_regularization", ",", "tree_complexity_regularization", ",", "min_node_weight", ",", "feature_column_group_id", ",", "epsilon", ",", "num_quantiles", ",", "gradient_shape", ",", "hessian_shape", ",", "multiclass_strategy", ",", "init_stamp_token", "=", "0", ",", "loss_uses_sum_reduction", "=", "False", ",", "name", "=", "None", ")", ":", "super", "(", "InequalitySplitHandler", ",", "self", ")", ".", "__init__", "(", "name", "=", "name", ",", "l1_regularization", "=", "l1_regularization", ",", "l2_regularization", "=", "l2_regularization", ",", "tree_complexity_regularization", "=", "tree_complexity_regularization", ",", "min_node_weight", "=", "min_node_weight", ",", "feature_column_group_id", "=", "feature_column_group_id", ",", "gradient_shape", "=", "gradient_shape", ",", "hessian_shape", "=", "hessian_shape", ",", "multiclass_strategy", "=", "multiclass_strategy", ",", "loss_uses_sum_reduction", "=", "loss_uses_sum_reduction", ")", "self", ".", "_stats_accumulator", "=", "stats_accumulator_ops", ".", "StatsAccumulator", "(", "init_stamp_token", ",", "gradient_shape", ",", "hessian_shape", ",", "name", "=", "\"StatsAccumulator/{}\"", ".", "format", "(", "self", ".", "_name", ")", ")", "# Allocate both stats accumulator and quantile accumulator on the same", "# device so that we can build splits with fewer RPCs.", "with", "ops", ".", "colocate_with", "(", "self", ".", "_stats_accumulator", ".", "resource_handle", ")", ":", "self", ".", "_quantile_accumulator", "=", "quantile_ops", ".", "QuantileAccumulator", "(", "init_stamp_token", ",", "epsilon", "=", "epsilon", ",", "num_quantiles", "=", "num_quantiles", ",", "name", "=", "\"QuantileAccumulator/{}\"", ".", "format", "(", "self", ".", "_name", ")", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/boosted_trees/lib/learner/batch/ordinal_split_handler.py#L91-L149
krishauser/Klampt
972cc83ea5befac3f653c1ba20f80155768ad519
Python/python2_version/klampt/robotsim.py
python
IKObjective.getPosition
(self)
return _robotsim.IKObjective_getPosition(self)
getPosition(IKObjective self) Returns the local and global position of the position constraint.
getPosition(IKObjective self)
[ "getPosition", "(", "IKObjective", "self", ")" ]
def getPosition(self): """ getPosition(IKObjective self) Returns the local and global position of the position constraint. """ return _robotsim.IKObjective_getPosition(self)
[ "def", "getPosition", "(", "self", ")", ":", "return", "_robotsim", ".", "IKObjective_getPosition", "(", "self", ")" ]
https://github.com/krishauser/Klampt/blob/972cc83ea5befac3f653c1ba20f80155768ad519/Python/python2_version/klampt/robotsim.py#L6402-L6411
nasa/fprime
595cf3682d8365943d86c1a6fe7c78f0a116acf0
Autocoders/Python/src/fprime_ac/generators/visitors/ChannelVisitor.py
python
ChannelVisitor.__init__
(self)
Constructor.
Constructor.
[ "Constructor", "." ]
def __init__(self): """ Constructor. """ super().__init__() self.__config = ConfigManager.ConfigManager.getInstance() self.__form = formatters.Formatters.getInstance() self.__form_comment = formatters.CommentFormatters() DEBUG.info("ChannelVisitor: Instanced.") self.bodytext = "" self.prototypetext = ""
[ "def", "__init__", "(", "self", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "__config", "=", "ConfigManager", ".", "ConfigManager", ".", "getInstance", "(", ")", "self", ".", "__form", "=", "formatters", ".", "Formatters", ".", "getInstance", "(", ")", "self", ".", "__form_comment", "=", "formatters", ".", "CommentFormatters", "(", ")", "DEBUG", ".", "info", "(", "\"ChannelVisitor: Instanced.\"", ")", "self", ".", "bodytext", "=", "\"\"", "self", ".", "prototypetext", "=", "\"\"" ]
https://github.com/nasa/fprime/blob/595cf3682d8365943d86c1a6fe7c78f0a116acf0/Autocoders/Python/src/fprime_ac/generators/visitors/ChannelVisitor.py#L64-L74
natanielruiz/android-yolo
1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f
jni-build/jni/include/tensorflow/contrib/graph_editor/reroute.py
python
add_control_inputs
(op, cops)
Add the control inputs cops to op. Warning: this function is directly manipulating the internals of the tf.Graph. Args: op: a tf.Operation to which the control inputs are added. cops: an object convertible to a list of tf.Operation. Raises: TypeError: if op is not a tf.Operation ValueError: if any cop in cops is already a control input of op.
Add the control inputs cops to op.
[ "Add", "the", "control", "inputs", "cops", "to", "op", "." ]
def add_control_inputs(op, cops): """Add the control inputs cops to op. Warning: this function is directly manipulating the internals of the tf.Graph. Args: op: a tf.Operation to which the control inputs are added. cops: an object convertible to a list of tf.Operation. Raises: TypeError: if op is not a tf.Operation ValueError: if any cop in cops is already a control input of op. """ if not isinstance(op, tf_ops.Operation): raise TypeError("Expected a tf.Operation, got: {}", type(op)) cops = util.make_list_of_op(cops, allow_graph=False) for cop in cops: if cop in op.control_inputs: raise ValueError("{} is already a control_input of {}".format(op.name, cop.name)) # pylint: disable=protected-access op._control_inputs += cops op._recompute_node_def()
[ "def", "add_control_inputs", "(", "op", ",", "cops", ")", ":", "if", "not", "isinstance", "(", "op", ",", "tf_ops", ".", "Operation", ")", ":", "raise", "TypeError", "(", "\"Expected a tf.Operation, got: {}\"", ",", "type", "(", "op", ")", ")", "cops", "=", "util", ".", "make_list_of_op", "(", "cops", ",", "allow_graph", "=", "False", ")", "for", "cop", "in", "cops", ":", "if", "cop", "in", "op", ".", "control_inputs", ":", "raise", "ValueError", "(", "\"{} is already a control_input of {}\"", ".", "format", "(", "op", ".", "name", ",", "cop", ".", "name", ")", ")", "# pylint: disable=protected-access", "op", ".", "_control_inputs", "+=", "cops", "op", ".", "_recompute_node_def", "(", ")" ]
https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/graph_editor/reroute.py#L468-L489
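A hedged usage sketch, assuming add_control_inputs is re-exported at the tensorflow.contrib.graph_editor package level, as in contrib builds that ship this module.

```python
import tensorflow as tf
from tensorflow.contrib.graph_editor import add_control_inputs

a = tf.constant(1.0, name="a")
b = tf.constant(2.0, name="b")
c = tf.identity(a, name="c")

add_control_inputs(c.op, [b.op])   # c now cannot run before b has run
assert b.op in c.op.control_inputs
```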
mantidproject/mantid
03deeb89254ec4289edb8771e0188c2090a02f32
qt/python/mantidqt/mantidqt/widgets/sliceviewer/peaksviewer/model.py
python
PeaksViewerModel.__init__
(self, peaks_ws: IPeaksWorkspace, fg_color: str, bg_color: str)
:param peaks_ws: A pointer to the PeaksWorkspace :param fg_color: Color of the glyphs marking the signal region :param bg_color: Color of the glyphs marking the background region
:param peaks_ws: A pointer to the PeaksWorkspace :param fg_color: Color of the glyphs marking the signal region :param bg_color: Color of the glyphs marking the background region
[ ":", "param", "peaks_ws", ":", "A", "pointer", "to", "the", "PeaksWorkspace", ":", "param", "fg_color", ":", "Color", "of", "the", "glyphs", "marking", "the", "signal", "region", ":", "param", "bg_color", ":", "Color", "of", "the", "glyphs", "marking", "the", "background", "region" ]
def __init__(self, peaks_ws: IPeaksWorkspace, fg_color: str, bg_color: str): """ :param peaks_ws: A pointer to the PeaksWorkspace :param fg_color: Color of the glyphs marking the signal region :param bg_color: Color of the glyphs marking the background region """ if not hasattr(peaks_ws, 'getNumberPeaks'): raise ValueError("Expected a PeaksWorkspace type but found a {}".format(type(peaks_ws))) super().__init__(peaks_ws) self._peaks_ws_name = peaks_ws.name() self._fg_color = fg_color self._bg_color = bg_color self._representations: List[Painted] = []
[ "def", "__init__", "(", "self", ",", "peaks_ws", ":", "IPeaksWorkspace", ",", "fg_color", ":", "str", ",", "bg_color", ":", "str", ")", ":", "if", "not", "hasattr", "(", "peaks_ws", ",", "'getNumberPeaks'", ")", ":", "raise", "ValueError", "(", "\"Expected a PeaksWorkspace type but found a {}\"", ".", "format", "(", "type", "(", "peaks_ws", ")", ")", ")", "super", "(", ")", ".", "__init__", "(", "peaks_ws", ")", "self", ".", "_peaks_ws_name", "=", "peaks_ws", ".", "name", "(", ")", "self", ".", "_fg_color", "=", "fg_color", "self", ".", "_bg_color", "=", "bg_color", "self", ".", "_representations", ":", "List", "[", "Painted", "]", "=", "[", "]" ]
https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqt/mantidqt/widgets/sliceviewer/peaksviewer/model.py#L35-L51
SpenceKonde/megaTinyCore
1c4a70b18a149fe6bcb551dfa6db11ca50b8997b
megaavr/tools/libs/pause_mod/__init__.py
python
minutes
(num)
Pause for this many minutes
Pause for this many minutes
[ "Pause", "for", "this", "many", "minutes" ]
def minutes(num): """ Pause for this many minutes """ seconds(60 * num)
[ "def", "minutes", "(", "num", ")", ":", "seconds", "(", "60", "*", "num", ")" ]
https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/pause_mod/__init__.py#L97-L101
openvinotoolkit/openvino
dedcbeafa8b84cccdc55ca64b8da516682b381c7
src/bindings/python/src/openvino/runtime/utils/node_factory.py
python
NodeFactory._set_node_attr_value
(node: Node, attr_name: str, value: Any)
Set the node attribute value. :param node: The node we change attribute value for. :param attr_name: The attribute name. :param value: The new attribute value.
Set the node attribute value.
[ "Set", "the", "node", "attribute", "value", "." ]
def _set_node_attr_value(node: Node, attr_name: str, value: Any) -> None: """Set the node attribute value. :param node: The node we change attribute value for. :param attr_name: The attribute name. :param value: The new attribute value. """ node.set_attribute(attr_name, value) node._attr_cache[attr_name] = value
[ "def", "_set_node_attr_value", "(", "node", ":", "Node", ",", "attr_name", ":", "str", ",", "value", ":", "Any", ")", "->", "None", ":", "node", ".", "set_attribute", "(", "attr_name", ",", "value", ")", "node", ".", "_attr_cache", "[", "attr_name", "]", "=", "value" ]
https://github.com/openvinotoolkit/openvino/blob/dedcbeafa8b84cccdc55ca64b8da516682b381c7/src/bindings/python/src/openvino/runtime/utils/node_factory.py#L156-L164
iam-abbas/cs-algorithms
d04aa8fd9a1fa290266dde96afe9b90ee23c5a92
Python/Johnson_algorithm.py
python
johnson
(g)
return distance
Return distance where distance[u][v] is the min distance from u to v. distance[u][v] is the shortest distance from vertex u to v. g is a Graph object which can have negative edge weights.
Return distance where distance[u][v] is the min distance from u to v. distance[u][v] is the shortest distance from vertex u to v. g is a Graph object which can have negative edge weights.
[ "Return", "distance", "where", "distance", "[", "u", "]", "[", "v", "]", "is", "the", "min", "distance", "from", "u", "to", "v", ".", "distance", "[", "u", "]", "[", "v", "]", "is", "the", "shortest", "distance", "from", "vertex", "u", "to", "v", ".", "g", "is", "a", "Graph", "object", "which", "can", "have", "negative", "edge", "weights", "." ]
def johnson(g): """Return distance where distance[u][v] is the min distance from u to v. distance[u][v] is the shortest distance from vertex u to v. g is a Graph object which can have negative edge weights. """ # add new vertex q g.add_vertex('q') # let q point to all other vertices in g with zero-weight edges for v in g: g.add_edge('q', v.get_key(), 0) # compute shortest distance from vertex q to all other vertices bell_dist = bellman_ford(g, g.get_vertex('q')) # set weight(u, v) = weight(u, v) + bell_dist(u) - bell_dist(v) for each # edge (u, v) for v in g: for n in v.get_neighbours(): w = v.get_weight(n) v.set_weight(n, w + bell_dist[v] - bell_dist[n]) # remove vertex q # This implementation of the graph stores edge (u, v) in Vertex object u # Since no other vertex points back to q, we do not need to worry about # removing edges pointing to q from other vertices. del g.vertices['q'] # distance[u][v] will hold smallest distance from vertex u to v distance = {} # run dijkstra's algorithm on each source vertex for v in g: distance[v] = dijkstra(g, v) # correct distances for v in g: for w in g: distance[v][w] += bell_dist[w] - bell_dist[v] # correct weights in original graph for v in g: for n in v.get_neighbours(): w = v.get_weight(n) v.set_weight(n, w + bell_dist[n] - bell_dist[v]) return distance
[ "def", "johnson", "(", "g", ")", ":", "# add new vertex q", "g", ".", "add_vertex", "(", "'q'", ")", "# let q point to all other vertices in g with zero-weight edges", "for", "v", "in", "g", ":", "g", ".", "add_edge", "(", "'q'", ",", "v", ".", "get_key", "(", ")", ",", "0", ")", "# compute shortest distance from vertex q to all other vertices", "bell_dist", "=", "bellman_ford", "(", "g", ",", "g", ".", "get_vertex", "(", "'q'", ")", ")", "# set weight(u, v) = weight(u, v) + bell_dist(u) - bell_dist(v) for each", "# edge (u, v)", "for", "v", "in", "g", ":", "for", "n", "in", "v", ".", "get_neighbours", "(", ")", ":", "w", "=", "v", ".", "get_weight", "(", "n", ")", "v", ".", "set_weight", "(", "n", ",", "w", "+", "bell_dist", "[", "v", "]", "-", "bell_dist", "[", "n", "]", ")", "# remove vertex q", "# This implementation of the graph stores edge (u, v) in Vertex object u", "# Since no other vertex points back to q, we do not need to worry about", "# removing edges pointing to q from other vertices.", "del", "g", ".", "vertices", "[", "'q'", "]", "# distance[u][v] will hold smallest distance from vertex u to v", "distance", "=", "{", "}", "# run dijkstra's algorithm on each source vertex", "for", "v", "in", "g", ":", "distance", "[", "v", "]", "=", "dijkstra", "(", "g", ",", "v", ")", "# correct distances", "for", "v", "in", "g", ":", "for", "w", "in", "g", ":", "distance", "[", "v", "]", "[", "w", "]", "+=", "bell_dist", "[", "w", "]", "-", "bell_dist", "[", "v", "]", "# correct weights in original graph", "for", "v", "in", "g", ":", "for", "n", "in", "v", ".", "get_neighbours", "(", ")", ":", "w", "=", "v", ".", "get_weight", "(", "n", ")", "v", ".", "set_weight", "(", "n", ",", "w", "+", "bell_dist", "[", "n", "]", "-", "bell_dist", "[", "v", "]", ")", "return", "distance" ]
https://github.com/iam-abbas/cs-algorithms/blob/d04aa8fd9a1fa290266dde96afe9b90ee23c5a92/Python/Johnson_algorithm.py#L63-L109
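The reweighting and the two correction loops in johnson() implement the standard potential argument. Writing h(v) for bell_dist[v] (the Bellman-Ford distances from the added source q), delta for true distances, and delta' for distances in the reweighted graph, the identities being used are:

```latex
% h(v) = \delta(q, v), the Bellman-Ford potentials (bell_dist above)
w'(u,v) = w(u,v) + h(u) - h(v) \ge 0
% potentials telescope along any u \to v path, hence the final correction
% distance[v][w] += bell_dist[w] - bell_dist[v]:
\delta(u,v) = \delta'(u,v) + h(v) - h(u)
```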
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Source/ThirdParty/CEF3/pristine/cef_source/tools/crash_server.py
python
CrashHTTPRequestHandler._parse_post_data
(self, data)
return cgi.FieldStorage( fp = cStringIO.StringIO(data), headers = self.headers, environ = { 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type'], })
Returns a cgi.FieldStorage object for this request or None if this is not a POST request.
Returns a cgi.FieldStorage object for this request or None if this is not a POST request.
[ "Returns", "a", "cgi", ".", "FieldStorage", "object", "for", "this", "request", "or", "None", "if", "this", "is", "not", "a", "POST", "request", "." ]
def _parse_post_data(self, data): """ Returns a cgi.FieldStorage object for this request or None if this is not a POST request. """ if self.command != 'POST': return None return cgi.FieldStorage( fp = cStringIO.StringIO(data), headers = self.headers, environ = { 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type'], })
[ "def", "_parse_post_data", "(", "self", ",", "data", ")", ":", "if", "self", ".", "command", "!=", "'POST'", ":", "return", "None", "return", "cgi", ".", "FieldStorage", "(", "fp", "=", "cStringIO", ".", "StringIO", "(", "data", ")", ",", "headers", "=", "self", ".", "headers", ",", "environ", "=", "{", "'REQUEST_METHOD'", ":", "'POST'", ",", "'CONTENT_TYPE'", ":", "self", ".", "headers", "[", "'Content-Type'", "]", ",", "}", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Source/ThirdParty/CEF3/pristine/cef_source/tools/crash_server.py#L143-L154
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/tools/cython/Cython/StringIOTree.py
python
StringIOTree.insert
(self, iotree)
Insert a StringIOTree (and all of its contents) at this location. Further writing to self appears after what is inserted.
Insert a StringIOTree (and all of its contents) at this location. Further writing to self appears after what is inserted.
[ "Insert", "a", "StringIOTree", "(", "and", "all", "of", "its", "contents", ")", "at", "this", "location", ".", "Further", "writing", "to", "self", "appears", "after", "what", "is", "inserted", "." ]
def insert(self, iotree): """ Insert a StringIOTree (and all of its contents) at this location. Further writing to self appears after what is inserted. """ self.commit() self.prepended_children.append(iotree)
[ "def", "insert", "(", "self", ",", "iotree", ")", ":", "self", ".", "commit", "(", ")", "self", ".", "prepended_children", ".", "append", "(", "iotree", ")" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/cython/Cython/StringIOTree.py#L81-L87
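insert() is the primitive behind insertion_point(); a minimal sketch of the observable behavior, where further writing to self indeed appears after what was inserted.

```python
from Cython.StringIOTree import StringIOTree

buf = StringIOTree()
buf.write("before ")
hole = buf.insertion_point()  # commits "before " and splices in a child tree
buf.write("after")
hole.write("middle ")         # lands between the two writes above

print(buf.getvalue())         # -> "before middle after"
```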
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_cocoa/_misc.py
python
Log.SetActiveTarget
(*args, **kwargs)
return _misc_.Log_SetActiveTarget(*args, **kwargs)
SetActiveTarget(Log pLogger) -> Log
SetActiveTarget(Log pLogger) -> Log
[ "SetActiveTarget", "(", "Log", "pLogger", ")", "-", ">", "Log" ]
def SetActiveTarget(*args, **kwargs): """SetActiveTarget(Log pLogger) -> Log""" return _misc_.Log_SetActiveTarget(*args, **kwargs)
[ "def", "SetActiveTarget", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_misc_", ".", "Log_SetActiveTarget", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_misc.py#L1520-L1522
eclipse/sumo
7132a9b8b6eea734bdec38479026b4d8c4336d03
tools/contributed/sumopy/agilepy/lib_wx/objpanel.py
python
ObjPanelMixin.on_cancel
(self, event)
Destroy itself and parent
Destroy itself and parent
[ "Destroy", "itself", "and", "parent" ]
def on_cancel(self, event): """ Destroy itself and parent """ # print 'OP.on_cancel' self.Close()
[ "def", "on_cancel", "(", "self", ",", "event", ")", ":", "# print 'OP.on_cancel'", "self", ".", "Close", "(", ")" ]
https://github.com/eclipse/sumo/blob/7132a9b8b6eea734bdec38479026b4d8c4336d03/tools/contributed/sumopy/agilepy/lib_wx/objpanel.py#L3662-L3667
bareos/bareos
56a10bb368b0a81e977bb51304033fe49d59efb0
contrib/fd-plugins/openvz7/bareos-fd-vz7_ct_fs.py
python
load_bareos_plugin
(plugindef)
return bareosfd.bRC_OK
This function is called by the Bareos-FD to load the plugin We use it to instantiate the plugin class
This function is called by the Bareos-FD to load the plugin We use it to instantiate the plugin class
[ "This", "function", "is", "called", "by", "the", "Bareos", "-", "FD", "to", "load", "the", "plugin", "We", "use", "it", "to", "instantiate", "the", "plugin", "class" ]
def load_bareos_plugin(plugindef): ''' This function is called by the Bareos-FD to load the plugin We use it to instantiate the plugin class ''' # BareosFdWrapper.bareos_fd_plugin_object is the module attribute that # holds the plugin class object BareosFdWrapper.bareos_fd_plugin_object = \ BareosFdPluginVz7CtFs.BareosFdPluginVz7CtFs( plugindef) return bareosfd.bRC_OK
[ "def", "load_bareos_plugin", "(", "plugindef", ")", ":", "# BareosFdWrapper.bareos_fd_plugin_object is the module attribute that", "# holds the plugin class object", "BareosFdWrapper", ".", "bareos_fd_plugin_object", "=", "BareosFdPluginVz7CtFs", ".", "BareosFdPluginVz7CtFs", "(", "plugindef", ")", "return", "bareosfd", ".", "bRC_OK" ]
https://github.com/bareos/bareos/blob/56a10bb368b0a81e977bb51304033fe49d59efb0/contrib/fd-plugins/openvz7/bareos-fd-vz7_ct_fs.py#L14-L24
ufal/udpipe
e51f02d2744cdfd4a29efc1320644ea04d535f0b
doc/t2t_docsys/txt2tags.py
python
TitleMaster.close_all
(self)
return ret
Closes all opened title blocks
Closes all opened title blocks
[ "Closes", "all", "opened", "title", "blocks" ]
def close_all(self): "Closes all opened title blocks" ret = [] ret.extend(self.tag_hold) while self.level: tag = TAGS.get('title%dClose'%self.level) if tag: ret.append(tag) tag = TAGS.get('blockTitle%dClose'%self.level) if tag: ret.append(tag) self.level -= 1 return ret
[ "def", "close_all", "(", "self", ")", ":", "ret", "=", "[", "]", "ret", ".", "extend", "(", "self", ".", "tag_hold", ")", "while", "self", ".", "level", ":", "tag", "=", "TAGS", ".", "get", "(", "'title%dClose'", "%", "self", ".", "level", ")", "if", "tag", ":", "ret", ".", "append", "(", "tag", ")", "tag", "=", "TAGS", ".", "get", "(", "'blockTitle%dClose'", "%", "self", ".", "level", ")", "if", "tag", ":", "ret", ".", "append", "(", "tag", ")", "self", ".", "level", "-=", "1", "return", "ret" ]
https://github.com/ufal/udpipe/blob/e51f02d2744cdfd4a29efc1320644ea04d535f0b/doc/t2t_docsys/txt2tags.py#L3276-L3286
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Tools/AWSPythonSDK/1.5.8/botocore/stub.py
python
Stubber.assert_no_pending_responses
(self)
Asserts that all expected calls were made.
Asserts that all expected calls were made.
[ "Asserts", "that", "all", "expected", "calls", "were", "made", "." ]
def assert_no_pending_responses(self): """ Asserts that all expected calls were made. """ remaining = len(self._queue) if remaining != 0: raise AssertionError( "%d responses remaining in queue." % remaining)
[ "def", "assert_no_pending_responses", "(", "self", ")", ":", "remaining", "=", "len", "(", "self", ".", "_queue", ")", "if", "remaining", "!=", "0", ":", "raise", "AssertionError", "(", "\"%d responses remaining in queue.\"", "%", "remaining", ")" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/AWSPythonSDK/1.5.8/botocore/stub.py#L309-L316
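A hedged usage sketch: queue one canned response, consume it, then assert the queue is drained; the dummy credentials only keep client construction self-contained.

```python
import botocore.session
from botocore.stub import Stubber

session = botocore.session.get_session()
client = session.create_client(
    "s3", region_name="us-east-1",
    aws_access_key_id="dummy", aws_secret_access_key="dummy")

stubber = Stubber(client)
stubber.add_response("list_buckets", {"Buckets": []})

with stubber:
    client.list_buckets()              # consumes the queued response

stubber.assert_no_pending_responses()  # passes: the queue is empty
```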
bigartm/bigartm
47e37f982de87aa67bfd475ff1f39da696b181b3
utils/cpplint.py
python
ShouldCheckNamespaceIndentation
(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum)
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace.
This method determines if we should apply our namespace indentation check.
[ "This", "method", "determines", "if", "we", "should", "apply", "our", "namespace", "indentation", "check", "." ]
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum): """This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace. """ is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum) if not (is_namespace_indent_item or is_forward_declaration): return False # If we are in a macro, we do not want to check the namespace indentation. if IsMacroDefinition(raw_lines_no_comments, linenum): return False return IsBlockInNameSpace(nesting_state, is_forward_declaration)
[ "def", "ShouldCheckNamespaceIndentation", "(", "nesting_state", ",", "is_namespace_indent_item", ",", "raw_lines_no_comments", ",", "linenum", ")", ":", "is_forward_declaration", "=", "IsForwardClassDeclaration", "(", "raw_lines_no_comments", ",", "linenum", ")", "if", "not", "(", "is_namespace_indent_item", "or", "is_forward_declaration", ")", ":", "return", "False", "# If we are in a macro, we do not want to check the namespace indentation.", "if", "IsMacroDefinition", "(", "raw_lines_no_comments", ",", "linenum", ")", ":", "return", "False", "return", "IsBlockInNameSpace", "(", "nesting_state", ",", "is_forward_declaration", ")" ]
https://github.com/bigartm/bigartm/blob/47e37f982de87aa67bfd475ff1f39da696b181b3/utils/cpplint.py#L5869-L5896
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Optimizer/vai_p_pytorch/cifar10_ofa_pruning/mynet_ofa_supernet_evo_search.py
python
accuracy
(output, target, topk=(1,))
Computes the accuracy over the k top predictions for the specified values of k
Computes the accuracy over the k top predictions for the specified values of k
[ "Computes", "the", "accuracy", "over", "the", "k", "top", "predictions", "for", "the", "specified", "values", "of", "k" ]
def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].flatten().float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
[ "def", "accuracy", "(", "output", ",", "target", ",", "topk", "=", "(", "1", ",", ")", ")", ":", "with", "torch", ".", "no_grad", "(", ")", ":", "maxk", "=", "max", "(", "topk", ")", "batch_size", "=", "target", ".", "size", "(", "0", ")", "_", ",", "pred", "=", "output", ".", "topk", "(", "maxk", ",", "1", ",", "True", ",", "True", ")", "pred", "=", "pred", ".", "t", "(", ")", "correct", "=", "pred", ".", "eq", "(", "target", ".", "view", "(", "1", ",", "-", "1", ")", ".", "expand_as", "(", "pred", ")", ")", "res", "=", "[", "]", "for", "k", "in", "topk", ":", "correct_k", "=", "correct", "[", ":", "k", "]", ".", "flatten", "(", ")", ".", "float", "(", ")", ".", "sum", "(", "0", ",", "keepdim", "=", "True", ")", "res", ".", "append", "(", "correct_k", ".", "mul_", "(", "100.0", "/", "batch_size", ")", ")", "return", "res" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Optimizer/vai_p_pytorch/cifar10_ofa_pruning/mynet_ofa_supernet_evo_search.py#L118-L133
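A small worked example, assuming the accuracy function from this record is in scope; the logits are chosen so sample 0 is right at top-1 while sample 1 only enters at top-2.

```python
import torch

logits = torch.tensor([[0.10, 0.70, 0.20],   # sample 0: top-1 is class 1
                       [0.80, 0.05, 0.15]])  # sample 1: classes ranked 0, 2, 1
labels = torch.tensor([1, 2])

top1, top2 = accuracy(logits, labels, topk=(1, 2))
print(top1.item(), top2.item())  # 50.0 100.0
```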
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/_core.py
python
FileSystem.RemoveHandler
(*args, **kwargs)
return _core_.FileSystem_RemoveHandler(*args, **kwargs)
RemoveHandler(CPPFileSystemHandler handler) -> CPPFileSystemHandler
RemoveHandler(CPPFileSystemHandler handler) -> CPPFileSystemHandler
[ "RemoveHandler", "(", "CPPFileSystemHandler", "handler", ")", "-", ">", "CPPFileSystemHandler" ]
def RemoveHandler(*args, **kwargs): """RemoveHandler(CPPFileSystemHandler handler) -> CPPFileSystemHandler""" return _core_.FileSystem_RemoveHandler(*args, **kwargs)
[ "def", "RemoveHandler", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "_core_", ".", "FileSystem_RemoveHandler", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L2441-L2443
randombit/botan
e068d80953469fc8a3ec1715d0f64756d972daba
configure.py
python
CompilerInfo.gen_lib_flags
(self, options, variables)
return ' '.join(list(flag_builder()))
Return any flags specific to building the library (vs the cli or tests)
Return any flags specific to building the library (vs the cli or tests)
[ "Return", "any", "flags", "specific", "to", "building", "the", "library", "(", "vs", "the", "cli", "or", "tests", ")" ]
def gen_lib_flags(self, options, variables): """ Return any flags specific to building the library (vs the cli or tests) """ def flag_builder(): if options.build_shared_lib: yield self.shared_flags yield self.visibility_build_flags if 'debug' in self.lib_flags and options.with_debug_info: yield process_template_string(self.lib_flags['debug'], variables, self.infofile) return ' '.join(list(flag_builder()))
[ "def", "gen_lib_flags", "(", "self", ",", "options", ",", "variables", ")", ":", "def", "flag_builder", "(", ")", ":", "if", "options", ".", "build_shared_lib", ":", "yield", "self", ".", "shared_flags", "yield", "self", ".", "visibility_build_flags", "if", "'debug'", "in", "self", ".", "lib_flags", "and", "options", ".", "with_debug_info", ":", "yield", "process_template_string", "(", "self", ".", "lib_flags", "[", "'debug'", "]", ",", "variables", ",", "self", ".", "infofile", ")", "return", "' '", ".", "join", "(", "list", "(", "flag_builder", "(", ")", ")", ")" ]
https://github.com/randombit/botan/blob/e068d80953469fc8a3ec1715d0f64756d972daba/configure.py#L1284-L1299
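The inner generator plus ' '.join is the load-bearing pattern here; a minimal standalone restatement, with invented flag strings and a stripped-down options object:

class FakeOptions:
    build_shared_lib = True
    with_debug_info = False

def gen_flags(options, shared_flags='-fPIC', visibility_flags='-fvisibility=hidden'):
    def flag_builder():
        if options.build_shared_lib:
            yield shared_flags
            yield visibility_flags
    # list() drains the generator; join produces '' when no flag applies
    return ' '.join(list(flag_builder()))

print(gen_flags(FakeOptions()))  # -fPIC -fvisibility=hidden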
aws/lumberyard
f85344403c1c2e77ec8c75deb2c116e97b713217
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/PIL/GribStubImagePlugin.py
python
register_handler
(handler)
Install application-specific GRIB image handler. :param handler: Handler object.
Install application-specific GRIB image handler.
[ "Install", "application", "-", "specific", "GRIB", "image", "handler", "." ]
def register_handler(handler): """ Install application-specific GRIB image handler. :param handler: Handler object. """ global _handler _handler = handler
[ "def", "register_handler", "(", "handler", ")", ":", "global", "_handler", "_handler", "=", "handler" ]
https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/PIL/GribStubImagePlugin.py#L18-L25
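A hedged sketch of a handler object for the function above; the open(im)/load(im) method names follow Pillow's stub-plugin convention rather than anything stated in the excerpt:

from PIL import GribStubImagePlugin

class DummyGribHandler:
    def open(self, im):
        pass  # called while the stub image file is being opened
    def load(self, im):
        # called when pixel data is requested; a real handler would decode here
        raise NotImplementedError("decoding left to a real GRIB library")

GribStubImagePlugin.register_handler(DummyGribHandler())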
Xilinx/Vitis-AI
fc74d404563d9951b57245443c73bef389f3657f
tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/python/timeseries/state_space_models/level_trend.py
python
AdderStateSpaceModel.transition_to_powers
(self, powers)
return identity_matrices + math_ops.cast(powers_padded, self.dtype)
Computes powers of the adder transition matrix efficiently. Args: powers: An integer Tensor, shape [...], with powers to raise the transition matrix to. Returns: A floating point Tensor with shape [..., 2, 2] containing: transition^power = [[1., power], [0., 1.]]
Computes powers of the adder transition matrix efficiently.
[ "Computes", "powers", "of", "the", "adder", "transition", "matrix", "efficiently", "." ]
def transition_to_powers(self, powers): """Computes powers of the adder transition matrix efficiently. Args: powers: An integer Tensor, shape [...], with powers to raise the transition matrix to. Returns: A floating point Tensor with shape [..., 2, 2] containing: transition^power = [[1., power], [0., 1.]] """ paddings = array_ops.concat( [ array_ops.zeros([array_ops.rank(powers), 2], dtype=dtypes.int32), [(0, 1), (1, 0)] ], axis=0) powers_padded = array_ops.pad(powers[..., None, None], paddings=paddings) identity_matrices = linalg_ops.eye( num_rows=2, batch_shape=array_ops.shape(powers), dtype=self.dtype) return identity_matrices + math_ops.cast(powers_padded, self.dtype)
[ "def", "transition_to_powers", "(", "self", ",", "powers", ")", ":", "paddings", "=", "array_ops", ".", "concat", "(", "[", "array_ops", ".", "zeros", "(", "[", "array_ops", ".", "rank", "(", "powers", ")", ",", "2", "]", ",", "dtype", "=", "dtypes", ".", "int32", ")", ",", "[", "(", "0", ",", "1", ")", ",", "(", "1", ",", "0", ")", "]", "]", ",", "axis", "=", "0", ")", "powers_padded", "=", "array_ops", ".", "pad", "(", "powers", "[", "...", ",", "None", ",", "None", "]", ",", "paddings", "=", "paddings", ")", "identity_matrices", "=", "linalg_ops", ".", "eye", "(", "num_rows", "=", "2", ",", "batch_shape", "=", "array_ops", ".", "shape", "(", "powers", ")", ",", "dtype", "=", "self", ".", "dtype", ")", "return", "identity_matrices", "+", "math_ops", ".", "cast", "(", "powers_padded", ",", "self", ".", "dtype", ")" ]
https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/timeseries/python/timeseries/state_space_models/level_trend.py#L73-L93
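A NumPy restatement of what the TensorFlow code above computes: for each integer power p it returns the closed form transition^p = [[1, p], [0, 1]] without any matrix multiplication:

import numpy as np

def adder_transition_powers(powers):
    powers = np.asarray(powers, dtype=float)
    out = np.zeros(powers.shape + (2, 2))
    out[..., 0, 0] = 1.0
    out[..., 1, 1] = 1.0
    out[..., 0, 1] = powers  # the only power-dependent entry
    return out

print(adder_transition_powers([0, 3])[1])  # [[1. 3.] [0. 1.]]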
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
src/osx_carbon/propgrid.py
python
PropertyGridInterface.DoDefaultTypeMappings
(self)
Map built-in properties.
Map built-in properties.
[ "Map", "built", "-", "in", "properties", "." ]
def DoDefaultTypeMappings(self): "Map built-in properties." global _type2property try: mappings = _type2property return except NameError: mappings = {} _type2property = mappings mappings[str] = StringProperty mappings[unicode] = StringProperty mappings[int] = IntProperty mappings[float] = FloatProperty mappings[bool] = BoolProperty mappings[list] = ArrayStringProperty mappings[tuple] = ArrayStringProperty mappings[wx.Font] = FontProperty mappings[wx.Colour] = ColourProperty "mappings[wx.Size] = SizeProperty" "mappings[wx.Point] = PointProperty" "mappings[wx.FontData] = FontDataProperty"
[ "def", "DoDefaultTypeMappings", "(", "self", ")", ":", "global", "_type2property", "try", ":", "mappings", "=", "_type2property", "return", "except", "NameError", ":", "mappings", "=", "{", "}", "_type2property", "=", "mappings", "mappings", "[", "str", "]", "=", "StringProperty", "mappings", "[", "unicode", "]", "=", "StringProperty", "mappings", "[", "int", "]", "=", "IntProperty", "mappings", "[", "float", "]", "=", "FloatProperty", "mappings", "[", "bool", "]", "=", "BoolProperty", "mappings", "[", "list", "]", "=", "ArrayStringProperty", "mappings", "[", "tuple", "]", "=", "ArrayStringProperty", "mappings", "[", "wx", ".", "Font", "]", "=", "FontProperty", "mappings", "[", "wx", ".", "Colour", "]", "=", "ColourProperty", "\"mappings[wx.Size] = SizeProperty\"", "\"mappings[wx.Point] = PointProperty\"", "\"mappings[wx.FontData] = FontDataProperty\"" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/propgrid.py#L1496-L1518
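The try/NameError dance above is a lazy-initialise-a-module-global idiom; a standalone sketch with invented names:

def get_type_mappings():
    global _mappings
    try:
        return _mappings        # already built on an earlier call
    except NameError:
        _mappings = {str: 'StringProperty', int: 'IntProperty'}
        return _mappings

print(get_type_mappings()[int])  # 'IntProperty'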
baidu-research/tensorflow-allreduce
66d5b855e90b0949e9fa5cca5599fd729a70e874
tensorflow/contrib/framework/python/ops/variables.py
python
get_variables_by_suffix
(suffix, scope=None)
return get_variables(scope=scope, suffix=suffix)
Gets the list of variables that end with the given suffix. Args: suffix: suffix for filtering the variables to return. scope: an optional scope for filtering the variables to return. Returns: a copied list of variables with the given name and suffix.
Gets the list of variables that end with the given suffix.
[ "Gets", "the", "list", "of", "variables", "that", "end", "with", "the", "given", "suffix", "." ]
def get_variables_by_suffix(suffix, scope=None): """Gets the list of variables that end with the given suffix. Args: suffix: suffix for filtering the variables to return. scope: an optional scope for filtering the variables to return. Returns: a copied list of variables with the given name and suffix. """ return get_variables(scope=scope, suffix=suffix)
[ "def", "get_variables_by_suffix", "(", "suffix", ",", "scope", "=", "None", ")", ":", "return", "get_variables", "(", "scope", "=", "scope", ",", "suffix", "=", "suffix", ")" ]
https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/framework/python/ops/variables.py#L373-L383
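A hedged TF 1.x usage sketch (the contrib namespace only exists there); the scope and variable names are invented:

import tensorflow as tf  # assumes a 1.x install where tf.contrib is available

with tf.variable_scope('layer1'):
    w = tf.get_variable('weights', shape=[3, 3])
    b = tf.get_variable('bias', shape=[3])

# every variable whose name ends in 'weights', restricted to scope 'layer1'
weights = tf.contrib.framework.get_variables_by_suffix('weights', scope='layer1')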
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/distutils/cmd.py
python
Command.ensure_filename
(self, option)
Ensure that 'option' is the name of an existing file.
Ensure that 'option' is the name of an existing file.
[ "Ensure", "that", "option", "is", "the", "name", "of", "an", "existing", "file", "." ]
def ensure_filename(self, option): """Ensure that 'option' is the name of an existing file.""" self._ensure_tested_string(option, os.path.isfile, "filename", "'%s' does not exist or is not a file")
[ "def", "ensure_filename", "(", "self", ",", "option", ")", ":", "self", ".", "_ensure_tested_string", "(", "option", ",", "os", ".", "path", ".", "isfile", ",", "\"filename\"", ",", "\"'%s' does not exist or is not a file\"", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/distutils/cmd.py#L260-L264
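A sketch of how a command subclass might call this validator from finalize_options(); the command and option names are invented:

from distutils.cmd import Command

class check_config(Command):
    user_options = [('config-file=', None, 'path to a config file')]

    def initialize_options(self):
        self.config_file = None

    def finalize_options(self):
        if self.config_file is not None:
            # raises DistutilsOptionError unless the value names a real file
            self.ensure_filename('config_file')

    def run(self):
        pass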
rdkit/rdkit
ede860ae316d12d8568daf5ee800921c3389c84e
rdkit/Chem/GraphDescriptors.py
python
_pyChi4n
(mol)
return _pyChiNn_(mol, 4)
Similar to Hall Kier Chi4v, but uses nVal instead of valence This makes a big difference after we get out of the first row. **NOTE**: because the current path finding code does, by design, detect rings as paths (e.g. in C1CC1 there is *1* atom path of length 3), values of Chi4n may give results that differ from those provided by the old code in molecules that have 3 rings.
Similar to Hall Kier Chi4v, but uses nVal instead of valence This makes a big difference after we get out of the first row.
[ "Similar", "to", "Hall", "Kier", "Chi4v", "but", "uses", "nVal", "instead", "of", "valence", "This", "makes", "a", "big", "difference", "after", "we", "get", "out", "of", "the", "first", "row", "." ]
def _pyChi4n(mol): """ Similar to Hall Kier Chi4v, but uses nVal instead of valence This makes a big difference after we get out of the first row. **NOTE**: because the current path finding code does, by design, detect rings as paths (e.g. in C1CC1 there is *1* atom path of length 3), values of Chi4n may give results that differ from those provided by the old code in molecules that have 3 rings. """ return _pyChiNn_(mol, 4)
[ "def", "_pyChi4n", "(", "mol", ")", ":", "return", "_pyChiNn_", "(", "mol", ",", "4", ")" ]
https://github.com/rdkit/rdkit/blob/ede860ae316d12d8568daf5ee800921c3389c84e/rdkit/Chem/GraphDescriptors.py#L389-L400
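Usage sketch: the public counterpart of this private helper is exposed as Chi4n in the same module, so a direct call looks like:

from rdkit import Chem
from rdkit.Chem import GraphDescriptors

mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol
print(GraphDescriptors.Chi4n(mol))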
windystrife/UnrealEngine_NVIDIAGameWorks
b50e6338a7c5b26374d66306ebc7807541ff815e
Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rfc822.py
python
Message.values
(self)
return self.dict.values()
Get all of a message's header field values.
Get all of a message's header field values.
[ "Get", "all", "of", "a", "message", "s", "header", "field", "values", "." ]
def values(self): """Get all of a message's header field values.""" return self.dict.values()
[ "def", "values", "(", "self", ")", ":", "return", "self", ".", "dict", ".", "values", "(", ")" ]
https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/rfc822.py#L450-L452
wxWidgets/wxPython-Classic
19571e1ae65f1ac445f5491474121998c97a1bf0
demo/SystemSettings.py
python
SysPanelBase.DoGetBestSize
(self)
return (maxw, maxh)
Return the best size for this panel
Return the best size for this panel
[ "Return", "the", "best", "size", "for", "this", "panel" ]
def DoGetBestSize(self): """Return the best size for this panel""" maxw = 0 for vals in self._vals: extent = self.GetTextExtent(vals)[0] if extent > maxw: maxw = extent self._maxw = maxw maxw += 75 maxh = (len(self._vals) + 1) * 22 return (maxw, maxh)
[ "def", "DoGetBestSize", "(", "self", ")", ":", "maxw", "=", "0", "for", "vals", "in", "self", ".", "_vals", ":", "extent", "=", "self", ".", "GetTextExtent", "(", "vals", ")", "[", "0", "]", "if", "extent", ">", "maxw", ":", "maxw", "=", "extent", "self", ".", "_maxw", "=", "maxw", "maxw", "+=", "75", "maxh", "=", "(", "len", "(", "self", ".", "_vals", ")", "+", "1", ")", "*", "22", "return", "(", "maxw", ",", "maxh", ")" ]
https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/demo/SystemSettings.py#L77-L88
indutny/candor
48e7260618f5091c80a3416828e2808cad3ea22e
tools/gyp/pylib/gyp/xcode_emulation.py
python
XcodeSettings.GetBundleContentsFolderPath
(self)
Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.
Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.
[ "Returns", "the", "qualified", "path", "to", "the", "bundle", "s", "contents", "folder", ".", "E", ".", "g", ".", "Chromium", ".", "app", "/", "Contents", "or", "Foo", ".", "bundle", "/", "Versions", "/", "A", ".", "Only", "valid", "for", "bundles", "." ]
def GetBundleContentsFolderPath(self): """Returns the qualified path to the bundle's contents folder. E.g. Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.""" assert self._IsBundle() if self.spec['type'] == 'shared_library': return os.path.join( self.GetWrapperName(), 'Versions', self.GetFrameworkVersion()) else: # loadable_modules have a 'Contents' folder like executables. return os.path.join(self.GetWrapperName(), 'Contents')
[ "def", "GetBundleContentsFolderPath", "(", "self", ")", ":", "assert", "self", ".", "_IsBundle", "(", ")", "if", "self", ".", "spec", "[", "'type'", "]", "==", "'shared_library'", ":", "return", "os", ".", "path", ".", "join", "(", "self", ".", "GetWrapperName", "(", ")", ",", "'Versions'", ",", "self", ".", "GetFrameworkVersion", "(", ")", ")", "else", ":", "# loadable_modules have a 'Contents' folder like executables.", "return", "os", ".", "path", ".", "join", "(", "self", ".", "GetWrapperName", "(", ")", ",", "'Contents'", ")" ]
https://github.com/indutny/candor/blob/48e7260618f5091c80a3416828e2808cad3ea22e/tools/gyp/pylib/gyp/xcode_emulation.py#L101-L110
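A plain-Python illustration of the two branches above, with made-up wrapper names (the real method derives them from the build spec):

import os

def contents_folder(wrapper_name, spec_type, framework_version='A'):
    if spec_type == 'shared_library':
        # frameworks version their contents: Foo.framework/Versions/A
        return os.path.join(wrapper_name, 'Versions', framework_version)
    # executables and loadable_modules keep a 'Contents' folder
    return os.path.join(wrapper_name, 'Contents')

print(contents_folder('Foo.framework', 'shared_library'))  # Foo.framework/Versions/A
print(contents_folder('Chromium.app', 'executable'))       # Chromium.app/Contents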
hanpfei/chromium-net
392cc1fa3a8f92f42e4071ab6e674d8e0482f83f
third_party/catapult/third_party/closure_linter/closure_linter/javascriptlintrules.py
python
JavaScriptLintRules.CheckToken
(self, token, state)
Checks a token, given the current parser_state, for warnings and errors. Args: token: The current token under consideration state: parser_state object that indicates the current state in the page
Checks a token, given the current parser_state, for warnings and errors.
[ "Checks", "a", "token", "given", "the", "current", "parser_state", "for", "warnings", "and", "errors", "." ]
def CheckToken(self, token, state): """Checks a token, given the current parser_state, for warnings and errors. Args: token: The current token under consideration state: parser_state object that indicates the current state in the page """ # Call the base class's CheckToken function. super(JavaScriptLintRules, self).CheckToken(token, state) # Store some convenience variables namespaces_info = self._namespaces_info if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES): self._CheckUnusedLocalVariables(token, state) if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS): # Find all assignments to private members. if token.type == Type.SIMPLE_LVALUE: identifier = token.string if identifier.endswith('_') and not identifier.endswith('__'): doc_comment = state.GetDocComment() suppressed = doc_comment and ( 'underscore' in doc_comment.suppressions or 'unusedPrivateMembers' in doc_comment.suppressions) if not suppressed: # Look for static members defined on a provided namespace. if namespaces_info: namespace = namespaces_info.GetClosurizedNamespace(identifier) provided_namespaces = namespaces_info.GetProvidedNamespaces() else: namespace = None provided_namespaces = set() # Skip cases of this.something_.somethingElse_. regex = re.compile(r'^this\.[a-zA-Z_]+$') if namespace in provided_namespaces or regex.match(identifier): variable = identifier.split('.')[-1] self._declared_private_member_tokens[variable] = token self._declared_private_members.add(variable) elif not identifier.endswith('__'): # Consider setting public members of private members to be a usage. for piece in identifier.split('.'): if piece.endswith('_'): self._used_private_members.add(piece) # Find all usages of private members. if token.type == Type.IDENTIFIER: for piece in token.string.split('.'): if piece.endswith('_'): self._used_private_members.add(piece) if token.type == Type.DOC_FLAG: flag = token.attached_object if flag.flag_type == 'param' and flag.name_token is not None: self._CheckForMissingSpaceBeforeToken( token.attached_object.name_token) if flag.type is not None and flag.name is not None: if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER): # Check for variable arguments marker in type. if flag.jstype.IsVarArgsType() and flag.name != 'var_args': self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME, 'Variable length argument %s must be renamed ' 'to var_args.' % flag.name, token) elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args': self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE, 'Variable length argument %s type must start ' 'with \'...\'.' % flag.name, token) if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER): # Check for optional marker in type. if (flag.jstype.opt_arg and not flag.name.startswith('opt_')): self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX, 'Optional parameter name %s must be prefixed ' 'with opt_.' % flag.name, token) elif (not flag.jstype.opt_arg and flag.name.startswith('opt_')): self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE, 'Optional parameter %s type must end with =.' % flag.name, token) if flag.flag_type in state.GetDocFlag().HAS_TYPE: # Check for both missing type token and empty type braces '{}' # Missing suppress types are reported separately and we allow enums, # const, private, public and protected without types. 
if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE and (not flag.jstype or flag.jstype.IsEmpty())): self._HandleError(errors.MISSING_JSDOC_TAG_TYPE, 'Missing type in %s tag' % token.string, token) elif flag.name_token and flag.type_end_token and tokenutil.Compare( flag.type_end_token, flag.name_token) > 0: self._HandleError( errors.OUT_OF_ORDER_JSDOC_TAG_TYPE, 'Type should be immediately after %s tag' % token.string, token) elif token.type == Type.DOUBLE_QUOTE_STRING_START: next_token = token.next while next_token.type == Type.STRING_TEXT: if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search( next_token.string): break next_token = next_token.next else: self._HandleError( errors.UNNECESSARY_DOUBLE_QUOTED_STRING, 'Single-quoted string preferred over double-quoted string.', token, position=Position.All(token.string)) elif token.type == Type.END_DOC_COMMENT: doc_comment = state.GetDocComment() # When @externs appears in a @fileoverview comment, it should trigger # the same limited doc checks as a special filename like externs.js. if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'): self._SetLimitedDocChecks(True) if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and not self._is_html and state.InTopLevel() and not state.InNonScopeBlock()): # Check if we're in a fileoverview or constructor JsDoc. is_constructor = ( doc_comment.HasFlag('constructor') or doc_comment.HasFlag('interface')) # @fileoverview is an optional tag so if the dosctring is the first # token in the file treat it as a file level docstring. is_file_level_comment = ( doc_comment.HasFlag('fileoverview') or not doc_comment.start_token.previous) # If the comment is not a file overview, and it does not immediately # precede some code, skip it. # NOTE: The tokenutil methods are not used here because of their # behavior at the top of a file. next_token = token.next if (not next_token or (not is_file_level_comment and next_token.type in Type.NON_CODE_TYPES)): return # Don't require extra blank lines around suppression of extra # goog.require errors. if (doc_comment.SuppressionOnly() and next_token.type == Type.IDENTIFIER and next_token.string in ['goog.provide', 'goog.require']): return # Find the start of this block (include comments above the block, unless # this is a file overview). block_start = doc_comment.start_token if not is_file_level_comment: token = block_start.previous while token and token.type in Type.COMMENT_TYPES: block_start = token token = token.previous # Count the number of blank lines before this block. blank_lines = 0 token = block_start.previous while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]: if token.type == Type.BLANK_LINE: # A blank line. blank_lines += 1 elif token.type == Type.WHITESPACE and not token.line.strip(): # A line with only whitespace on it. blank_lines += 1 token = token.previous # Log errors. error_message = False expected_blank_lines = 0 # Only need blank line before file overview if it is not the beginning # of the file, e.g. copyright is first. if is_file_level_comment and blank_lines == 0 and block_start.previous: error_message = 'Should have a blank line before a file overview.' expected_blank_lines = 1 elif is_constructor and blank_lines != 3: error_message = ( 'Should have 3 blank lines before a constructor/interface.') expected_blank_lines = 3 elif (not is_file_level_comment and not is_constructor and blank_lines != 2): error_message = 'Should have 2 blank lines between top-level blocks.' 
expected_blank_lines = 2 if error_message: self._HandleError( errors.WRONG_BLANK_LINE_COUNT, error_message, block_start, position=Position.AtBeginning(), fix_data=expected_blank_lines - blank_lines) elif token.type == Type.END_BLOCK: if state.InFunction() and state.IsFunctionClose(): is_immediately_called = (token.next and token.next.type == Type.START_PAREN) function = state.GetFunction() if not self._limited_doc_checks: if (function.has_return and function.doc and not is_immediately_called and not function.doc.HasFlag('return') and not function.doc.InheritsDocumentation() and not function.doc.HasFlag('constructor')): # Check for proper documentation of return value. self._HandleError( errors.MISSING_RETURN_DOCUMENTATION, 'Missing @return JsDoc in function with non-trivial return', function.doc.end_token, position=Position.AtBeginning()) elif (not function.has_return and not function.has_throw and function.doc and function.doc.HasFlag('return') and not state.InInterfaceMethod()): flag = function.doc.GetFlag('return') valid_no_return_names = ['undefined', 'void', '*'] invalid_return = flag.jstype is None or not any( sub_type.identifier in valid_no_return_names for sub_type in flag.jstype.IterTypeGroup()) if invalid_return: self._HandleError( errors.UNNECESSARY_RETURN_DOCUMENTATION, 'Found @return JsDoc on function that returns nothing', flag.flag_token, position=Position.AtBeginning()) # b/4073735. Method in object literal definition of prototype can # safely reference 'this'. prototype_object_literal = False block_start = None previous_code = None previous_previous_code = None # Search for cases where prototype is defined as object literal. # previous_previous_code # | previous_code # | | block_start # | | | # a.b.prototype = { # c : function() { # this.d = 1; # } # } # If in object literal, find first token of block so to find previous # tokens to check above condition. if state.InObjectLiteral(): block_start = state.GetCurrentBlockStart() # If an object literal then get previous token (code type). For above # case it should be '='. if block_start: previous_code = tokenutil.SearchExcept(block_start, Type.NON_CODE_TYPES, reverse=True) # If previous token to block is '=' then get its previous token. if previous_code and previous_code.IsOperator('='): previous_previous_code = tokenutil.SearchExcept(previous_code, Type.NON_CODE_TYPES, reverse=True) # If variable/token before '=' ends with '.prototype' then its above # case of prototype defined with object literal. prototype_object_literal = (previous_previous_code and previous_previous_code.string.endswith( '.prototype')) if (function.has_this and function.doc and not function.doc.HasFlag('this') and not function.is_constructor and not function.is_interface and '.prototype.' not in function.name and not prototype_object_literal): self._HandleError( errors.MISSING_JSDOC_TAG_THIS, 'Missing @this JsDoc in function referencing "this". 
(' 'this usually means you are trying to reference "this" in ' 'a static function, or you have forgotten to mark a ' 'constructor with @constructor)', function.doc.end_token, position=Position.AtBeginning()) elif token.type == Type.IDENTIFIER: if token.string == 'goog.inherits' and not state.InFunction(): if state.GetLastNonSpaceToken().line_number == token.line_number: self._HandleError( errors.MISSING_LINE, 'Missing newline between constructor and goog.inherits', token, position=Position.AtBeginning()) extra_space = state.GetLastNonSpaceToken().next while extra_space != token: if extra_space.type == Type.BLANK_LINE: self._HandleError( errors.EXTRA_LINE, 'Extra line between constructor and goog.inherits', extra_space) extra_space = extra_space.next # TODO(robbyw): Test the last function was a constructor. # TODO(robbyw): Test correct @extends and @implements documentation. elif (token.string == 'goog.provide' and not state.InFunction() and namespaces_info is not None): namespace = tokenutil.GetStringAfterToken(token) # Report extra goog.provide statement. if not namespace or namespaces_info.IsExtraProvide(token): if not namespace: msg = 'Empty namespace in goog.provide' else: msg = 'Unnecessary goog.provide: ' + namespace # Hint to user if this is a Test namespace. if namespace.endswith('Test'): msg += (' *Test namespaces must be mentioned in the ' 'goog.setTestOnly() call') self._HandleError( errors.EXTRA_GOOG_PROVIDE, msg, token, position=Position.AtBeginning()) if namespaces_info.IsLastProvide(token): # Report missing provide statements after the last existing provide. missing_provides = namespaces_info.GetMissingProvides() if missing_provides: self._ReportMissingProvides( missing_provides, tokenutil.GetLastTokenInSameLine(token).next, False) # If there are no require statements, missing requires should be # reported after the last provide. if not namespaces_info.GetRequiredNamespaces(): missing_requires, illegal_alias_statements = ( namespaces_info.GetMissingRequires()) if missing_requires: self._ReportMissingRequires( missing_requires, tokenutil.GetLastTokenInSameLine(token).next, True) if illegal_alias_statements: self._ReportIllegalAliasStatement(illegal_alias_statements) elif (token.string == 'goog.require' and not state.InFunction() and namespaces_info is not None): namespace = tokenutil.GetStringAfterToken(token) # If there are no provide statements, missing provides should be # reported before the first require. if (namespaces_info.IsFirstRequire(token) and not namespaces_info.GetProvidedNamespaces()): missing_provides = namespaces_info.GetMissingProvides() if missing_provides: self._ReportMissingProvides( missing_provides, tokenutil.GetFirstTokenInSameLine(token), True) # Report extra goog.require statement. if not namespace or namespaces_info.IsExtraRequire(token): if not namespace: msg = 'Empty namespace in goog.require' else: msg = 'Unnecessary goog.require: ' + namespace self._HandleError( errors.EXTRA_GOOG_REQUIRE, msg, token, position=Position.AtBeginning()) # Report missing goog.require statements. 
if namespaces_info.IsLastRequire(token): missing_requires, illegal_alias_statements = ( namespaces_info.GetMissingRequires()) if missing_requires: self._ReportMissingRequires( missing_requires, tokenutil.GetLastTokenInSameLine(token).next, False) if illegal_alias_statements: self._ReportIllegalAliasStatement(illegal_alias_statements) elif token.type == Type.OPERATOR: last_in_line = token.IsLastInLine() # If the token is unary and appears to be used in a unary context # it's ok. Otherwise, if it's at the end of the line or immediately # before a comment, it's ok. # Don't report an error before a start bracket - it will be reported # by that token's space checks. if (not token.metadata.IsUnaryOperator() and not last_in_line and not token.next.IsComment() and not token.next.IsOperator(',') and not tokenutil.IsDot(token) and token.next.type not in (Type.WHITESPACE, Type.END_PAREN, Type.END_BRACKET, Type.SEMICOLON, Type.START_BRACKET)): self._HandleError( errors.MISSING_SPACE, 'Missing space after "%s"' % token.string, token, position=Position.AtEnd(token.string)) elif token.type == Type.WHITESPACE: first_in_line = token.IsFirstInLine() last_in_line = token.IsLastInLine() # Check whitespace length if it's not the first token of the line and # if it's not immediately before a comment. if not last_in_line and not first_in_line and not token.next.IsComment(): # Ensure there is no space after opening parentheses. if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET, Type.FUNCTION_NAME) or token.next.type == Type.START_PARAMETERS): self._HandleError( errors.EXTRA_SPACE, 'Extra space after "%s"' % token.previous.string, token, position=Position.All(token.string)) elif token.type == Type.SEMICOLON: previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True) if not previous_token: self._HandleError( errors.REDUNDANT_SEMICOLON, 'Semicolon without any statement', token, position=Position.AtEnd(token.string)) elif (previous_token.type == Type.KEYWORD and previous_token.string not in ['break', 'continue', 'return']): self._HandleError( errors.REDUNDANT_SEMICOLON, ('Semicolon after \'%s\' without any statement.' ' Looks like an error.' % previous_token.string), token, position=Position.AtEnd(token.string))
[ "def", "CheckToken", "(", "self", ",", "token", ",", "state", ")", ":", "# Call the base class's CheckToken function.", "super", "(", "JavaScriptLintRules", ",", "self", ")", ".", "CheckToken", "(", "token", ",", "state", ")", "# Store some convenience variables", "namespaces_info", "=", "self", ".", "_namespaces_info", "if", "error_check", ".", "ShouldCheck", "(", "Rule", ".", "UNUSED_LOCAL_VARIABLES", ")", ":", "self", ".", "_CheckUnusedLocalVariables", "(", "token", ",", "state", ")", "if", "error_check", ".", "ShouldCheck", "(", "Rule", ".", "UNUSED_PRIVATE_MEMBERS", ")", ":", "# Find all assignments to private members.", "if", "token", ".", "type", "==", "Type", ".", "SIMPLE_LVALUE", ":", "identifier", "=", "token", ".", "string", "if", "identifier", ".", "endswith", "(", "'_'", ")", "and", "not", "identifier", ".", "endswith", "(", "'__'", ")", ":", "doc_comment", "=", "state", ".", "GetDocComment", "(", ")", "suppressed", "=", "doc_comment", "and", "(", "'underscore'", "in", "doc_comment", ".", "suppressions", "or", "'unusedPrivateMembers'", "in", "doc_comment", ".", "suppressions", ")", "if", "not", "suppressed", ":", "# Look for static members defined on a provided namespace.", "if", "namespaces_info", ":", "namespace", "=", "namespaces_info", ".", "GetClosurizedNamespace", "(", "identifier", ")", "provided_namespaces", "=", "namespaces_info", ".", "GetProvidedNamespaces", "(", ")", "else", ":", "namespace", "=", "None", "provided_namespaces", "=", "set", "(", ")", "# Skip cases of this.something_.somethingElse_.", "regex", "=", "re", ".", "compile", "(", "r'^this\\.[a-zA-Z_]+$'", ")", "if", "namespace", "in", "provided_namespaces", "or", "regex", ".", "match", "(", "identifier", ")", ":", "variable", "=", "identifier", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "self", ".", "_declared_private_member_tokens", "[", "variable", "]", "=", "token", "self", ".", "_declared_private_members", ".", "add", "(", "variable", ")", "elif", "not", "identifier", ".", "endswith", "(", "'__'", ")", ":", "# Consider setting public members of private members to be a usage.", "for", "piece", "in", "identifier", ".", "split", "(", "'.'", ")", ":", "if", "piece", ".", "endswith", "(", "'_'", ")", ":", "self", ".", "_used_private_members", ".", "add", "(", "piece", ")", "# Find all usages of private members.", "if", "token", ".", "type", "==", "Type", ".", "IDENTIFIER", ":", "for", "piece", "in", "token", ".", "string", ".", "split", "(", "'.'", ")", ":", "if", "piece", ".", "endswith", "(", "'_'", ")", ":", "self", ".", "_used_private_members", ".", "add", "(", "piece", ")", "if", "token", ".", "type", "==", "Type", ".", "DOC_FLAG", ":", "flag", "=", "token", ".", "attached_object", "if", "flag", ".", "flag_type", "==", "'param'", "and", "flag", ".", "name_token", "is", "not", "None", ":", "self", ".", "_CheckForMissingSpaceBeforeToken", "(", "token", ".", "attached_object", ".", "name_token", ")", "if", "flag", ".", "type", "is", "not", "None", "and", "flag", ".", "name", "is", "not", "None", ":", "if", "error_check", ".", "ShouldCheck", "(", "Rule", ".", "VARIABLE_ARG_MARKER", ")", ":", "# Check for variable arguments marker in type.", "if", "flag", ".", "jstype", ".", "IsVarArgsType", "(", ")", "and", "flag", ".", "name", "!=", "'var_args'", ":", "self", ".", "_HandleError", "(", "errors", ".", "JSDOC_MISSING_VAR_ARGS_NAME", ",", "'Variable length argument %s must be renamed '", "'to var_args.'", "%", "flag", ".", "name", ",", "token", ")", "elif", "not", "flag", ".", "jstype", ".", 
"IsVarArgsType", "(", ")", "and", "flag", ".", "name", "==", "'var_args'", ":", "self", ".", "_HandleError", "(", "errors", ".", "JSDOC_MISSING_VAR_ARGS_TYPE", ",", "'Variable length argument %s type must start '", "'with \\'...\\'.'", "%", "flag", ".", "name", ",", "token", ")", "if", "error_check", ".", "ShouldCheck", "(", "Rule", ".", "OPTIONAL_TYPE_MARKER", ")", ":", "# Check for optional marker in type.", "if", "(", "flag", ".", "jstype", ".", "opt_arg", "and", "not", "flag", ".", "name", ".", "startswith", "(", "'opt_'", ")", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "JSDOC_MISSING_OPTIONAL_PREFIX", ",", "'Optional parameter name %s must be prefixed '", "'with opt_.'", "%", "flag", ".", "name", ",", "token", ")", "elif", "(", "not", "flag", ".", "jstype", ".", "opt_arg", "and", "flag", ".", "name", ".", "startswith", "(", "'opt_'", ")", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "JSDOC_MISSING_OPTIONAL_TYPE", ",", "'Optional parameter %s type must end with =.'", "%", "flag", ".", "name", ",", "token", ")", "if", "flag", ".", "flag_type", "in", "state", ".", "GetDocFlag", "(", ")", ".", "HAS_TYPE", ":", "# Check for both missing type token and empty type braces '{}'", "# Missing suppress types are reported separately and we allow enums,", "# const, private, public and protected without types.", "if", "(", "flag", ".", "flag_type", "not", "in", "state", ".", "GetDocFlag", "(", ")", ".", "CAN_OMIT_TYPE", "and", "(", "not", "flag", ".", "jstype", "or", "flag", ".", "jstype", ".", "IsEmpty", "(", ")", ")", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "MISSING_JSDOC_TAG_TYPE", ",", "'Missing type in %s tag'", "%", "token", ".", "string", ",", "token", ")", "elif", "flag", ".", "name_token", "and", "flag", ".", "type_end_token", "and", "tokenutil", ".", "Compare", "(", "flag", ".", "type_end_token", ",", "flag", ".", "name_token", ")", ">", "0", ":", "self", ".", "_HandleError", "(", "errors", ".", "OUT_OF_ORDER_JSDOC_TAG_TYPE", ",", "'Type should be immediately after %s tag'", "%", "token", ".", "string", ",", "token", ")", "elif", "token", ".", "type", "==", "Type", ".", "DOUBLE_QUOTE_STRING_START", ":", "next_token", "=", "token", ".", "next", "while", "next_token", ".", "type", "==", "Type", ".", "STRING_TEXT", ":", "if", "javascripttokenizer", ".", "JavaScriptTokenizer", ".", "SINGLE_QUOTE", ".", "search", "(", "next_token", ".", "string", ")", ":", "break", "next_token", "=", "next_token", ".", "next", "else", ":", "self", ".", "_HandleError", "(", "errors", ".", "UNNECESSARY_DOUBLE_QUOTED_STRING", ",", "'Single-quoted string preferred over double-quoted string.'", ",", "token", ",", "position", "=", "Position", ".", "All", "(", "token", ".", "string", ")", ")", "elif", "token", ".", "type", "==", "Type", ".", "END_DOC_COMMENT", ":", "doc_comment", "=", "state", ".", "GetDocComment", "(", ")", "# When @externs appears in a @fileoverview comment, it should trigger", "# the same limited doc checks as a special filename like externs.js.", "if", "doc_comment", ".", "HasFlag", "(", "'fileoverview'", ")", "and", "doc_comment", ".", "HasFlag", "(", "'externs'", ")", ":", "self", ".", "_SetLimitedDocChecks", "(", "True", ")", "if", "(", "error_check", ".", "ShouldCheck", "(", "Rule", ".", "BLANK_LINES_AT_TOP_LEVEL", ")", "and", "not", "self", ".", "_is_html", "and", "state", ".", "InTopLevel", "(", ")", "and", "not", "state", ".", "InNonScopeBlock", "(", ")", ")", ":", "# Check if we're in a fileoverview or constructor JsDoc.", "is_constructor", 
"=", "(", "doc_comment", ".", "HasFlag", "(", "'constructor'", ")", "or", "doc_comment", ".", "HasFlag", "(", "'interface'", ")", ")", "# @fileoverview is an optional tag so if the dosctring is the first", "# token in the file treat it as a file level docstring.", "is_file_level_comment", "=", "(", "doc_comment", ".", "HasFlag", "(", "'fileoverview'", ")", "or", "not", "doc_comment", ".", "start_token", ".", "previous", ")", "# If the comment is not a file overview, and it does not immediately", "# precede some code, skip it.", "# NOTE: The tokenutil methods are not used here because of their", "# behavior at the top of a file.", "next_token", "=", "token", ".", "next", "if", "(", "not", "next_token", "or", "(", "not", "is_file_level_comment", "and", "next_token", ".", "type", "in", "Type", ".", "NON_CODE_TYPES", ")", ")", ":", "return", "# Don't require extra blank lines around suppression of extra", "# goog.require errors.", "if", "(", "doc_comment", ".", "SuppressionOnly", "(", ")", "and", "next_token", ".", "type", "==", "Type", ".", "IDENTIFIER", "and", "next_token", ".", "string", "in", "[", "'goog.provide'", ",", "'goog.require'", "]", ")", ":", "return", "# Find the start of this block (include comments above the block, unless", "# this is a file overview).", "block_start", "=", "doc_comment", ".", "start_token", "if", "not", "is_file_level_comment", ":", "token", "=", "block_start", ".", "previous", "while", "token", "and", "token", ".", "type", "in", "Type", ".", "COMMENT_TYPES", ":", "block_start", "=", "token", "token", "=", "token", ".", "previous", "# Count the number of blank lines before this block.", "blank_lines", "=", "0", "token", "=", "block_start", ".", "previous", "while", "token", "and", "token", ".", "type", "in", "[", "Type", ".", "WHITESPACE", ",", "Type", ".", "BLANK_LINE", "]", ":", "if", "token", ".", "type", "==", "Type", ".", "BLANK_LINE", ":", "# A blank line.", "blank_lines", "+=", "1", "elif", "token", ".", "type", "==", "Type", ".", "WHITESPACE", "and", "not", "token", ".", "line", ".", "strip", "(", ")", ":", "# A line with only whitespace on it.", "blank_lines", "+=", "1", "token", "=", "token", ".", "previous", "# Log errors.", "error_message", "=", "False", "expected_blank_lines", "=", "0", "# Only need blank line before file overview if it is not the beginning", "# of the file, e.g. 
copyright is first.", "if", "is_file_level_comment", "and", "blank_lines", "==", "0", "and", "block_start", ".", "previous", ":", "error_message", "=", "'Should have a blank line before a file overview.'", "expected_blank_lines", "=", "1", "elif", "is_constructor", "and", "blank_lines", "!=", "3", ":", "error_message", "=", "(", "'Should have 3 blank lines before a constructor/interface.'", ")", "expected_blank_lines", "=", "3", "elif", "(", "not", "is_file_level_comment", "and", "not", "is_constructor", "and", "blank_lines", "!=", "2", ")", ":", "error_message", "=", "'Should have 2 blank lines between top-level blocks.'", "expected_blank_lines", "=", "2", "if", "error_message", ":", "self", ".", "_HandleError", "(", "errors", ".", "WRONG_BLANK_LINE_COUNT", ",", "error_message", ",", "block_start", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ",", "fix_data", "=", "expected_blank_lines", "-", "blank_lines", ")", "elif", "token", ".", "type", "==", "Type", ".", "END_BLOCK", ":", "if", "state", ".", "InFunction", "(", ")", "and", "state", ".", "IsFunctionClose", "(", ")", ":", "is_immediately_called", "=", "(", "token", ".", "next", "and", "token", ".", "next", ".", "type", "==", "Type", ".", "START_PAREN", ")", "function", "=", "state", ".", "GetFunction", "(", ")", "if", "not", "self", ".", "_limited_doc_checks", ":", "if", "(", "function", ".", "has_return", "and", "function", ".", "doc", "and", "not", "is_immediately_called", "and", "not", "function", ".", "doc", ".", "HasFlag", "(", "'return'", ")", "and", "not", "function", ".", "doc", ".", "InheritsDocumentation", "(", ")", "and", "not", "function", ".", "doc", ".", "HasFlag", "(", "'constructor'", ")", ")", ":", "# Check for proper documentation of return value.", "self", ".", "_HandleError", "(", "errors", ".", "MISSING_RETURN_DOCUMENTATION", ",", "'Missing @return JsDoc in function with non-trivial return'", ",", "function", ".", "doc", ".", "end_token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")", "elif", "(", "not", "function", ".", "has_return", "and", "not", "function", ".", "has_throw", "and", "function", ".", "doc", "and", "function", ".", "doc", ".", "HasFlag", "(", "'return'", ")", "and", "not", "state", ".", "InInterfaceMethod", "(", ")", ")", ":", "flag", "=", "function", ".", "doc", ".", "GetFlag", "(", "'return'", ")", "valid_no_return_names", "=", "[", "'undefined'", ",", "'void'", ",", "'*'", "]", "invalid_return", "=", "flag", ".", "jstype", "is", "None", "or", "not", "any", "(", "sub_type", ".", "identifier", "in", "valid_no_return_names", "for", "sub_type", "in", "flag", ".", "jstype", ".", "IterTypeGroup", "(", ")", ")", "if", "invalid_return", ":", "self", ".", "_HandleError", "(", "errors", ".", "UNNECESSARY_RETURN_DOCUMENTATION", ",", "'Found @return JsDoc on function that returns nothing'", ",", "flag", ".", "flag_token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")", "# b/4073735. 
Method in object literal definition of prototype can", "# safely reference 'this'.", "prototype_object_literal", "=", "False", "block_start", "=", "None", "previous_code", "=", "None", "previous_previous_code", "=", "None", "# Search for cases where prototype is defined as object literal.", "# previous_previous_code", "# | previous_code", "# | | block_start", "# | | |", "# a.b.prototype = {", "# c : function() {", "# this.d = 1;", "# }", "# }", "# If in object literal, find first token of block so to find previous", "# tokens to check above condition.", "if", "state", ".", "InObjectLiteral", "(", ")", ":", "block_start", "=", "state", ".", "GetCurrentBlockStart", "(", ")", "# If an object literal then get previous token (code type). For above", "# case it should be '='.", "if", "block_start", ":", "previous_code", "=", "tokenutil", ".", "SearchExcept", "(", "block_start", ",", "Type", ".", "NON_CODE_TYPES", ",", "reverse", "=", "True", ")", "# If previous token to block is '=' then get its previous token.", "if", "previous_code", "and", "previous_code", ".", "IsOperator", "(", "'='", ")", ":", "previous_previous_code", "=", "tokenutil", ".", "SearchExcept", "(", "previous_code", ",", "Type", ".", "NON_CODE_TYPES", ",", "reverse", "=", "True", ")", "# If variable/token before '=' ends with '.prototype' then its above", "# case of prototype defined with object literal.", "prototype_object_literal", "=", "(", "previous_previous_code", "and", "previous_previous_code", ".", "string", ".", "endswith", "(", "'.prototype'", ")", ")", "if", "(", "function", ".", "has_this", "and", "function", ".", "doc", "and", "not", "function", ".", "doc", ".", "HasFlag", "(", "'this'", ")", "and", "not", "function", ".", "is_constructor", "and", "not", "function", ".", "is_interface", "and", "'.prototype.'", "not", "in", "function", ".", "name", "and", "not", "prototype_object_literal", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "MISSING_JSDOC_TAG_THIS", ",", "'Missing @this JsDoc in function referencing \"this\". 
('", "'this usually means you are trying to reference \"this\" in '", "'a static function, or you have forgotten to mark a '", "'constructor with @constructor)'", ",", "function", ".", "doc", ".", "end_token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")", "elif", "token", ".", "type", "==", "Type", ".", "IDENTIFIER", ":", "if", "token", ".", "string", "==", "'goog.inherits'", "and", "not", "state", ".", "InFunction", "(", ")", ":", "if", "state", ".", "GetLastNonSpaceToken", "(", ")", ".", "line_number", "==", "token", ".", "line_number", ":", "self", ".", "_HandleError", "(", "errors", ".", "MISSING_LINE", ",", "'Missing newline between constructor and goog.inherits'", ",", "token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")", "extra_space", "=", "state", ".", "GetLastNonSpaceToken", "(", ")", ".", "next", "while", "extra_space", "!=", "token", ":", "if", "extra_space", ".", "type", "==", "Type", ".", "BLANK_LINE", ":", "self", ".", "_HandleError", "(", "errors", ".", "EXTRA_LINE", ",", "'Extra line between constructor and goog.inherits'", ",", "extra_space", ")", "extra_space", "=", "extra_space", ".", "next", "# TODO(robbyw): Test the last function was a constructor.", "# TODO(robbyw): Test correct @extends and @implements documentation.", "elif", "(", "token", ".", "string", "==", "'goog.provide'", "and", "not", "state", ".", "InFunction", "(", ")", "and", "namespaces_info", "is", "not", "None", ")", ":", "namespace", "=", "tokenutil", ".", "GetStringAfterToken", "(", "token", ")", "# Report extra goog.provide statement.", "if", "not", "namespace", "or", "namespaces_info", ".", "IsExtraProvide", "(", "token", ")", ":", "if", "not", "namespace", ":", "msg", "=", "'Empty namespace in goog.provide'", "else", ":", "msg", "=", "'Unnecessary goog.provide: '", "+", "namespace", "# Hint to user if this is a Test namespace.", "if", "namespace", ".", "endswith", "(", "'Test'", ")", ":", "msg", "+=", "(", "' *Test namespaces must be mentioned in the '", "'goog.setTestOnly() call'", ")", "self", ".", "_HandleError", "(", "errors", ".", "EXTRA_GOOG_PROVIDE", ",", "msg", ",", "token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")", "if", "namespaces_info", ".", "IsLastProvide", "(", "token", ")", ":", "# Report missing provide statements after the last existing provide.", "missing_provides", "=", "namespaces_info", ".", "GetMissingProvides", "(", ")", "if", "missing_provides", ":", "self", ".", "_ReportMissingProvides", "(", "missing_provides", ",", "tokenutil", ".", "GetLastTokenInSameLine", "(", "token", ")", ".", "next", ",", "False", ")", "# If there are no require statements, missing requires should be", "# reported after the last provide.", "if", "not", "namespaces_info", ".", "GetRequiredNamespaces", "(", ")", ":", "missing_requires", ",", "illegal_alias_statements", "=", "(", "namespaces_info", ".", "GetMissingRequires", "(", ")", ")", "if", "missing_requires", ":", "self", ".", "_ReportMissingRequires", "(", "missing_requires", ",", "tokenutil", ".", "GetLastTokenInSameLine", "(", "token", ")", ".", "next", ",", "True", ")", "if", "illegal_alias_statements", ":", "self", ".", "_ReportIllegalAliasStatement", "(", "illegal_alias_statements", ")", "elif", "(", "token", ".", "string", "==", "'goog.require'", "and", "not", "state", ".", "InFunction", "(", ")", "and", "namespaces_info", "is", "not", "None", ")", ":", "namespace", "=", "tokenutil", ".", "GetStringAfterToken", "(", "token", ")", "# If there are no provide 
statements, missing provides should be", "# reported before the first require.", "if", "(", "namespaces_info", ".", "IsFirstRequire", "(", "token", ")", "and", "not", "namespaces_info", ".", "GetProvidedNamespaces", "(", ")", ")", ":", "missing_provides", "=", "namespaces_info", ".", "GetMissingProvides", "(", ")", "if", "missing_provides", ":", "self", ".", "_ReportMissingProvides", "(", "missing_provides", ",", "tokenutil", ".", "GetFirstTokenInSameLine", "(", "token", ")", ",", "True", ")", "# Report extra goog.require statement.", "if", "not", "namespace", "or", "namespaces_info", ".", "IsExtraRequire", "(", "token", ")", ":", "if", "not", "namespace", ":", "msg", "=", "'Empty namespace in goog.require'", "else", ":", "msg", "=", "'Unnecessary goog.require: '", "+", "namespace", "self", ".", "_HandleError", "(", "errors", ".", "EXTRA_GOOG_REQUIRE", ",", "msg", ",", "token", ",", "position", "=", "Position", ".", "AtBeginning", "(", ")", ")", "# Report missing goog.require statements.", "if", "namespaces_info", ".", "IsLastRequire", "(", "token", ")", ":", "missing_requires", ",", "illegal_alias_statements", "=", "(", "namespaces_info", ".", "GetMissingRequires", "(", ")", ")", "if", "missing_requires", ":", "self", ".", "_ReportMissingRequires", "(", "missing_requires", ",", "tokenutil", ".", "GetLastTokenInSameLine", "(", "token", ")", ".", "next", ",", "False", ")", "if", "illegal_alias_statements", ":", "self", ".", "_ReportIllegalAliasStatement", "(", "illegal_alias_statements", ")", "elif", "token", ".", "type", "==", "Type", ".", "OPERATOR", ":", "last_in_line", "=", "token", ".", "IsLastInLine", "(", ")", "# If the token is unary and appears to be used in a unary context", "# it's ok. Otherwise, if it's at the end of the line or immediately", "# before a comment, it's ok.", "# Don't report an error before a start bracket - it will be reported", "# by that token's space checks.", "if", "(", "not", "token", ".", "metadata", ".", "IsUnaryOperator", "(", ")", "and", "not", "last_in_line", "and", "not", "token", ".", "next", ".", "IsComment", "(", ")", "and", "not", "token", ".", "next", ".", "IsOperator", "(", "','", ")", "and", "not", "tokenutil", ".", "IsDot", "(", "token", ")", "and", "token", ".", "next", ".", "type", "not", "in", "(", "Type", ".", "WHITESPACE", ",", "Type", ".", "END_PAREN", ",", "Type", ".", "END_BRACKET", ",", "Type", ".", "SEMICOLON", ",", "Type", ".", "START_BRACKET", ")", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "MISSING_SPACE", ",", "'Missing space after \"%s\"'", "%", "token", ".", "string", ",", "token", ",", "position", "=", "Position", ".", "AtEnd", "(", "token", ".", "string", ")", ")", "elif", "token", ".", "type", "==", "Type", ".", "WHITESPACE", ":", "first_in_line", "=", "token", ".", "IsFirstInLine", "(", ")", "last_in_line", "=", "token", ".", "IsLastInLine", "(", ")", "# Check whitespace length if it's not the first token of the line and", "# if it's not immediately before a comment.", "if", "not", "last_in_line", "and", "not", "first_in_line", "and", "not", "token", ".", "next", ".", "IsComment", "(", ")", ":", "# Ensure there is no space after opening parentheses.", "if", "(", "token", ".", "previous", ".", "type", "in", "(", "Type", ".", "START_PAREN", ",", "Type", ".", "START_BRACKET", ",", "Type", ".", "FUNCTION_NAME", ")", "or", "token", ".", "next", ".", "type", "==", "Type", ".", "START_PARAMETERS", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "EXTRA_SPACE", ",", "'Extra space after \"%s\"'", "%", 
"token", ".", "previous", ".", "string", ",", "token", ",", "position", "=", "Position", ".", "All", "(", "token", ".", "string", ")", ")", "elif", "token", ".", "type", "==", "Type", ".", "SEMICOLON", ":", "previous_token", "=", "tokenutil", ".", "SearchExcept", "(", "token", ",", "Type", ".", "NON_CODE_TYPES", ",", "reverse", "=", "True", ")", "if", "not", "previous_token", ":", "self", ".", "_HandleError", "(", "errors", ".", "REDUNDANT_SEMICOLON", ",", "'Semicolon without any statement'", ",", "token", ",", "position", "=", "Position", ".", "AtEnd", "(", "token", ".", "string", ")", ")", "elif", "(", "previous_token", ".", "type", "==", "Type", ".", "KEYWORD", "and", "previous_token", ".", "string", "not", "in", "[", "'break'", ",", "'continue'", ",", "'return'", "]", ")", ":", "self", ".", "_HandleError", "(", "errors", ".", "REDUNDANT_SEMICOLON", ",", "(", "'Semicolon after \\'%s\\' without any statement.'", "' Looks like an error.'", "%", "previous_token", ".", "string", ")", ",", "token", ",", "position", "=", "Position", ".", "AtEnd", "(", "token", ".", "string", ")", ")" ]
https://github.com/hanpfei/chromium-net/blob/392cc1fa3a8f92f42e4071ab6e674d8e0482f83f/third_party/catapult/third_party/closure_linter/closure_linter/javascriptlintrules.py#L66-L515
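CheckToken() above is one long dispatch on token.type; a heavily reduced sketch of that pattern, with an invented stand-in for the closure_linter token model:

class Token:
    def __init__(self, type_, string, previous=None):
        self.type, self.string, self.previous = type_, string, previous

def check_semicolon(token, handle_error):
    # mirrors the REDUNDANT_SEMICOLON branch: a semicolon must follow a statement
    if token.type != 'SEMICOLON':
        return
    if token.previous is None or token.previous.type == 'SEMICOLON':
        handle_error('REDUNDANT_SEMICOLON', 'Semicolon without any statement')

check_semicolon(Token('SEMICOLON', ';'), lambda code, msg: print(code, msg))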
panda3d/panda3d
833ad89ebad58395d0af0b7ec08538e5e4308265
samples/networking/06-simple-avatar/ClientRepository.py
python
GameClientRepository.gotCreateReady
(self)
Ready to enter the world. Expand our interest to include any other zones
Ready to enter the world. Expand our interest to include any other zones
[ "Ready", "to", "enter", "the", "world", ".", "Expand", "our", "interest", "to", "include", "any", "other", "zones" ]
def gotCreateReady(self): """ Ready to enter the world. Expand our interest to include any other zones """ # This method checks whether we actually have a valid doID range # to create distributed objects yet if not self.haveCreateAuthority(): # Not ready yet. return # we are ready now, so ignore further createReady events self.ignore(self.uniqueName('createReady')) self.join() print("Client Ready")
[ "def", "gotCreateReady", "(", "self", ")", ":", "# This method checks whether we actually have a valid doID range", "# to create distributed objects yet", "if", "not", "self", ".", "haveCreateAuthority", "(", ")", ":", "# Not ready yet.", "return", "# we are ready now, so ignore further createReady events", "self", ".", "ignore", "(", "self", ".", "uniqueName", "(", "'createReady'", ")", ")", "self", ".", "join", "(", ")", "print", "(", "\"Client Ready\"", ")" ]
https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/samples/networking/06-simple-avatar/ClientRepository.py#L79-L94
sdhash/sdhash
b9eff63e4e5867e910f41fd69032bbb1c94a2a5e
sdhash-ui/cherrypy/lib/sessions.py
python
Session.regenerate
(self)
Replace the current session (with a new id).
Replace the current session (with a new id).
[ "Replace", "the", "current", "session", "(", "with", "a", "new", "id", ")", "." ]
def regenerate(self): """Replace the current session (with a new id).""" self.regenerated = True self._regenerate()
[ "def", "regenerate", "(", "self", ")", ":", "self", ".", "regenerated", "=", "True", "self", ".", "_regenerate", "(", ")" ]
https://github.com/sdhash/sdhash/blob/b9eff63e4e5867e910f41fd69032bbb1c94a2a5e/sdhash-ui/cherrypy/lib/sessions.py#L183-L186
benoitsteiner/tensorflow-opencl
cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5
tensorflow/contrib/learn/python/learn/monitors.py
python
ValidationMonitor.best_value
(self)
return self._best_value
Returns the best early stopping metric value found so far.
Returns the best early stopping metric value found so far.
[ "Returns", "the", "best", "early", "stopping", "metric", "value", "found", "so", "far", "." ]
def best_value(self): """Returns the best early stopping metric value found so far.""" return self._best_value
[ "def", "best_value", "(", "self", ")", ":", "return", "self", ".", "_best_value" ]
https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/contrib/learn/python/learn/monitors.py#L634-L636
apple/turicreate
cce55aa5311300e3ce6af93cb45ba791fd1bdf49
deps/src/libxml2-2.9.1/python/libxml2class.py
python
parserCtxt.htmlCtxtReadDoc
(self, cur, URL, encoding, options)
return __tmp
parse an XML in-memory document and build a tree. This reuses the existing @ctxt parser context
parse an XML in-memory document and build a tree. This reuses the existing
[ "parse", "an", "XML", "in", "-", "memory", "document", "and", "build", "a", "tree", ".", "This", "reuses", "the", "existing" ]
def htmlCtxtReadDoc(self, cur, URL, encoding, options): """parse an XML in-memory document and build a tree. This reuses the existing @ctxt parser context """ ret = libxml2mod.htmlCtxtReadDoc(self._o, cur, URL, encoding, options) if ret is None:raise treeError('htmlCtxtReadDoc() failed') __tmp = xmlDoc(_obj=ret) return __tmp
[ "def", "htmlCtxtReadDoc", "(", "self", ",", "cur", ",", "URL", ",", "encoding", ",", "options", ")", ":", "ret", "=", "libxml2mod", ".", "htmlCtxtReadDoc", "(", "self", ".", "_o", ",", "cur", ",", "URL", ",", "encoding", ",", "options", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'htmlCtxtReadDoc() failed'", ")", "__tmp", "=", "xmlDoc", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/deps/src/libxml2-2.9.1/python/libxml2class.py#L4167-L4173
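A hedged usage sketch for the binding above; it assumes the newParserCtxt() constructor from the same generated bindings and passes 0 for the parser options:

import libxml2

ctxt = libxml2.newParserCtxt()
doc = ctxt.htmlCtxtReadDoc("<html><body><p>hi</p></body></html>",
                           "http://example.com/", None, 0)
print(doc.getRootElement().name)  # html
doc.freeDoc()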
snap-stanford/snap-python
d53c51b0a26aa7e3e7400b014cdf728948fde80a
setup/snap.py
python
TIntIntVV.IsInBin
(self, *args)
return _snap.TIntIntVV_IsInBin(self, *args)
IsInBin(TIntIntVV self, TIntV Val) -> bool Parameters: Val: TVec< TInt,int > const &
IsInBin(TIntIntVV self, TIntV Val) -> bool
[ "IsInBin", "(", "TIntIntVV", "self", "TIntV", "Val", ")", "-", ">", "bool" ]
def IsInBin(self, *args): """ IsInBin(TIntIntVV self, TIntV Val) -> bool Parameters: Val: TVec< TInt,int > const & """ return _snap.TIntIntVV_IsInBin(self, *args)
[ "def", "IsInBin", "(", "self", ",", "*", "args", ")", ":", "return", "_snap", ".", "TIntIntVV_IsInBin", "(", "self", ",", "*", "args", ")" ]
https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L17333-L17341
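IsInBin reports whether an equal element (here a whole inner TIntV) is present, using a binary search, so it assumes the outer vector is kept sorted. A minimal sketch, assuming the usual SNAP Python bindings:

import snap

row = snap.TIntV()
row.Add(1)
row.Add(2)

vv = snap.TIntIntVV()
vv.Add(row)              # a one-element vector is trivially sorted

print(vv.IsInBin(row))   # True: an equal inner vector exists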
catboost/catboost
167f64f237114a4d10b2b4ee42adb4569137debe
contrib/python/scikit-learn/py2/sklearn/covariance/empirical_covariance_.py
python
EmpiricalCovariance.score
(self, X_test, y=None)
return res
Computes the log-likelihood of a Gaussian data set with `self.covariance_` as an estimator of its covariance matrix. Parameters ---------- X_test : array-like, shape = [n_samples, n_features] Test data of which we compute the likelihood, where n_samples is the number of samples and n_features is the number of features. X_test is assumed to be drawn from the same distribution as the data used in fit (including centering). y : not used, present for API consistency purposes. Returns ------- res : float The likelihood of the data set with `self.covariance_` as an estimator of its covariance matrix.
Computes the log-likelihood of a Gaussian data set with `self.covariance_` as an estimator of its covariance matrix.
[ "Computes", "the", "log", "-", "likelihood", "of", "a", "Gaussian", "data", "set", "with", "self", ".", "covariance_", "as", "an", "estimator", "of", "its", "covariance", "matrix", "." ]
def score(self, X_test, y=None):
        """Computes the log-likelihood of a Gaussian data set with
        `self.covariance_` as an estimator of its covariance matrix.

        Parameters
        ----------
        X_test : array-like, shape = [n_samples, n_features]
          Test data of which we compute the likelihood, where n_samples is
          the number of samples and n_features is the number of features.
          X_test is assumed to be drawn from the same distribution as
          the data used in fit (including centering).

        y : not used, present for API consistency purposes.

        Returns
        -------
        res : float
          The likelihood of the data set with `self.covariance_` as an
          estimator of its covariance matrix.

        """
        # compute empirical covariance of the test set
        test_cov = empirical_covariance(
            X_test - self.location_, assume_centered=True)
        # compute log likelihood
        res = log_likelihood(test_cov, self.get_precision())

        return res
[ "def", "score", "(", "self", ",", "X_test", ",", "y", "=", "None", ")", ":", "# compute empirical covariance of the test set", "test_cov", "=", "empirical_covariance", "(", "X_test", "-", "self", ".", "location_", ",", "assume_centered", "=", "True", ")", "# compute log likelihood", "res", "=", "log_likelihood", "(", "test_cov", ",", "self", ".", "get_precision", "(", ")", ")", "return", "res" ]
https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scikit-learn/py2/sklearn/covariance/empirical_covariance_.py#L184-L211
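A minimal usage sketch: fit the estimator on training data, then score held-out data assumed to come from the same distribution (synthetic data here purely for illustration):

import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X_train = rng.randn(500, 3)
X_test = rng.randn(100, 3)

cov = EmpiricalCovariance().fit(X_train)
# Gaussian log-likelihood of the held-out samples under the
# covariance estimated from the training set
print(cov.score(X_test))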
gnuradio/gnuradio
09c3c4fa4bfb1a02caac74cb5334dfe065391e3b
grc/core/utils/backports/chainmap.py
python
ChainMap.copy
(self)
return self.__class__(self.maps[0].copy(), *self.maps[1:])
New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]
New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]
[ "New", "ChainMap", "or", "subclass", "with", "a", "new", "copy", "of", "maps", "[", "0", "]", "and", "refs", "to", "maps", "[", "1", ":", "]" ]
def copy(self): """New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]""" return self.__class__(self.maps[0].copy(), *self.maps[1:])
[ "def", "copy", "(", "self", ")", ":", "return", "self", ".", "__class__", "(", "self", ".", "maps", "[", "0", "]", ".", "copy", "(", ")", ",", "*", "self", ".", "maps", "[", "1", ":", "]", ")" ]
https://github.com/gnuradio/gnuradio/blob/09c3c4fa4bfb1a02caac74cb5334dfe065391e3b/grc/core/utils/backports/chainmap.py#L65-L67
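The backport keeps the stdlib semantics: copy() clones only maps[0] and shares the parent maps by reference. A quick illustration using collections.ChainMap, which this backport mirrors:

from collections import ChainMap

child, parent = {'a': 1}, {'b': 2}
cm = ChainMap(child, parent)

clone = cm.copy()
clone['a'] = 99   # lands in the clone's fresh first map
clone['b'] = 42   # writes always target maps[0], so parent is untouched

print(cm['a'], cm['b'])  # 1 2 -- original unaffected
print(parent)            # {'b': 2} -- shared parent not copied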
apache/singa
93fd9da72694e68bfe3fb29d0183a65263d238a1
python/singa/autograd.py
python
acosh
(x)
return Acosh()(x)[0]
Calculates the hyperbolic arccosine of the given input tensor element-wise. Args: x (Tensor): Input tensor Returns: Tensor, the output
Calculates the hyperbolic arccosine of the given input tensor element-wise. Args: x (Tensor): Input tensor Returns: Tensor, the output
[ "Calculates", "the", "hyperbolic", "arccosine", "of", "the", "given", "input", "tensor", "element", "-", "wise", ".", "Args", ":", "x", "(", "Tensor", ")", ":", "Input", "tensor", "Returns", ":", "Tensor", "the", "output" ]
def acosh(x): """ Calculates the hyperbolic arccosine of the given input tensor element-wise. Args: x (Tensor): Input tensor Returns: Tensor, the output """ return Acosh()(x)[0]
[ "def", "acosh", "(", "x", ")", ":", "return", "Acosh", "(", ")", "(", "x", ")", "[", "0", "]" ]
https://github.com/apache/singa/blob/93fd9da72694e68bfe3fb29d0183a65263d238a1/python/singa/autograd.py#L2136-L2144
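Element-wise acosh follows the identity acosh(x) = ln(x + sqrt(x^2 - 1)) for x >= 1. A small NumPy check of that identity (illustrating only the math, not the singa API):

import numpy as np

x = np.array([1.0, 2.0, 10.0])
manual = np.log(x + np.sqrt(x * x - 1.0))
print(np.allclose(np.arccosh(x), manual))  # True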
gromacs/gromacs
7dec3a3f99993cf5687a122de3e12de31c21c399
python_packaging/src/gmxapi/simulation/workflow.py
python
WorkSpec.__chase_deps
(self, source_set: Set[str], name_list: Iterable[Text])
Helper to recursively generate dependencies before dependents. Given a set of WorkElement objects and a list of element names, generate WorkElements for the members of name_list plus their dependencies in an order such that dependencies are guaranteed to occur before their dependent elements. For example, to sequence an entire work specification into a reasonable order for instantiation, use >>> workspec.__chase_deps(set(workspec.elements.keys()), list(workspec.elements.keys())) Note: as a member function of WorkSpec, we have access to the full WorkSpec elements data at all times, giving us extra flexibility in implementation and arguments. Args: source_set: a (super)set of element names from the current work spec (will be consumed) name_list: subset of *source_set* to be sequenced Returns: Sequence of WorkElement objects drawn from the names in *source_set* Requires that WorkElements named in *name_list* and any elements on which they depend are all named in *source_set* and available in the current work spec. Warning: *source_set* is a reference to an object that is modified arbitrarily. The caller should not re-use the object after calling _chase_deps(). (Make a copy first, if needed.) TODO: Separate out DAG topology operations from here and Context.__enter__() Our needs are simple enough that we probably don't need an external dependency like networkx...
Helper to recursively generate dependencies before dependents.
[ "Helper", "to", "recursively", "generate", "dependencies", "before", "dependents", "." ]
def __chase_deps(self, source_set: Set[str], name_list: Iterable[Text]):
        """Helper to recursively generate dependencies before dependents.

        Given a set of WorkElement objects and a list of element names, generate WorkElements for
        the members of name_list plus their dependencies in an order such that dependencies are
        guaranteed to occur before their dependent elements.

        For example, to sequence an entire work specification into a reasonable order for
        instantiation, use

            >>> workspec.__chase_deps(set(workspec.elements.keys()), list(workspec.elements.keys()))

        Note: as a member function of WorkSpec, we have access to the full WorkSpec elements data at
        all times, giving us extra flexibility in implementation and arguments.

        Args:
            source_set: a (super)set of element names from the current work spec (will be consumed)
            name_list: subset of *source_set* to be sequenced

        Returns:
            Sequence of WorkElement objects drawn from the names in *source_set*

        Requires that WorkElements named in *name_list* and any elements on which they depend are
        all named in *source_set* and available in the current work spec.

        Warning: *source_set* is a reference to an object that is modified arbitrarily.
        The caller should not re-use the object after calling _chase_deps().
        (Make a copy first, if needed.)

        TODO: Separate out DAG topology operations from here and Context.__enter__()
        Our needs are simple enough that we probably don't need an external dependency
        like networkx...
        """
        # Recursively (depth-first) generate a topologically valid serialized DAG from source_set.
        assert isinstance(source_set, set)
        if isinstance(name_list, (str, bytes)):
            warnings.warn('name_list appears to be a single name. Disambiguate a string by passing a list or tuple.')
        assert isinstance(name_list, collections.abc.Iterable)

        # Make a copy of name_list in case the input reference is being used elsewhere during
        # iteration, such as for source_set, which is modified during the loop.
        for name in tuple(name_list):
            assert isinstance(name, str)
            if name in source_set:
                source_set.remove(name)
                element = WorkElement.deserialize(self.elements[name], name=name, workspec=self)
                dependencies = element.depends
                # items in element.depends are either element names or ensembles of element names.
                for item in dependencies:
                    if isinstance(item, (list, tuple, set)):
                        dependency_list = item
                    else:
                        if not isinstance(item, str):
                            raise exceptions.ValueError(
                                'Dependencies should be a string or sequence of strings. Got {}'.format(type(item)))
                        dependency_list = [item]
                    for dependency in dependency_list:
                        for recursive_dep in self.__chase_deps(source_set, (dependency,)):
                            yield recursive_dep
                yield element
            else:
                # Note: The user is responsible for ensuring that source_set is complete.
                # Otherwise, we would need to maintain a list of elements previously yielded.
                pass
[ "def", "__chase_deps", "(", "self", ",", "source_set", ":", "Set", "[", "str", "]", ",", "name_list", ":", "Iterable", "[", "Text", "]", ")", ":", "# Recursively (depth-first) generate a topologically valid serialized DAG from source_set.", "assert", "isinstance", "(", "source_set", ",", "set", ")", "if", "isinstance", "(", "name_list", ",", "(", "str", ",", "bytes", ")", ")", ":", "warnings", ".", "warn", "(", "'name_list appears to be a single name. Disambiguate a string by passing a list or tuple.'", ")", "assert", "isinstance", "(", "name_list", ",", "collections", ".", "abc", ".", "Iterable", ")", "# Make a copy of name_list in case the input reference is being used elsewhere during", "# iteration, such as for source_set, which is modified during the loop.", "for", "name", "in", "tuple", "(", "name_list", ")", ":", "assert", "isinstance", "(", "name", ",", "str", ")", "if", "name", "in", "source_set", ":", "source_set", ".", "remove", "(", "name", ")", "element", "=", "WorkElement", ".", "deserialize", "(", "self", ".", "elements", "[", "name", "]", ",", "name", "=", "name", ",", "workspec", "=", "self", ")", "dependencies", "=", "element", ".", "depends", "# items in element.depends are either element names or ensembles of element names.", "for", "item", "in", "dependencies", ":", "if", "isinstance", "(", "item", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "dependency_list", "=", "item", "else", ":", "if", "not", "isinstance", "(", "item", ",", "str", ")", ":", "raise", "exceptions", ".", "ValueError", "(", "'Dependencies should be a string or sequence of strings. Got {}'", ".", "format", "(", "type", "(", "item", ")", ")", ")", "dependency_list", "=", "[", "item", "]", "for", "dependency", "in", "dependency_list", ":", "for", "recursive_dep", "in", "self", ".", "__chase_deps", "(", "source_set", ",", "(", "dependency", ",", ")", ")", ":", "yield", "recursive_dep", "yield", "element", "else", ":", "# Note: The user is responsible for ensuring that source_set is complete.", "# Otherwise, we would need to maintain a list of elements previously yielded.", "pass" ]
https://github.com/gromacs/gromacs/blob/7dec3a3f99993cf5687a122de3e12de31c21c399/python_packaging/src/gmxapi/simulation/workflow.py#L213-L276
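Stripped of the WorkSpec machinery, the ordering logic is a depth-first walk that consumes a set so each name is yielded exactly once, after its dependencies. A self-contained sketch over plain dicts (all element names hypothetical):

def chase_deps(source_set, names, depends):
    # Yield each name after its dependencies, removing names from
    # source_set so nothing is emitted twice.
    for name in tuple(names):
        if name in source_set:
            source_set.remove(name)
            for dep in depends.get(name, ()):
                yield from chase_deps(source_set, (dep,), depends)
            yield name

depends = {'sim': ['topology', 'params'], 'params': ['topology']}
names = {'sim', 'params', 'topology'}
print(list(chase_deps(set(names), names, depends)))
# always dependencies-first, e.g. ['topology', 'params', 'sim']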
miyosuda/TensorFlowAndroidMNIST
7b5a4603d2780a8a2834575706e9001977524007
jni-build/jni/include/external/bazel_tools/tools/build_defs/docker/rewrite_json.py
python
GetParentIdentifier
(f)
return None
Try to look at the parent identifier from a docker image. The identifier is expected to be in the 'top' file for our rule so we look at it first ('./top', 'top'). If it's not found, then we use the 'repositories' file and try to parse it to get the first declared repository (so we can actually parse a file generated by 'docker save'). Args: f: the input tar file. Returns: The identifier of the docker image, or None if no identifier was found.
Try to look at the parent identifier from a docker image.
[ "Try", "to", "look", "at", "the", "parent", "identifier", "from", "a", "docker", "image", "." ]
def GetParentIdentifier(f):
  """Try to look at the parent identifier from a docker image.

  The identifier is expected to be in the 'top' file for our rule so we look
  at it first ('./top', 'top'). If it's not found, then we use the
  'repositories' file and try to parse it to get the first declared
  repository (so we can actually parse a file generated by 'docker save').

  Args:
    f: the input tar file.
  Returns:
    The identifier of the docker image, or None if no identifier was found.
  """
  # TODO(dmarting): Maybe we could drop the 'top' file all together?
  top = utils.GetTarFile(f, 'top')
  if top:
    return top.strip()
  repositories = utils.GetTarFile(f, 'repositories')
  if repositories:
    data = json.loads(repositories)
    for k1 in data:
      for k2 in data[k1]:
        # Returns the first found key
        return data[k1][k2].strip()
  return None
[ "def", "GetParentIdentifier", "(", "f", ")", ":", "# TODO(dmarting): Maybe we could drop the 'top' file all together?", "top", "=", "utils", ".", "GetTarFile", "(", "f", ",", "'top'", ")", "if", "top", ":", "return", "top", ".", "strip", "(", ")", "repositories", "=", "utils", ".", "GetTarFile", "(", "f", ",", "'repositories'", ")", "if", "repositories", ":", "data", "=", "json", ".", "loads", "(", "repositories", ")", "for", "k1", "in", "data", ":", "for", "k2", "in", "data", "[", "k1", "]", ":", "# Returns the first found key", "return", "data", "[", "k1", "]", "[", "k2", "]", ".", "strip", "(", ")", "return", "None" ]
https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/external/bazel_tools/tools/build_defs/docker/rewrite_json.py#L240-L264
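The 'repositories' fallback can be mimicked with only the standard library: open a 'docker save' tarball and return the first tag's layer identifier. A minimal sketch under the member-name layout assumed above:

import json
import tarfile

def first_repository_tag(tar_path):
    with tarfile.open(tar_path) as tar:
        try:
            raw = tar.extractfile('repositories').read()
        except KeyError:       # no 'repositories' member in the tarball
            return None
        data = json.loads(raw)
        for repo in data:
            for tag in data[repo]:
                # Mirror the original: first key found wins
                return data[repo][tag].strip()
    return None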