Dataset schema (per-record columns and value statistics):

| column            | type          | min length | max length |
|-------------------|---------------|------------|------------|
| nwo               | stringlengths | 5          | 106        |
| sha               | stringlengths | 40         | 40         |
| path              | stringlengths | 4          | 174        |
| language          | stringclasses | 1 value    |            |
| identifier        | stringlengths | 1          | 140        |
| parameters        | stringlengths | 0          | 87.7k      |
| argument_list     | stringclasses | 1 value    |            |
| return_statement  | stringlengths | 0          | 426k       |
| docstring         | stringlengths | 0          | 64.3k      |
| docstring_summary | stringlengths | 0          | 26.3k      |
| docstring_tokens  | list          |            |            |
| function          | stringlengths | 18         | 4.83M      |
| function_tokens   | list          |            |            |
| url               | stringlengths | 83         | 304        |
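The records below follow this schema. As a minimal, hedged sketch of how such records might be consumed, assuming they are stored one JSON object per line (the filename `functions.jsonl` is hypothetical, not part of the dataset):

```python
import json

# Iterate records that follow the schema above, assuming newline-delimited
# JSON storage; the filename is a placeholder.
with open("functions.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # Each record ties a function body to its repository location.
        print(record["nwo"], record["path"], record["identifier"], sep=" | ")
```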
nwo: n1nj4sec/pupy
sha: a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39
path: pupy/packages/windows/all/pupwinutils/processes.py
language: python
identifier: is_x64_architecture
parameters: ()
docstring_summary: Return True if the architecture is x64

```python
def is_x64_architecture():
    """ Return True if the architecture is x64 """
    if "64" in platform.machine():
        return True
    else:
        return False
```
https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/packages/windows/all/pupwinutils/processes.py#L76-L81
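The function above relies on a substring test against `platform.machine()`. A small sketch of the values it typically sees (output varies by platform):

```python
import platform

# platform.machine() returns e.g. "AMD64" on 64-bit Windows or "x86_64"
# on 64-bit Linux; both contain "64", which is what the function tests.
print(platform.machine())
print("64" in platform.machine())
```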
nwo: XX-net/XX-Net
sha: a9898cfcf0084195fb7e69b6bc834e59aecdf14f
path: python3.8.2/Lib/os.py
language: python
identifier: execvpe
parameters: (file, args, env)
docstring_summary: execvpe(file, args, env)

```python
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
    current process.
    args may be a list or tuple of strings."""
    _execvpe(file, args, env)
```
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/os.py#L570-L577
nwo: zaiweizhang/H3DNet
sha: e69f2855634807b37ae12e6db5963c924e64d3e7
path: utils/tf_logger.py
language: python
identifier: Logger.scalar_summary
parameters: (self, tag, value, step)
docstring_summary: Log a scalar variable.

```python
def scalar_summary(self, tag, value, step):
    """Log a scalar variable."""
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
    self.writer.add_summary(summary, step)
```
https://github.com/zaiweizhang/H3DNet/blob/e69f2855634807b37ae12e6db5963c924e64d3e7/utils/tf_logger.py#L21-L24
nwo: pyparallel/pyparallel
sha: 11e8c6072d48c8f13641925d17b147bf36ee0ba3
path: Lib/mailbox.py
language: python
identifier: _mboxMMDF.get_string
parameters: (self, key, from_=False)
docstring_summary: Return a string representation or raise a KeyError.

```python
def get_string(self, key, from_=False):
    """Return a string representation or raise a KeyError."""
    return email.message_from_bytes(
        self.get_bytes(key)).as_string(unixfrom=from_)
```
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/mailbox.py#L800-L803
nwo: cisco/mindmeld
sha: 809c36112e9ea8019fe29d54d136ca14eb4fd8db
path: mindmeld/cli.py
language: python
identifier: active_learning
parameters: (app_path, batch_size, tuning_level, output_folder, tune, train_seed_pct, n_epochs, plot, select, strategy, unlabeled_logs_path, log_usage_pct, labeled_logs_pattern)
docstring_summary: Command to run active learning training or selection.

```python
def active_learning(  # pylint: disable=R0913
    app_path,
    batch_size,
    tuning_level,
    output_folder,
    tune,
    train_seed_pct,
    n_epochs,
    plot,
    select,
    strategy,
    unlabeled_logs_path,
    log_usage_pct,
    labeled_logs_pattern,
):
    """Command to run active learning training or selection."""
    if not (tune or select):
        raise AssertionError("'tune' or 'select' must be passed in as a paramter.")
    config = get_active_learning_config(app_path=app_path)
    config["app_path"] = app_path or config.get("app_path")
    if batch_size:
        config["tuning"]["batch_size"] = batch_size
    if tuning_level:
        config["tuning"]["tuning_level"] = tuning_level
    config["output_folder"] = output_folder or config.get("output_folder")
    if not output_folder:
        raise AssertionError(
            "An 'output_folder' must be defined in either the CLI command or the config."
        )
    if train_seed_pct:
        config["pre_tuning"]["train_seed_pct"] = train_seed_pct
    if n_epochs:
        config["tuning"]["n_epochs"] = n_epochs
    if strategy:
        if tune:
            config["tuning"]["tuning_strategies"] = [strategy]
        elif select:
            config["query_selection"]["selection_strategy"] = strategy
    if unlabeled_logs_path:
        config["query_selection"]["unlabeled_logs_path"] = unlabeled_logs_path
    if log_usage_pct:
        config["query_selection"]["log_usage_pct"] = log_usage_pct
    if labeled_logs_pattern:
        config["query_selection"]["labeled_logs_pattern"] = labeled_logs_pattern
    alp = ActiveLearningPipelineFactory.create_from_config(config)
    if tune:
        alp.tune_strategies()
        if plot:
            alp.plot()
    elif select:
        alp.select_queries()
```
https://github.com/cisco/mindmeld/blob/809c36112e9ea8019fe29d54d136ca14eb4fd8db/mindmeld/cli.py#L975-L1026
nwo: KalleHallden/AutoTimer
sha: 2d954216700c4930baa154e28dbddc34609af7ce
path: env/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py
language: python
identifier: SpecifierSet.contains
parameters: (self, item, prereleases=None)

```python
def contains(self, item, prereleases=None):
    # Ensure that our item is a Version or LegacyVersion instance.
    if not isinstance(item, (LegacyVersion, Version)):
        item = parse(item)

    # Determine if we're forcing a prerelease or not, if we're not forcing
    # one for this particular filter call, then we'll use whatever the
    # SpecifierSet thinks for whether or not we should support prereleases.
    if prereleases is None:
        prereleases = self.prereleases

    # We can determine if we're going to allow pre-releases by looking to
    # see if any of the underlying items supports them. If none of them do
    # and this item is a pre-release then we do not allow it and we can
    # short circuit that here.
    # Note: This means that 1.0.dev1 would not be contained in something
    # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
    if not prereleases and item.is_prerelease:
        return False

    # We simply dispatch to the underlying specs here to make sure that the
    # given version is contained within all of them.
    # Note: This use of all() here means that an empty set of specifiers
    # will always return True, this is an explicit design decision.
    return all(
        s.contains(item, prereleases=prereleases)
        for s in self._specs
    )
```
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py#L700-L727
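This is a vendored copy of the `packaging` library; the same behavior can be exercised through the standalone `packaging` package. A short usage sketch of the prerelease handling described in the comments:

```python
from packaging.specifiers import SpecifierSet

specs = SpecifierSet(">=1.0,<2.0")
print(specs.contains("1.5"))       # True: satisfies both specifiers
print(specs.contains("1.5rc1"))    # False: prereleases excluded by default
print(specs.contains("1.5rc1", prereleases=True))  # True when forced
```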
nwo: alfiopuglisi/guietta
sha: 1a352e384380a540dcf65c2ff3e5b44b40dce7b8
path: guietta/guietta.py
language: python
identifier: Gui.row_stretch
parameters: (self, *lists)
docstring_summary: Defines the row stretches. Arguments are lists as in the initializer. Since typically all rows have the same stretch, it is allowed to define just one or only a few rows in this method.

```python
def row_stretch(self, *lists):
    '''Defines the row stretches

    Arguments are lists as in the initializer. Since typically all
    rows have the same stretch, it is allowed to define just one
    or only a few rows in this method.

    Every element in the lists must be a number, that will be passed
    to the setRowStretch() QT function, or _ if no particular
    stretch is desired.
    '''
    rows = Rows(lists)
    rows.check_same(self._rows, allow_less_rows=True)

    for i, _, stretch in rows.enumerate():
        self._layout.setRowStretch(i, stretch)
```
https://github.com/alfiopuglisi/guietta/blob/1a352e384380a540dcf65c2ff3e5b44b40dce7b8/guietta/guietta.py#L1930-L1944
nwo: home-assistant/core
sha: 265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
path: homeassistant/components/gpsd/sensor.py
language: python
identifier: GpsdSensor.__init__
parameters: (self, hass, name, host, port)
docstring_summary: Initialize the GPSD sensor.

```python
def __init__(self, hass, name, host, port):
    """Initialize the GPSD sensor."""
    self.hass = hass
    self._name = name
    self._host = host
    self._port = port

    self.agps_thread = AGPS3mechanism()
    self.agps_thread.stream_data(host=self._host, port=self._port)
    self.agps_thread.run_thread()
```
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/gpsd/sensor.py#L80-L89
nwo: annoviko/pyclustering
sha: bf4f51a472622292627ec8c294eb205585e50f52
path: pyclustering/nnet/syncpr.py
language: python
identifier: syncpr_visualizer.show_pattern
parameters: (syncpr_output_dynamic, image_height, image_width)

```python
def show_pattern(syncpr_output_dynamic, image_height, image_width):
    """!
    @brief Displays evolution of phase oscillators as set of patterns where the last one means final result of recognition.

    @param[in] syncpr_output_dynamic (syncpr_dynamic): Output dynamic of a syncpr network.
    @param[in] image_height (uint): Height of the pattern (image_height * image_width should be equal to number of oscillators).
    @param[in] image_width (uint): Width of the pattern.

    """
    number_pictures = len(syncpr_output_dynamic)
    iteration_math_step = 1.0
    if number_pictures > 50:
        iteration_math_step = number_pictures / 50.0
        number_pictures = 50

    number_cols = int(numpy.ceil(number_pictures ** 0.5))
    number_rows = int(numpy.ceil(number_pictures / number_cols))

    real_index = 0, 0
    double_indexer = True
    if (number_cols == 1) or (number_rows == 1):
        real_index = 0
        double_indexer = False

    (figure, axarr) = plt.subplots(number_rows, number_cols)
    if (number_pictures > 1):
        plt.setp([ax for ax in axarr], visible=False)

    iteration_display = 0.0
    for iteration in range(len(syncpr_output_dynamic)):
        if iteration >= iteration_display:
            iteration_display += iteration_math_step

            ax_handle = axarr
            if number_pictures > 1:
                ax_handle = axarr[real_index]

            syncpr_visualizer.__show_pattern(ax_handle, syncpr_output_dynamic,
                                             image_height, image_width, iteration)

            if double_indexer is True:
                real_index = real_index[0], real_index[1] + 1
                if (real_index[1] >= number_cols):
                    real_index = real_index[0] + 1, 0
            else:
                real_index += 1

    plt.show()
    plt.close(figure)
```
https://github.com/annoviko/pyclustering/blob/bf4f51a472622292627ec8c294eb205585e50f52/pyclustering/nnet/syncpr.py#L54-L102
nwo: pantsbuild/pex
sha: 473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
path: pex/vendor/_vendored/pip/pip/_vendor/html5lib/_tokenizer.py
language: python
identifier: HTMLTokenizer.scriptDataEscapedEndTagOpenState
parameters: (self)

```python
def scriptDataEscapedEndTagOpenState(self):
    data = self.stream.char()
    if data in asciiLetters:
        self.temporaryBuffer = data
        self.state = self.scriptDataEscapedEndTagNameState
    else:
        self.tokenQueue.append({"type": tokenTypes["Characters"],
                                "data": "</"})
        self.stream.unget(data)
        self.state = self.scriptDataEscapedState
    return True
```
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/html5lib/_tokenizer.py#L706-L715
nwo: roglew/guppy-proxy
sha: 01df16be71dd9f23d7de415a315821659c29bc63
path: guppyproxy/hexteditor.py
language: python
identifier: PrettyPrintWidget.fill_highlighted
parameters: (self)

```python
def fill_highlighted(self):
    with DisableUpdates(self.htmlxml_widg):
        self.highlighted_widg.setPlainText("")
        if not self.data:
            return
        ct = self.headers.get('Content-Type').lower()
        if ";" in ct:
            ct = ct.split(";")[0]
        try:
            lexer = get_lexer_for_mimetype(ct)
            highlighted = textedit_highlight(self.data, lexer)
        except:
            highlighted = printable_data(self.data)
        self.highlighted_widg.setHtml(highlighted)
```
https://github.com/roglew/guppy-proxy/blob/01df16be71dd9f23d7de415a315821659c29bc63/guppyproxy/hexteditor.py#L138-L151
nwo: jgagneastro/coffeegrindsize
sha: 22661ebd21831dba4cf32bfc6ba59fe3d49f879c
path: App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/cluster/hierarchy.py
language: python
identifier: weighted
parameters: (y)
docstring_summary: Perform weighted/WPGMA linkage on the condensed distance matrix.

```python
def weighted(y):
    """
    Perform weighted/WPGMA linkage on the condensed distance matrix.

    See `linkage` for more information on the return
    structure and algorithm.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix. The result of
        ``pdist`` is returned in this form.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering. See
        `linkage` for more information on its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics

    Examples
    --------
    >>> from scipy.cluster.hierarchy import weighted, fcluster
    >>> from scipy.spatial.distance import pdist

    First we need a toy dataset to play with::

        x x    x x
        x        x

        x        x
        x x    x x

    >>> X = [[0, 0], [0, 1], [1, 0],
    ...      [0, 4], [0, 3], [1, 4],
    ...      [4, 0], [3, 0], [4, 1],
    ...      [4, 4], [3, 4], [4, 3]]

    Then we get a condensed distance matrix from this dataset:

    >>> y = pdist(X)

    Finally, we can perform the clustering:

    >>> Z = weighted(y)
    >>> Z
    array([[ 0.        ,  1.        ,  1.        ,  2.        ],
           [ 6.        ,  7.        ,  1.        ,  2.        ],
           [ 3.        ,  4.        ,  1.        ,  2.        ],
           [ 9.        , 11.        ,  1.        ,  2.        ],
           [ 2.        , 12.        ,  1.20710678,  3.        ],
           [ 8.        , 13.        ,  1.20710678,  3.        ],
           [ 5.        , 14.        ,  1.20710678,  3.        ],
           [10.        , 15.        ,  1.20710678,  3.        ],
           [18.        , 19.        ,  3.05595762,  6.        ],
           [16.        , 17.        ,  3.32379407,  6.        ],
           [20.        , 21.        ,  4.06357713, 12.        ]])

    The linkage matrix ``Z`` represents a dendrogram - see
    `scipy.cluster.hierarchy.linkage` for a detailed explanation of its
    contents.

    We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster
    each initial point would belong given a distance threshold:

    >>> fcluster(Z, 0.9, criterion='distance')
    array([ 7,  8,  9,  1,  2,  3, 10, 11, 12,  4,  6,  5], dtype=int32)
    >>> fcluster(Z, 1.5, criterion='distance')
    array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32)
    >>> fcluster(Z, 4, criterion='distance')
    array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32)
    >>> fcluster(Z, 6, criterion='distance')
    array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)

    Also `scipy.cluster.hierarchy.dendrogram` can be used to generate a
    plot of the dendrogram.

    """
    return linkage(y, method='weighted', metric='euclidean')
```
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/cluster/hierarchy.py#L445-L527
nwo: petercorke/robotics-toolbox-python
sha: 51aa8bbb3663a7c815f9880d538d61e7c85bc470
path: roboticstoolbox/tools/urdf/urdf.py
language: python
identifier: Joint.axis
parameters: (self)
docstring_summary: (3,) float : The joint axis in the joint frame.

```python
def axis(self):
    """(3,) float : The joint axis in the joint frame."""
    return self._axis
```
https://github.com/petercorke/robotics-toolbox-python/blob/51aa8bbb3663a7c815f9880d538d61e7c85bc470/roboticstoolbox/tools/urdf/urdf.py#L1340-L1342
nwo: tensorflow/models
sha: 6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3
path: official/nlp/data/classifier_data_lib.py
language: python
identifier: XtremePawsxProcessor.get_labels
parameters: (self)
docstring_summary: See base class.

```python
def get_labels(self):
    """See base class."""
    return ["0", "1"]
```
https://github.com/tensorflow/models/blob/6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3/official/nlp/data/classifier_data_lib.py#L945-L947
nwo: NervanaSystems/ngraph-python
sha: ac032c83c7152b615a9ad129d54d350f9d6a2986
path: ngraph/op_graph/axes.py
language: python
identifier: Axes.batch_axes
parameters: (self)
docstring_summary: Returns the tensor's batch Axis wrapped in an Axes object if there is one on this tensor, otherwise returns None.

```python
def batch_axes(self):
    """
    Returns:
        The tensor's batch Axis wrapped in an Axes object if there is one
        on this tensor, otherwise returns None
    """
    batch_axis = self.batch_axis()
    if batch_axis:
        return Axes([batch_axis])
    else:
        return None
```
https://github.com/NervanaSystems/ngraph-python/blob/ac032c83c7152b615a9ad129d54d350f9d6a2986/ngraph/op_graph/axes.py#L469-L479
nwo: openstack/cinder
sha: 23494a6d6c51451688191e1847a458f1d3cdcaa5
path: cinder/volume/drivers/dell_emc/powermax/provision.py
language: python
identifier: PowerMaxProvision.break_rdf_relationship
parameters: (self, array, device_id, sg_name, rdf_group, rep_extra_specs, state)
docstring_summary: Break the rdf relationship between a pair of devices.

```python
def break_rdf_relationship(self, array, device_id, sg_name,
                           rdf_group, rep_extra_specs, state):
    """Break the rdf relationship between a pair of devices.

    Resuming replication after suspending is necessary where this
    function is called from. Doing so in here will disrupt the ability
    to perform further actions on the RDFG without suspending again.

    :param array: the array serial number
    :param device_id: the source device id
    :param sg_name: storage group
    :param rdf_group: the rdf group number
    :param rep_extra_specs: replication extra specs
    :param state: the state of the rdf pair
    """
    LOG.info("Suspending RDF group %(rdf)s to delete source device "
             "%(dev)s RDF pair.", {'rdf': rdf_group, 'dev': device_id})

    if state.lower() == utils.RDF_SYNCINPROG_STATE:
        self.rest.wait_for_rdf_pair_sync(
            array, rdf_group, device_id, rep_extra_specs)
    if state.lower() != utils.RDF_SUSPENDED_STATE:
        self.rest.srdf_suspend_replication(
            array, sg_name, rdf_group, rep_extra_specs)

    self.rest.srdf_delete_device_pair(array, rdf_group, device_id)
```
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/volume/drivers/dell_emc/powermax/provision.py#L568-L593
nwo: realpython/book2-exercises
sha: cde325eac8e6d8cff2316601c2e5b36bb46af7d0
path: web2py/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py
language: python
identifier: ParserElement.__invert__
parameters: ( self )
docstring_summary: Implementation of ~ operator - returns C{L{NotAny}}

```python
def __invert__( self ):
    """
    Implementation of ~ operator - returns C{L{NotAny}}
    """
    return NotAny( self )
```
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py/venv/lib/python2.7/site-packages/pkg_resources/_vendor/pyparsing.py#L1979-L1983
nwo: web2py/web2py
sha: 095905c4e010a1426c729483d912e270a51b7ba8
path: gluon/tools.py
language: python
identifier: Auth.requires_permission
parameters: (self, name, table_name='', record_id=0, otherwise=None)
docstring_summary: Decorator that prevents access to action if not logged in or if user logged in is not a member of any group (role) that has 'name' access to 'table_name', 'record_id'.

```python
def requires_permission(self, name, table_name='', record_id=0,
                        otherwise=None):
    """
    Decorator that prevents access to action if not logged in or
    if user logged in is not a member of any group (role) that
    has 'name' access to 'table_name', 'record_id'.
    """
    def has_permission(self=self, name=name, table_name=table_name,
                       record_id=record_id):
        return self.has_permission(name, table_name, record_id)
    return self.requires(has_permission, otherwise=otherwise)
```
https://github.com/web2py/web2py/blob/095905c4e010a1426c729483d912e270a51b7ba8/gluon/tools.py#L3921-L3931
nwo: pvlib/pvlib-python
sha: 1ab0eb20f9cd9fb9f7a0ddf35f81283f2648e34a
path: pvlib/iam.py
language: python
identifier: marion_diffuse
parameters: (model, surface_tilt, **kwargs)
docstring_summary: Determine diffuse irradiance incidence angle modifiers using Marion's method of integrating over solid angle.

```python
def marion_diffuse(model, surface_tilt, **kwargs):
    """
    Determine diffuse irradiance incidence angle modifiers using Marion's
    method of integrating over solid angle.

    Parameters
    ----------
    model : str
        The IAM function to evaluate across solid angle. Must be one of
        `'ashrae', 'physical', 'martin_ruiz', 'sapm'`.

    surface_tilt : numeric
        Surface tilt angles in decimal degrees.
        The tilt angle is defined as degrees from horizontal
        (e.g. surface facing up = 0, surface facing horizon = 90).

    **kwargs
        Extra parameters passed to the IAM function.

    Returns
    -------
    iam : dict
        IAM values for each type of diffuse irradiance:

            * 'sky': radiation from the sky dome (zenith <= 90)
            * 'horizon': radiation from the region of the sky near the
              horizon (89.5 <= zenith <= 90)
            * 'ground': radiation reflected from the ground (zenith >= 90)

        See [1]_ for a detailed description of each class.

    See Also
    --------
    pvlib.iam.marion_integrate

    References
    ----------
    .. [1] B. Marion "Numerical method for angle-of-incidence correction
       factors for diffuse radiation incident photovoltaic modules",
       Solar Energy, Volume 147, Pages 344-348. 2017.
       DOI: 10.1016/j.solener.2017.03.027

    Examples
    --------
    >>> marion_diffuse('physical', surface_tilt=20)
    {'sky': 0.9539178294437575,
     'horizon': 0.7652650139134007,
     'ground': 0.6387140117795903}

    >>> marion_diffuse('ashrae', [20, 30], b=0.04)
    {'sky': array([0.96748999, 0.96938408]),
     'horizon': array([0.86478428, 0.91825792]),
     'ground': array([0.77004435, 0.8522436 ])}
    """
    models = {
        'physical': physical,
        'ashrae': ashrae,
        'sapm': sapm,
        'martin_ruiz': martin_ruiz,
    }

    try:
        iam_model = models[model]
    except KeyError:
        raise ValueError('model must be one of: ' + str(list(models.keys())))

    iam_function = functools.partial(iam_model, **kwargs)
    iam = {}
    for region in ['sky', 'horizon', 'ground']:
        iam[region] = marion_integrate(iam_function, surface_tilt, region)

    return iam
```
https://github.com/pvlib/pvlib-python/blob/1ab0eb20f9cd9fb9f7a0ddf35f81283f2648e34a/pvlib/iam.py#L533-L605
nwo: microsoft/ptvsd
sha: 99c8513921021d2cc7cd82e132b65c644c256768
path: src/ptvsd/_vendored/pydevd/_pydev_imps/_pydev_inspect.py
language: python
identifier: ismethod
parameters: (object)
docstring_summary: Return true if the object is an instance method.

```python
def ismethod(object):
    """Return true if the object is an instance method.
    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        im_class        class object in which this method belongs
        im_func         function object containing implementation of method
        im_self         instance to which this method is bound, or None"""
    return isinstance(object, types.MethodType)
```
https://github.com/microsoft/ptvsd/blob/99c8513921021d2cc7cd82e132b65c644c256768/src/ptvsd/_vendored/pydevd/_pydev_imps/_pydev_inspect.py#L49-L58
nwo: SirVer/ultisnips
sha: 2c83e40ce66814bf813457bb58ea96184ab9bb81
path: pythonx/UltiSnips/snippet/definition/base.py
language: python
identifier: SnippetDefinition.matched
parameters: (self)
docstring_summary: The last text that matched this snippet in match() or could_match().

```python
def matched(self):
    """The last text that matched this snippet in match() or
    could_match()."""
    return self._matched
```
https://github.com/SirVer/ultisnips/blob/2c83e40ce66814bf813457bb58ea96184ab9bb81/pythonx/UltiSnips/snippet/definition/base.py#L274-L277
nwo: prkumar/uplink
sha: 3472806f68a60a93f7cb555d36365551a5411cc5
path: uplink/arguments.py
language: python
identifier: Timeout._modify_request
parameters: (self, request_builder, value)
docstring_summary: Modifies request timeout.

```python
def _modify_request(self, request_builder, value):
    """Modifies request timeout."""
    request_builder.info["timeout"] = value
```
https://github.com/prkumar/uplink/blob/3472806f68a60a93f7cb555d36365551a5411cc5/uplink/arguments.py#L733-L735
nwo: nschaetti/EchoTorch
sha: cba209c49e0fda73172d2e853b85c747f9f5117e
path: echotorch/models/reservoir/ESN.py
language: python
identifier: ESN.w_in
parameters: (self)
docstring_summary: Input matrix

```python
def w_in(self):
    """
    Input matrix
    :return: Input matrix
    """
    return self._esn_cell.w_in
```
https://github.com/nschaetti/EchoTorch/blob/cba209c49e0fda73172d2e853b85c747f9f5117e/echotorch/models/reservoir/ESN.py#L227-L232
nwo: wbond/package_control
sha: cfaaeb57612023e3679ecb7f8cd7ceac9f57990d
path: package_control/deps/asn1crypto/core.py
language: python
identifier: Void.native
parameters: (self)
docstring_summary: The native Python datatype representation of this value

```python
def native(self):
    """
    The native Python datatype representation of this value

    :return:
        None
    """
    return None
```
https://github.com/wbond/package_control/blob/cfaaeb57612023e3679ecb7f8cd7ceac9f57990d/package_control/deps/asn1crypto/core.py#L832-L840
nwo: facebookresearch/votenet
sha: 2f6d6d36ff98d96901182e935afe48ccee82d566
path: utils/pc_util.py
language: python
identifier: point_cloud_to_image
parameters: (points, imgsize, radius=1.0, num_sample=128)
docstring_summary: input is Nx3 points, output is imgsize*imgsize*num_sample*3; assumes points are in range [-radius, radius]; samples num_sample points in each pixel, and if there are less than num_sample points, replicates the points. Added on Feb 19.

```python
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
    """ input is Nx3 points
        output is imgsize*imgsize*num_sample*3
        assumes points are in range [-radius, radius]
        samples num_sample points in each pixel, if there are less than
        num_sample points, replicate the points
        Added on Feb 19
    """
    img = np.zeros((imgsize, imgsize, num_sample, 3))
    pixel = 2 * radius / float(imgsize)
    locations = (points[:, 0:2] + radius) / pixel  # Nx2
    locations = locations.astype(int)
    loc2pc = {}
    for n in range(points.shape[0]):
        loc = tuple(locations[n, :])
        if loc not in loc2pc:
            loc2pc[loc] = []
        loc2pc[loc].append(points[n, :])

    for i in range(imgsize):
        for j in range(imgsize):
            if (i, j) not in loc2pc:
                img[i, j, :, :] = np.zeros((num_sample, 3))
            else:
                pc = loc2pc[(i, j)]
                pc = np.vstack(pc)
                if pc.shape[0] > num_sample:
                    pc = random_sampling(pc, num_sample, False)
                elif pc.shape[0] < num_sample:
                    pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
                pc_center = (np.array([i, j]) + 0.5) * pixel - radius
                pc[:, 0:2] = (pc[:, 0:2] - pc_center) / pixel
                img[i, j, :, :] = pc
    return img
```
https://github.com/facebookresearch/votenet/blob/2f6d6d36ff98d96901182e935afe48ccee82d566/utils/pc_util.py#L156-L188
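A hedged usage sketch of the projection above, assuming `point_cloud_to_image` and the module's `random_sampling` helper are importable from `pc_util`; the input sizes are arbitrary:

```python
import numpy as np
from pc_util import point_cloud_to_image  # assumes the module above is on the path

# Project 1000 random points in [-1, 1]^3 onto a 16x16 pixel grid,
# keeping up to 32 (subsampled or replicated) points per pixel.
points = np.random.uniform(-1.0, 1.0, size=(1000, 3))
img = point_cloud_to_image(points, imgsize=16, radius=1.0, num_sample=32)
print(img.shape)  # (16, 16, 32, 3)
```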
nwo: DeepPavlov/convai
sha: 54d921f99606960941ece4865a396925dfc264f4
path: 2017/solutions/bot#1337/dialog_tracker/from_opennmt_summary/get_reply.py
language: python
identifier: split_text_on_chunks
parameters: (text, max_len=75, overlap=15)
docstring_summary: Splits text into chunks. Args: text: input text; max_len: max chunk length (in words); overlap: length of chunks overlapping.

```python
def split_text_on_chunks(text, max_len=75, overlap=15):
    """
    Args:
        text: input text
        max_len: max chunk length (in words)
        overlap: length of chunks overlapping

    Returns:
        list of chunks
    """
    chunks = []
    tokens = normalize(text).split(' ')
    for i in range(0, len(tokens), overlap):
        chunks.append(' '.join(tokens[i: i + max_len]))
    return chunks
```
https://github.com/DeepPavlov/convai/blob/54d921f99606960941ece4865a396925dfc264f4/2017/solutions/bot#1337/dialog_tracker/from_opennmt_summary/get_reply.py#L55-L69
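Note that the loop advances by `overlap` words per step, so consecutive chunks actually share `max_len - overlap` words. A self-contained sketch of the behavior; this `normalize` is a hypothetical stand-in for the module's own text normalizer:

```python
# Hypothetical stand-in for the module's normalize().
def normalize(text):
    return " ".join(text.split())

def split_text_on_chunks(text, max_len=75, overlap=15):
    chunks = []
    tokens = normalize(text).split(' ')
    for i in range(0, len(tokens), overlap):
        chunks.append(' '.join(tokens[i: i + max_len]))
    return chunks

print(split_text_on_chunks("one two three four five six", max_len=4, overlap=2))
# ['one two three four', 'three four five six', 'five six']
```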
nwo: openshift/openshift-tools
sha: 1188778e728a6e4781acf728123e5b356380fe6f
path: ansible/roles/lib_zabbix/library/zbx_action.py
language: python
identifier: get_condition_type
parameters: (event_source, inc_condition)
docstring_summary: determine the condition type

```python
def get_condition_type(event_source, inc_condition):
    '''determine the condition type'''
    c_types = {}
    if event_source == 'trigger':
        c_types = {'host group': 0,
                   'host': 1,
                   'trigger': 2,
                   'trigger name': 3,
                   'trigger severity': 4,
                   'trigger value': 5,
                   'time period': 6,
                   'host template': 13,
                   'application': 15,
                   'maintenance status': 16,
                  }
    elif event_source == 'discovery':
        c_types = {'host IP': 7,
                   'discovered service type': 8,
                   'discovered service port': 9,
                   'discovery status': 10,
                   'uptime or downtime duration': 11,
                   'received value': 12,
                   'discovery rule': 18,
                   'discovery check': 19,
                   'proxy': 20,
                   'discovery object': 21,
                  }
    elif event_source == 'auto':
        c_types = {'proxy': 20,
                   'host name': 22,
                   'host metadata': 24,
                  }
    elif event_source == 'internal':
        c_types = {'host group': 0,
                   'host': 1,
                   'host template': 13,
                   'application': 15,
                   'event type': 23,
                  }
    else:
        raise ZabbixAPIError('Unkown event source %s' % event_source)

    return c_types[inc_condition]
```
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_zabbix/library/zbx_action.py#L333-L378
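Assuming the function above is in scope, it maps a human-readable condition name to the integer code the Zabbix API expects for a given event source:

```python
# Condition-type codes depend on the event source.
print(get_condition_type('trigger', 'host group'))  # 0
print(get_condition_type('discovery', 'proxy'))     # 20
print(get_condition_type('auto', 'host metadata'))  # 24
```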
nwo: pfalcon/pycopy-lib
sha: 56ebf2110f3caa63a3785d439ce49b11e13c75c0
path: datetime/datetime.py
language: python
identifier: datetime.utcfromtimestamp
parameters: (cls, t)
docstring_summary: Construct a UTC datetime from a POSIX timestamp (like time.time()).

```python
def utcfromtimestamp(cls, t):
    "Construct a UTC datetime from a POSIX timestamp (like time.time())."
    t, frac = divmod(t, 1.0)
    us = int(frac * 1e6)

    # If timestamp is less than one microsecond smaller than a
    # full second, us can be rounded up to 1000000. In this case,
    # roll over to seconds, otherwise, ValueError is raised
    # by the constructor.
    if us == 1000000:
        t += 1
        us = 0
    y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
    ss = min(ss, 59)    # clamp out leap seconds if the platform has them
    return cls(y, m, d, hh, mm, ss, us)
```
https://github.com/pfalcon/pycopy-lib/blob/56ebf2110f3caa63a3785d439ce49b11e13c75c0/datetime/datetime.py#L1374-L1388
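This pure-Python port mirrors the CPython classmethod of the same name; the stdlib version behaves the same way:

```python
import time
from datetime import datetime

# Convert POSIX timestamps to naive UTC datetimes.
print(datetime.utcfromtimestamp(0))            # 1970-01-01 00:00:00 (the epoch)
print(datetime.utcfromtimestamp(time.time()))  # current UTC time
```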
nwo: vcheckzen/FODI
sha: 3bb23644938a33c3fdfb9611a622e35ed4ce6532
path: back-end-py/main/3rd/Crypto/Hash/SHA256.py
language: python
identifier: SHA256Hash.copy
parameters: (self)
docstring_summary: Return a copy ("clone") of the hash object.

```python
def copy(self):
    """Return a copy ("clone") of the hash object.

    The copy will have the same internal state as the original hash
    object.
    This can be used to efficiently compute the digests of strings that
    share a common initial substring.

    :return: A hash object of the same type
    """
    clone = SHA256Hash()
    result = _raw_sha256_lib.SHA256_copy(self._state.get(),
                                         clone._state.get())
    if result:
        raise ValueError("Error %d while copying SHA256" % result)
    return clone
```
https://github.com/vcheckzen/FODI/blob/3bb23644938a33c3fdfb9611a622e35ed4ce6532/back-end-py/main/3rd/Crypto/Hash/SHA256.py#L124-L140
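The `copy()` idiom above is what lets callers share the cost of hashing a common prefix. A usage sketch with the PyCryptodome API this file vendors:

```python
from Crypto.Hash import SHA256

h = SHA256.new(b"common prefix")
clone = h.copy()            # snapshot of the internal state
h.update(b" suffix A")
clone.update(b" suffix B")
print(h.hexdigest())        # digest of b"common prefix suffix A"
print(clone.hexdigest())    # digest of b"common prefix suffix B"
```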
nwo: google/glazier
sha: 8a7f3dacb8be8a73a15d988d02a3c14e306d8f6e
path: glazier/lib/buildinfo.py
language: python
identifier: BuildInfo.Release
parameters: (self)
docstring_summary: Determine the current build release.

```python
def Release(self) -> Optional[str]:
    """Determine the current build release.

    Returns:
        The build release as a string.
    """
    rel_id_file = '%s/%s' % (self.ReleasePath().rstrip('/'), 'release-id.yaml')
    try:
        data = files.Read(rel_id_file)
    except files.Error as e:
        raise Error(e)
    if data and 'release_id' in data:
        return data['release_id']
    return None
```
https://github.com/google/glazier/blob/8a7f3dacb8be8a73a15d988d02a3c14e306d8f6e/glazier/lib/buildinfo.py#L147-L160
nwo: galaxyproject/galaxy
sha: 4c03520f05062e0f4a1b3655dc0b7452fda69943
path: lib/galaxy/managers/datasets.py
language: python
identifier: DatasetAssociationFilterParser.eq_datatype
parameters: (self, dataset_assoc, class_str)
docstring_summary: Is the `dataset_assoc` datatype equal to the registered datatype `class_str`?

```python
def eq_datatype(self, dataset_assoc, class_str):
    """
    Is the `dataset_assoc` datatype equal to the registered
    datatype `class_str`?
    """
    comparison_class = self.app.datatypes_registry.get_datatype_class_by_name(class_str)
    return comparison_class and dataset_assoc.datatype.__class__ == comparison_class
```
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/managers/datasets.py#L735-L740
urinieto/msaf
17db5b698e06d662dfa5c7442d826022746454b7
msaf/algorithms/interface.py
python
SegmenterInterface._postprocess
(self, est_idxs, est_labels)
return est_idxs, est_labels
Post processes the estimations from the algorithm, removing empty segments and making sure the lengths of the boundaries and labels match.
Post processes the estimations from the algorithm, removing empty segments and making sure the lengths of the boundaries and labels match.
[ "Post", "processes", "the", "estimations", "from", "the", "algorithm", "removing", "empty", "segments", "and", "making", "sure", "the", "lengths", "of", "the", "boundaries", "and", "labels", "match", "." ]
def _postprocess(self, est_idxs, est_labels): """Post processes the estimations from the algorithm, removing empty segments and making sure the lenghts of the boundaries and labels match.""" # Make sure we are using the previously input bounds, if any if self.in_bound_idxs is not None: F = self._preprocess() est_labels = U.synchronize_labels(self.in_bound_idxs, est_idxs, est_labels, F.shape[0]) est_idxs = self.in_bound_idxs # Remove empty segments if needed est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels) assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \ "(%d) and number of labels(%d) don't match" % (len(est_idxs), len(est_labels)) # Make sure the indeces are integers est_idxs = np.asarray(est_idxs, dtype=int) return est_idxs, est_labels
[ "def", "_postprocess", "(", "self", ",", "est_idxs", ",", "est_labels", ")", ":", "# Make sure we are using the previously input bounds, if any", "if", "self", ".", "in_bound_idxs", "is", "not", "None", ":", "F", "=", "self", ".", "_preprocess", "(", ")", "est_labels", "=", "U", ".", "synchronize_labels", "(", "self", ".", "in_bound_idxs", ",", "est_idxs", ",", "est_labels", ",", "F", ".", "shape", "[", "0", "]", ")", "est_idxs", "=", "self", ".", "in_bound_idxs", "# Remove empty segments if needed", "est_idxs", ",", "est_labels", "=", "U", ".", "remove_empty_segments", "(", "est_idxs", ",", "est_labels", ")", "assert", "len", "(", "est_idxs", ")", "-", "1", "==", "len", "(", "est_labels", ")", ",", "\"Number of boundaries \"", "\"(%d) and number of labels(%d) don't match\"", "%", "(", "len", "(", "est_idxs", ")", ",", "len", "(", "est_labels", ")", ")", "# Make sure the indeces are integers", "est_idxs", "=", "np", ".", "asarray", "(", "est_idxs", ",", "dtype", "=", "int", ")", "return", "est_idxs", ",", "est_labels" ]
https://github.com/urinieto/msaf/blob/17db5b698e06d662dfa5c7442d826022746454b7/msaf/algorithms/interface.py#L102-L123
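The record above delegates to U.remove_empty_segments; the following is a minimal standalone sketch of that idea (not msaf's actual helper): given boundary indices [b0..bn] and one label per segment [b_i, b_{i+1}), drop any segment whose two boundaries coincide, preserving the len(bounds) - 1 == len(labels) invariant asserted in _postprocess.

import numpy as np

def remove_empty_segments_sketch(bound_idxs, labels):
    bound_idxs = np.asarray(bound_idxs)
    keep = np.diff(bound_idxs) > 0            # False where a segment has zero length
    new_labels = [lab for lab, k in zip(labels, keep) if k]
    new_bounds = np.concatenate([bound_idxs[:-1][keep], bound_idxs[-1:]])
    return new_bounds, new_labels

print(remove_empty_segments_sketch([0, 4, 4, 9], ["A", "B", "C"]))
# -> (array([0, 4, 9]), ['A', 'C'])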
Project-MONAI/MONAI
83f8b06372a3803ebe9281300cb794a1f3395018
monai/networks/nets/efficientnet.py
python
BlockArgs.from_string
(block_string: str)
return BlockArgs( num_repeat=int(options["r"]), kernel_size=int(options["k"]), stride=int(options["s"][0]), expand_ratio=int(options["e"]), input_filters=int(options["i"]), output_filters=int(options["o"]), id_skip=("noskip" not in block_string), se_ratio=float(options["se"]) if "se" in options else None, )
Get a BlockArgs object from a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: "r1_k3_s11_e1_i32_o16_se0.25". Returns: BlockArgs: namedtuple defined at the top of this function.
Get a BlockArgs object from a string notation of arguments.
[ "Get", "a", "BlockArgs", "object", "from", "a", "string", "notation", "of", "arguments", "." ]
def from_string(block_string: str): """ Get a BlockArgs object from a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: "r1_k3_s11_e1_i32_o16_se0.25". Returns: BlockArgs: namedtuple defined at the top of this function. """ ops = block_string.split("_") options = {} for op in ops: splits = re.split(r"(\d.*)", op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # check stride stride_check = ( ("s" in options and len(options["s"]) == 1) or (len(options["s"]) == 2 and options["s"][0] == options["s"][1]) or (len(options["s"]) == 3 and options["s"][0] == options["s"][1] and options["s"][0] == options["s"][2]) ) if not stride_check: raise ValueError("invalid stride option received") return BlockArgs( num_repeat=int(options["r"]), kernel_size=int(options["k"]), stride=int(options["s"][0]), expand_ratio=int(options["e"]), input_filters=int(options["i"]), output_filters=int(options["o"]), id_skip=("noskip" not in block_string), se_ratio=float(options["se"]) if "se" in options else None, )
[ "def", "from_string", "(", "block_string", ":", "str", ")", ":", "ops", "=", "block_string", ".", "split", "(", "\"_\"", ")", "options", "=", "{", "}", "for", "op", "in", "ops", ":", "splits", "=", "re", ".", "split", "(", "r\"(\\d.*)\"", ",", "op", ")", "if", "len", "(", "splits", ")", ">=", "2", ":", "key", ",", "value", "=", "splits", "[", ":", "2", "]", "options", "[", "key", "]", "=", "value", "# check stride", "stride_check", "=", "(", "(", "\"s\"", "in", "options", "and", "len", "(", "options", "[", "\"s\"", "]", ")", "==", "1", ")", "or", "(", "len", "(", "options", "[", "\"s\"", "]", ")", "==", "2", "and", "options", "[", "\"s\"", "]", "[", "0", "]", "==", "options", "[", "\"s\"", "]", "[", "1", "]", ")", "or", "(", "len", "(", "options", "[", "\"s\"", "]", ")", "==", "3", "and", "options", "[", "\"s\"", "]", "[", "0", "]", "==", "options", "[", "\"s\"", "]", "[", "1", "]", "and", "options", "[", "\"s\"", "]", "[", "0", "]", "==", "options", "[", "\"s\"", "]", "[", "2", "]", ")", ")", "if", "not", "stride_check", ":", "raise", "ValueError", "(", "\"invalid stride option received\"", ")", "return", "BlockArgs", "(", "num_repeat", "=", "int", "(", "options", "[", "\"r\"", "]", ")", ",", "kernel_size", "=", "int", "(", "options", "[", "\"k\"", "]", ")", ",", "stride", "=", "int", "(", "options", "[", "\"s\"", "]", "[", "0", "]", ")", ",", "expand_ratio", "=", "int", "(", "options", "[", "\"e\"", "]", ")", ",", "input_filters", "=", "int", "(", "options", "[", "\"i\"", "]", ")", ",", "output_filters", "=", "int", "(", "options", "[", "\"o\"", "]", ")", ",", "id_skip", "=", "(", "\"noskip\"", "not", "in", "block_string", ")", ",", "se_ratio", "=", "float", "(", "options", "[", "\"se\"", "]", ")", "if", "\"se\"", "in", "options", "else", "None", ",", ")" ]
https://github.com/Project-MONAI/MONAI/blob/83f8b06372a3803ebe9281300cb794a1f3395018/monai/networks/nets/efficientnet.py#L883-L920
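A standalone sketch of the block-string notation parsed above, returning a plain dict instead of MONAI's BlockArgs namedtuple; the example string comes from the record's own docstring.

import re

def parse_block_string_sketch(block_string):
    # Each '_'-separated token is a short alphabetic key followed by a
    # numeric value, e.g. 'k3' -> ('k', '3'), 'se0.25' -> ('se', '0.25').
    options = {}
    for op in block_string.split("_"):
        splits = re.split(r"(\d.*)", op)
        if len(splits) >= 2:
            key, value = splits[:2]
            options[key] = value
    return options

print(parse_block_string_sketch("r1_k3_s11_e1_i32_o16_se0.25"))
# -> {'r': '1', 'k': '3', 's': '11', 'e': '1', 'i': '32', 'o': '16', 'se': '0.25'}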
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/django-1.2/django/contrib/gis/gdal/layer.py
python
Layer.__init__
(self, layer_ptr, ds)
Initializes on an OGR C pointer to the Layer and the `DataSource` object that owns this layer. The `DataSource` object is required so that a reference to it is kept with this Layer. This prevents garbage collection of the `DataSource` while this Layer is still active.
Initializes on an OGR C pointer to the Layer and the `DataSource` object that owns this layer. The `DataSource` object is required so that a reference to it is kept with this Layer. This prevents garbage collection of the `DataSource` while this Layer is still active.
[ "Initializes", "on", "an", "OGR", "C", "pointer", "to", "the", "Layer", "and", "the", "DataSource", "object", "that", "owns", "this", "layer", ".", "The", "DataSource", "object", "is", "required", "so", "that", "a", "reference", "to", "it", "is", "kept", "with", "this", "Layer", ".", "This", "prevents", "garbage", "collection", "of", "the", "DataSource", "while", "this", "Layer", "is", "still", "active", "." ]
def __init__(self, layer_ptr, ds): """ Initializes on an OGR C pointer to the Layer and the `DataSource` object that owns this layer. The `DataSource` object is required so that a reference to it is kept with this Layer. This prevents garbage collection of the `DataSource` while this Layer is still active. """ if not layer_ptr: raise OGRException('Cannot create Layer, invalid pointer given') self.ptr = layer_ptr self._ds = ds self._ldefn = capi.get_layer_defn(self._ptr) # Does the Layer support random reading? self._random_read = self.test_capability('RandomRead')
[ "def", "__init__", "(", "self", ",", "layer_ptr", ",", "ds", ")", ":", "if", "not", "layer_ptr", ":", "raise", "OGRException", "(", "'Cannot create Layer, invalid pointer given'", ")", "self", ".", "ptr", "=", "layer_ptr", "self", ".", "_ds", "=", "ds", "self", ".", "_ldefn", "=", "capi", ".", "get_layer_defn", "(", "self", ".", "_ptr", ")", "# Does the Layer support random reading?", "self", ".", "_random_read", "=", "self", ".", "test_capability", "(", "'RandomRead'", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-1.2/django/contrib/gis/gdal/layer.py#L25-L38
cogitas3d/OrtogOnBlender
881e93f5beb2263e44c270974dd0e81deca44762
PontosAnatomicos.py
python
PNS_pt.execute
(self, context)
return {'FINISHED'}
[]
def execute(self, context): CriaPontoDef('PNS point', 'Anatomical Points - Maxilla') TestaPontoCollDef() return {'FINISHED'}
[ "def", "execute", "(", "self", ",", "context", ")", ":", "CriaPontoDef", "(", "'PNS point'", ",", "'Anatomical Points - Maxilla'", ")", "TestaPontoCollDef", "(", ")", "return", "{", "'FINISHED'", "}" ]
https://github.com/cogitas3d/OrtogOnBlender/blob/881e93f5beb2263e44c270974dd0e81deca44762/PontosAnatomicos.py#L499-L502
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv/lib/python2.7/site-packages/setuptools/command/egg_info.py
python
write_file
(filename, contents)
Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it.
Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it.
[ "Create", "a", "file", "with", "the", "specified", "name", "and", "write", "contents", "(", "a", "sequence", "of", "strings", "without", "line", "terminators", ")", "to", "it", "." ]
def write_file(filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ contents = "\n".join(contents) if sys.version_info >= (3,): contents = contents.encode("utf-8") f = open(filename, "wb") # always write POSIX-style manifest f.write(contents) f.close()
[ "def", "write_file", "(", "filename", ",", "contents", ")", ":", "contents", "=", "\"\\n\"", ".", "join", "(", "contents", ")", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", ":", "contents", "=", "contents", ".", "encode", "(", "\"utf-8\"", ")", "f", "=", "open", "(", "filename", ",", "\"wb\"", ")", "# always write POSIX-style manifest", "f", ".", "write", "(", "contents", ")", "f", ".", "close", "(", ")" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv/lib/python2.7/site-packages/setuptools/command/egg_info.py#L302-L311
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/docutils-0.14/docutils/readers/__init__.py
python
Reader.__init__
(self, parser=None, parser_name=None)
Initialize the Reader instance. Several instance attributes are defined with dummy initial values. Subclasses may use these attributes as they wish.
Initialize the Reader instance.
[ "Initialize", "the", "Reader", "instance", "." ]
def __init__(self, parser=None, parser_name=None): """ Initialize the Reader instance. Several instance attributes are defined with dummy initial values. Subclasses may use these attributes as they wish. """ self.parser = parser """A `parsers.Parser` instance shared by all doctrees. May be left unspecified if the document source determines the parser.""" if parser is None and parser_name: self.set_parser(parser_name) self.source = None """`docutils.io` IO object, source of input data.""" self.input = None """Raw text input; either a single string or, for more complex cases, a collection of strings."""
[ "def", "__init__", "(", "self", ",", "parser", "=", "None", ",", "parser_name", "=", "None", ")", ":", "self", ".", "parser", "=", "parser", "\"\"\"A `parsers.Parser` instance shared by all doctrees. May be left\n unspecified if the document source determines the parser.\"\"\"", "if", "parser", "is", "None", "and", "parser_name", ":", "self", ".", "set_parser", "(", "parser_name", ")", "self", ".", "source", "=", "None", "\"\"\"`docutils.io` IO object, source of input data.\"\"\"", "self", ".", "input", "=", "None", "\"\"\"Raw text input; either a single string or, for more complex cases,\n a collection of strings.\"\"\"" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/docutils-0.14/docutils/readers/__init__.py#L39-L59
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/Centos_5.9/paramiko/packet.py
python
Packetizer.close
(self)
[]
def close(self): self.__closed = True
[ "def", "close", "(", "self", ")", ":", "self", ".", "__closed", "=", "True" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_5.9/paramiko/packet.py#L153-L154
neulab/xnmt
d93f8f3710f986f36eb54e9ff3976a6b683da2a4
xnmt/train/tasks.py
python
TrainingTask.checkpoint
(self, control_learning_schedule: bool = False)
Perform a dev checkpoint. Args: control_learning_schedule: If ``False``, only evaluate dev data. If ``True``, also perform model saving, LR decay etc. if needed. Returns: ``True`` iff the model needs saving
Perform a dev checkpoint.
[ "Perform", "a", "dev", "checkpoint", "." ]
def checkpoint(self, control_learning_schedule: bool = False) -> bool: """ Perform a dev checkpoint. Args: control_learning_schedule: If ``False``, only evaluate dev data. If ``True``, also perform model saving, LR decay etc. if needed. Returns: ``True`` iff the model needs saving """ raise NotImplementedError("must be implemented by subclasses")
[ "def", "checkpoint", "(", "self", ",", "control_learning_schedule", ":", "bool", "=", "False", ")", "->", "bool", ":", "raise", "NotImplementedError", "(", "\"must be implemented by subclasses\"", ")" ]
https://github.com/neulab/xnmt/blob/d93f8f3710f986f36eb54e9ff3976a6b683da2a4/xnmt/train/tasks.py#L57-L67
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/plant/__init__.py
python
async_setup
(hass: HomeAssistant, config: ConfigType)
return True
Set up the Plant component.
Set up the Plant component.
[ "Set", "up", "the", "Plant", "component", "." ]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up the Plant component.""" component = EntityComponent(_LOGGER, DOMAIN, hass) entities = [] for plant_name, plant_config in config[DOMAIN].items(): _LOGGER.info("Added plant %s", plant_name) entity = Plant(plant_name, plant_config) entities.append(entity) await component.async_add_entities(entities) return True
[ "async", "def", "async_setup", "(", "hass", ":", "HomeAssistant", ",", "config", ":", "ConfigType", ")", "->", "bool", ":", "component", "=", "EntityComponent", "(", "_LOGGER", ",", "DOMAIN", ",", "hass", ")", "entities", "=", "[", "]", "for", "plant_name", ",", "plant_config", "in", "config", "[", "DOMAIN", "]", ".", "items", "(", ")", ":", "_LOGGER", ".", "info", "(", "\"Added plant %s\"", ",", "plant_name", ")", "entity", "=", "Plant", "(", "plant_name", ",", "plant_config", ")", "entities", ".", "append", "(", "entity", ")", "await", "component", ".", "async_add_entities", "(", "entities", ")", "return", "True" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/plant/__init__.py#L117-L128
OUCMachineLearning/OUCML
5b54337d7c0316084cb1a74befda2bba96137d4a
One_Day_One_GAN/day6/srgan/srgan.py
python
SRGAN.sample_images
(self, epoch)
[]
def sample_images(self, epoch): os.makedirs('images/%s' % self.dataset_name, exist_ok=True) r, c = 2, 2 imgs_hr, imgs_lr = self.data_loader.load_data(batch_size=2, is_testing=True) fake_hr = self.generator.predict(imgs_lr) # Rescale images 0 - 1 imgs_lr = 0.5 * imgs_lr + 0.5 fake_hr = 0.5 * fake_hr + 0.5 imgs_hr = 0.5 * imgs_hr + 0.5 # Save generated images and the high resolution originals titles = ['Generated', 'Original'] fig, axs = plt.subplots(r, c) cnt = 0 for row in range(r): for col, image in enumerate([fake_hr, imgs_hr]): axs[row, col].imshow(image[row]) axs[row, col].set_title(titles[col]) axs[row, col].axis('off') cnt += 1 fig.savefig("images/%s/%d.png" % (self.dataset_name, epoch)) plt.close() # Save low resolution images for comparison for i in range(r): fig = plt.figure() plt.imshow(imgs_lr[i]) fig.savefig('images/%s/%d_lowres%d.png' % (self.dataset_name, epoch, i)) plt.close()
[ "def", "sample_images", "(", "self", ",", "epoch", ")", ":", "os", ".", "makedirs", "(", "'images/%s'", "%", "self", ".", "dataset_name", ",", "exist_ok", "=", "True", ")", "r", ",", "c", "=", "2", ",", "2", "imgs_hr", ",", "imgs_lr", "=", "self", ".", "data_loader", ".", "load_data", "(", "batch_size", "=", "2", ",", "is_testing", "=", "True", ")", "fake_hr", "=", "self", ".", "generator", ".", "predict", "(", "imgs_lr", ")", "# Rescale images 0 - 1", "imgs_lr", "=", "0.5", "*", "imgs_lr", "+", "0.5", "fake_hr", "=", "0.5", "*", "fake_hr", "+", "0.5", "imgs_hr", "=", "0.5", "*", "imgs_hr", "+", "0.5", "# Save generated images and the high resolution originals", "titles", "=", "[", "'Generated'", ",", "'Original'", "]", "fig", ",", "axs", "=", "plt", ".", "subplots", "(", "r", ",", "c", ")", "cnt", "=", "0", "for", "row", "in", "range", "(", "r", ")", ":", "for", "col", ",", "image", "in", "enumerate", "(", "[", "fake_hr", ",", "imgs_hr", "]", ")", ":", "axs", "[", "row", ",", "col", "]", ".", "imshow", "(", "image", "[", "row", "]", ")", "axs", "[", "row", ",", "col", "]", ".", "set_title", "(", "titles", "[", "col", "]", ")", "axs", "[", "row", ",", "col", "]", ".", "axis", "(", "'off'", ")", "cnt", "+=", "1", "fig", ".", "savefig", "(", "\"images/%s/%d.png\"", "%", "(", "self", ".", "dataset_name", ",", "epoch", ")", ")", "plt", ".", "close", "(", ")", "# Save low resolution images for comparison", "for", "i", "in", "range", "(", "r", ")", ":", "fig", "=", "plt", ".", "figure", "(", ")", "plt", ".", "imshow", "(", "imgs_lr", "[", "i", "]", ")", "fig", ".", "savefig", "(", "'images/%s/%d_lowres%d.png'", "%", "(", "self", ".", "dataset_name", ",", "epoch", ",", "i", ")", ")", "plt", ".", "close", "(", ")" ]
https://github.com/OUCMachineLearning/OUCML/blob/5b54337d7c0316084cb1a74befda2bba96137d4a/One_Day_One_GAN/day6/srgan/srgan.py#L239-L269
xhtml2pdf/xhtml2pdf
3eef378f869e951448bbf95b7be475f22b659dae
xhtml2pdf/w3c/cssParser.py
python
CSSParser._parseAtCharset
(self, src)
return src
[ CHARSET_SYM S* STRING S* ';' ]?
[ CHARSET_SYM S* STRING S* ';' ]?
[ "[", "CHARSET_SYM", "S", "*", "STRING", "S", "*", ";", "]", "?" ]
def _parseAtCharset(self, src): """[ CHARSET_SYM S* STRING S* ';' ]?""" if isAtRuleIdent(src, 'charset'): src = stripAtRuleIdent(src) charset, src = self._getString(src) src = src.lstrip() if src[:1] != ';': raise self.ParseError('@charset expected a terminating \';\'', src, self.ctxsrc) src = src[1:].lstrip() self.cssBuilder.atCharset(charset) return src
[ "def", "_parseAtCharset", "(", "self", ",", "src", ")", ":", "if", "isAtRuleIdent", "(", "src", ",", "'charset'", ")", ":", "src", "=", "stripAtRuleIdent", "(", "src", ")", "charset", ",", "src", "=", "self", ".", "_getString", "(", "src", ")", "src", "=", "src", ".", "lstrip", "(", ")", "if", "src", "[", ":", "1", "]", "!=", "';'", ":", "raise", "self", ".", "ParseError", "(", "'@charset expected a terminating \\';\\''", ",", "src", ",", "self", ".", "ctxsrc", ")", "src", "=", "src", "[", "1", ":", "]", ".", "lstrip", "(", ")", "self", ".", "cssBuilder", ".", "atCharset", "(", "charset", ")", "return", "src" ]
https://github.com/xhtml2pdf/xhtml2pdf/blob/3eef378f869e951448bbf95b7be475f22b659dae/xhtml2pdf/w3c/cssParser.py#L581-L592
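A minimal regex sketch of the same [ CHARSET_SYM S* STRING S* ';' ]? production, independent of xhtml2pdf's tokenizer helpers (isAtRuleIdent, _getString); it only handles a double-quoted charset string.

import re

_AT_CHARSET = re.compile(r'@charset\s*"([^"]*)"\s*;')

def parse_at_charset_sketch(src):
    m = _AT_CHARSET.match(src)
    if m is None:
        return None, src                       # optional rule: absent is fine
    return m.group(1), src[m.end():].lstrip()  # (charset, remaining source)

print(parse_at_charset_sketch('@charset "utf-8"; body { color: red }'))
# -> ('utf-8', 'body { color: red }')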
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/patched/notpip/_internal/resolution/legacy/resolver.py
python
Resolver._is_upgrade_allowed
(self, req: InstallRequirement)
[]
def _is_upgrade_allowed(self, req: InstallRequirement) -> bool: if self.upgrade_strategy == "to-satisfy-only": return False elif self.upgrade_strategy == "eager": return True else: assert self.upgrade_strategy == "only-if-needed" return req.user_supplied or req.constraint
[ "def", "_is_upgrade_allowed", "(", "self", ",", "req", ":", "InstallRequirement", ")", "->", "bool", ":", "if", "self", ".", "upgrade_strategy", "==", "\"to-satisfy-only\"", ":", "return", "False", "elif", "self", ".", "upgrade_strategy", "==", "\"eager\"", ":", "return", "True", "else", ":", "assert", "self", ".", "upgrade_strategy", "==", "\"only-if-needed\"", "return", "req", ".", "user_supplied", "or", "req", ".", "constraint" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_internal/resolution/legacy/resolver.py#L182-L189
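The same three upgrade strategies in isolation, as a hedged standalone sketch with a made-up ReqStub standing in for pip's InstallRequirement:

from dataclasses import dataclass

@dataclass
class ReqStub:                     # hypothetical stand-in, not pip's class
    user_supplied: bool = False
    constraint: bool = False

def is_upgrade_allowed_sketch(strategy, req):
    if strategy == "to-satisfy-only":
        return False               # never upgrade satisfied requirements
    if strategy == "eager":
        return True                # upgrade everything reachable
    assert strategy == "only-if-needed"
    return req.user_supplied or req.constraint

print(is_upgrade_allowed_sketch("only-if-needed", ReqStub(user_supplied=True)))  # True
print(is_upgrade_allowed_sketch("only-if-needed", ReqStub()))                    # False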
MushroomRL/mushroom-rl
a0eaa2cf8001e433419234a9fc48b64170e3f61c
mushroom_rl/utils/torch.py
python
get_gradient
(params)
return torch.cat(views, 0)
Function used to get the value of the gradient of a set of torch parameters. Args: parameters (list): list of parameters to be considered.
Function used to get the value of the gradient of a set of torch parameters.
[ "Function", "used", "to", "get", "the", "value", "of", "the", "gradient", "of", "a", "set", "of", "torch", "parameters", "." ]
def get_gradient(params): """ Function used to get the value of the gradient of a set of torch parameters. Args: parameters (list): list of parameters to be considered. """ views = [] for p in params: if p.grad is None: view = p.new(p.numel()).zero_() else: view = p.grad.view(-1) views.append(view) return torch.cat(views, 0)
[ "def", "get_gradient", "(", "params", ")", ":", "views", "=", "[", "]", "for", "p", "in", "params", ":", "if", "p", ".", "grad", "is", "None", ":", "view", "=", "p", ".", "new", "(", "p", ".", "numel", "(", ")", ")", ".", "zero_", "(", ")", "else", ":", "view", "=", "p", ".", "grad", ".", "view", "(", "-", "1", ")", "views", ".", "append", "(", "view", ")", "return", "torch", ".", "cat", "(", "views", ",", "0", ")" ]
https://github.com/MushroomRL/mushroom-rl/blob/a0eaa2cf8001e433419234a9fc48b64170e3f61c/mushroom_rl/utils/torch.py#L77-L93
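A self-contained PyTorch example of the gradient-flattening pattern above; the tiny Linear model and its shapes are illustrative only.

import torch

model = torch.nn.Linear(3, 2)                 # tiny illustrative model
model(torch.randn(4, 3)).sum().backward()

views = []
for p in model.parameters():
    # Parameters that never received a gradient contribute zeros.
    views.append(p.new(p.numel()).zero_() if p.grad is None else p.grad.view(-1))
flat = torch.cat(views, 0)
print(flat.shape)                             # torch.Size([8]): 6 weights + 2 biases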
mdiazcl/fuzzbunch-debian
2b76c2249ade83a389ae3badb12a1bd09901fd2c
windows/Resources/Python/Core/Lib/idlelib/PyShell.py
python
ModifiedInterpreter.showsyntaxerror
(self, filename=None)
Extend base class method: Add Colorizing Color the offending position instead of printing it and pointing at it with a caret.
Extend base class method: Add Colorizing Color the offending position instead of printing it and pointing at it with a caret.
[ "Extend", "base", "class", "method", ":", "Add", "Colorizing", "Color", "the", "offending", "position", "instead", "of", "printing", "it", "and", "pointing", "at", "it", "with", "a", "caret", "." ]
def showsyntaxerror(self, filename=None): """Extend base class method: Add Colorizing Color the offending position instead of printing it and pointing at it with a caret. """ text = self.tkconsole.text stuff = self.unpackerror() if stuff: msg, lineno, offset, line = stuff if lineno == 1: pos = 'iomark + %d chars' % (offset - 1) else: pos = 'iomark linestart + %d lines + %d chars' % ( lineno - 1, offset - 1) text.tag_add('ERROR', pos) text.see(pos) char = text.get(pos) if char and char in IDENTCHARS: text.tag_add('ERROR', pos + ' wordstart', pos) self.tkconsole.resetoutput() self.write('SyntaxError: %s\n' % str(msg)) else: self.tkconsole.resetoutput() InteractiveInterpreter.showsyntaxerror(self, filename) self.tkconsole.showprompt()
[ "def", "showsyntaxerror", "(", "self", ",", "filename", "=", "None", ")", ":", "text", "=", "self", ".", "tkconsole", ".", "text", "stuff", "=", "self", ".", "unpackerror", "(", ")", "if", "stuff", ":", "msg", ",", "lineno", ",", "offset", ",", "line", "=", "stuff", "if", "lineno", "==", "1", ":", "pos", "=", "'iomark + %d chars'", "%", "(", "offset", "-", "1", ")", "else", ":", "pos", "=", "'iomark linestart + %d lines + %d chars'", "%", "(", "lineno", "-", "1", ",", "offset", "-", "1", ")", "text", ".", "tag_add", "(", "'ERROR'", ",", "pos", ")", "text", ".", "see", "(", "pos", ")", "char", "=", "text", ".", "get", "(", "pos", ")", "if", "char", "and", "char", "in", "IDENTCHARS", ":", "text", ".", "tag_add", "(", "'ERROR'", ",", "pos", "+", "' wordstart'", ",", "pos", ")", "self", ".", "tkconsole", ".", "resetoutput", "(", ")", "self", ".", "write", "(", "'SyntaxError: %s\\n'", "%", "str", "(", "msg", ")", ")", "else", ":", "self", ".", "tkconsole", ".", "resetoutput", "(", ")", "InteractiveInterpreter", ".", "showsyntaxerror", "(", "self", ",", "filename", ")", "self", ".", "tkconsole", ".", "showprompt", "(", ")" ]
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/idlelib/PyShell.py#L597-L623
ray-project/ray
703c1610348615dcb8c2d141a0c46675084660f5
python/ray/_private/gcs_pubsub.py
python
GcsActorSubscriber.poll
(self, timeout=None)
Polls for new actor messages. Returns: A byte string of function key. None if polling times out or subscriber closed.
Polls for new actor messages.
[ "Polls", "for", "new", "actor", "messages", "." ]
def poll(self, timeout=None) -> Optional[bytes]: """Polls for new actor messages. Returns: A byte string of function key. None if polling times out or subscriber closed. """ with self._lock: self._poll_locked(timeout=timeout) return self._pop_actor(self._queue)
[ "def", "poll", "(", "self", ",", "timeout", "=", "None", ")", "->", "Optional", "[", "bytes", "]", ":", "with", "self", ".", "_lock", ":", "self", ".", "_poll_locked", "(", "timeout", "=", "timeout", ")", "return", "self", ".", "_pop_actor", "(", "self", ".", "_queue", ")" ]
https://github.com/ray-project/ray/blob/703c1610348615dcb8c2d141a0c46675084660f5/python/ray/_private/gcs_pubsub.py#L406-L415
coreemu/core
7e18a7a72023a69a92ad61d87461bd659ba27f7c
daemon/core/api/grpc/client.py
python
CoreGrpcClient.add_session_server
( self, session_id: int, name: str, host: str )
return self.stub.AddSessionServer(request)
Add distributed session server. :param session_id: id of session :param name: name of server to add :param host: host address to connect to :return: response with result of success or failure :raises grpc.RpcError: when session doesn't exist
Add distributed session server.
[ "Add", "distributed", "session", "server", "." ]
def add_session_server( self, session_id: int, name: str, host: str ) -> core_pb2.AddSessionServerResponse: """ Add distributed session server. :param session_id: id of session :param name: name of server to add :param host: host address to connect to :return: response with result of success or failure :raises grpc.RpcError: when session doesn't exist """ request = core_pb2.AddSessionServerRequest( session_id=session_id, name=name, host=host ) return self.stub.AddSessionServer(request)
[ "def", "add_session_server", "(", "self", ",", "session_id", ":", "int", ",", "name", ":", "str", ",", "host", ":", "str", ")", "->", "core_pb2", ".", "AddSessionServerResponse", ":", "request", "=", "core_pb2", ".", "AddSessionServerRequest", "(", "session_id", "=", "session_id", ",", "name", "=", "name", ",", "host", "=", "host", ")", "return", "self", ".", "stub", ".", "AddSessionServer", "(", "request", ")" ]
https://github.com/coreemu/core/blob/7e18a7a72023a69a92ad61d87461bd659ba27f7c/daemon/core/api/grpc/client.py#L431-L446
blawar/nut
2cf351400418399a70164987e28670309f6c9cb5
Fs/Bktr.py
python
Header.printInfo
(self, maxDepth=3, indent=0)
[]
def printInfo(self, maxDepth=3, indent=0): if not self.bktr_size: return tabs = '\t' * indent Print.info('\n%sBKTR' % (tabs)) Print.info('%soffset = %d' % (tabs, self.bktr_offset)) Print.info('%ssize = %d' % (tabs, self.bktr_size)) Print.info('%sentry count = %d' % (tabs, self.enctryCount)) Print.info('\n')
[ "def", "printInfo", "(", "self", ",", "maxDepth", "=", "3", ",", "indent", "=", "0", ")", ":", "if", "not", "self", ".", "bktr_size", ":", "return", "tabs", "=", "'\\t'", "*", "indent", "Print", ".", "info", "(", "'\\n%sBKTR'", "%", "(", "tabs", ")", ")", "Print", ".", "info", "(", "'%soffset = %d'", "%", "(", "tabs", ",", "self", ".", "bktr_offset", ")", ")", "Print", ".", "info", "(", "'%ssize = %d'", "%", "(", "tabs", ",", "self", ".", "bktr_size", ")", ")", "Print", ".", "info", "(", "'%sentry count = %d'", "%", "(", "tabs", ",", "self", ".", "enctryCount", ")", ")", "Print", ".", "info", "(", "'\\n'", ")" ]
https://github.com/blawar/nut/blob/2cf351400418399a70164987e28670309f6c9cb5/Fs/Bktr.py#L48-L58
galaxyproject/galaxy
4c03520f05062e0f4a1b3655dc0b7452fda69943
lib/galaxy/objectstore/__init__.py
python
ObjectStore.get_store_by
(self, obj)
Return how object is stored (by 'uuid', 'id', or None if not yet saved). Certain Galaxy remote data features aren't available if objects are stored by 'id'.
Return how object is stored (by 'uuid', 'id', or None if not yet saved).
[ "Return", "how", "object", "is", "stored", "(", "by", "uuid", "id", "or", "None", "if", "not", "yet", "saved", ")", "." ]
def get_store_by(self, obj): """Return how object is stored (by 'uuid', 'id', or None if not yet saved). Certain Galaxy remote data features aren't available if objects are stored by 'id'. """ raise NotImplementedError()
[ "def", "get_store_by", "(", "self", ",", "obj", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/galaxyproject/galaxy/blob/4c03520f05062e0f4a1b3655dc0b7452fda69943/lib/galaxy/objectstore/__init__.py#L208-L213
namisan/mt-dnn
8564c8cfa971391187bd699116fbe4388438d62d
hnn/src/apps/hnn_model.py
python
BiLinearSim.forward
(self, src, tgt)
return output
[]
def forward(self, src, tgt): src_ = self.linear(src) output = torch.matmul(src_, tgt.transpose(2,1)) return output
[ "def", "forward", "(", "self", ",", "src", ",", "tgt", ")", ":", "src_", "=", "self", ".", "linear", "(", "src", ")", "output", "=", "torch", ".", "matmul", "(", "src_", ",", "tgt", ".", "transpose", "(", "2", ",", "1", ")", ")", "return", "output" ]
https://github.com/namisan/mt-dnn/blob/8564c8cfa971391187bd699116fbe4388438d62d/hnn/src/apps/hnn_model.py#L79-L82
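A shape-level sketch of the bilinear similarity computed by forward() above; batch size, sequence lengths, and hidden dimension are made up.

import torch

B, Ls, Lt, D = 2, 5, 7, 16
linear = torch.nn.Linear(D, D, bias=False)
src = torch.randn(B, Ls, D)
tgt = torch.randn(B, Lt, D)

# score[b, i, j] = (W src[b, i]) . tgt[b, j]
scores = torch.matmul(linear(src), tgt.transpose(2, 1))
print(scores.shape)                           # torch.Size([2, 5, 7])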
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/calendar.py
python
TextCalendar.formatweekheader
(self, width)
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
Return a header for a week.
Return a header for a week.
[ "Return", "a", "header", "for", "a", "week", "." ]
def formatweekheader(self, width): """ Return a header for a week. """ return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
[ "def", "formatweekheader", "(", "self", ",", "width", ")", ":", "return", "' '", ".", "join", "(", "self", ".", "formatweekday", "(", "i", ",", "width", ")", "for", "i", "in", "self", ".", "iterweekdays", "(", ")", ")" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/calendar.py#L297-L301
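Usage of the standard-library method above (day names depend on the current locale):

import calendar

cal = calendar.TextCalendar()
print(cal.formatweekheader(2))   # 'Mo Tu We Th Fr Sa Su'
print(cal.formatweekheader(9))   # full day names, each centered to width 9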
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/attack/rfi.py
python
RFIShell.execute
(self, command)
This method is called when a user writes a command in the shell and hits enter. Before calling this method, the framework calls the generic_user_input method from the shell class. :param command: The command to handle (i.e. "read", "exec", etc.). :return: The result of the command.
This method is called when a user writes a command in the shell and hits enter.
[ "This", "method", "is", "called", "when", "a", "user", "writes", "a", "command", "in", "the", "shell", "and", "hits", "enter", "." ]
def execute(self, command): """ This method is called when a user writes a command in the shell and hits enter. Before calling this method, the framework calls the generic_user_input method from the shell class. :param command: The command to handle ( ie. "read", "exec", etc ). :return: The result of the command. """ mutant = self._exploit_mutant.copy() uri = mutant.get_uri() uri.querystring.update([('cmd', [command])]) try: http_res = self._uri_opener.send_mutant(mutant) except BaseFrameworkException, w3: return 'Exception from the remote web application: "%s"' % w3 except Exception, e: return 'Unhandled exception from the remote web application: "%s"' % e else: return shell_handler.extract_result(http_res.get_body())
[ "def", "execute", "(", "self", ",", "command", ")", ":", "mutant", "=", "self", ".", "_exploit_mutant", ".", "copy", "(", ")", "uri", "=", "mutant", ".", "get_uri", "(", ")", "uri", ".", "querystring", ".", "update", "(", "[", "(", "'cmd'", ",", "[", "command", "]", ")", "]", ")", "try", ":", "http_res", "=", "self", ".", "_uri_opener", ".", "send_mutant", "(", "mutant", ")", "except", "BaseFrameworkException", ",", "w3", ":", "return", "'Exception from the remote web application: \"%s\"'", "%", "w3", "except", "Exception", ",", "e", ":", "return", "'Unhandled exception from the remote web application: \"%s\"'", "%", "e", "else", ":", "return", "shell_handler", ".", "extract_result", "(", "http_res", ".", "get_body", "(", ")", ")" ]
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/rfi.py#L466-L488
mozilla-services/socorro
8bff4a90e9e3320eabe7e067adbe0e89f6a39ba7
socorro/lib/sentry_client.py
python
capture_error
(logger=None, exc_info=None, extra=None)
Capture an error in sentry if enabled. :arg logger: the logger to use :arg exc_info: the exception information as a tuple like from `sys.exc_info` :arg extra: any extra information to send along as a dict
Capture an error in sentry if enabled.
[ "Capture", "an", "error", "in", "sentry", "if", "enabled", "." ]
def capture_error(logger=None, exc_info=None, extra=None): """Capture an error in sentry if enabled. :arg logger: the logger to use :arg exc_info: the exception information as a tuple like from `sys.exc_info` :arg extra: any extra information to send along as a dict """ logger = logger or logging.getLogger(__name__) exc_info = exc_info or sys.exc_info() if is_enabled(): extra = extra or {} try: # Get the configured Sentry hub hub = get_hub() with sentry_sdk.push_scope() as scope: for key, value in extra.items(): scope.set_extra(key, value) # Send the exception. identifier = hub.capture_exception(error=exc_info) logger.info("Error captured in Sentry! Reference: %s" % identifier) # At this point, if everything is good, the exceptions were # successfully sent to sentry and we can return. return except Exception: # Log the exception from trying to send the error to Sentry. logger.error("Unable to report error with Sentry", exc_info=True) # Sentry isn't configured or it's busted, so log the error we got that we # wanted to capture. logger.warning("Sentry DSN is not configured and an exception happened") logger.error("Exception occurred", exc_info=exc_info)
[ "def", "capture_error", "(", "logger", "=", "None", ",", "exc_info", "=", "None", ",", "extra", "=", "None", ")", ":", "logger", "=", "logger", "or", "logging", ".", "getLogger", "(", "__name__", ")", "exc_info", "=", "exc_info", "or", "sys", ".", "exc_info", "(", ")", "if", "is_enabled", "(", ")", ":", "extra", "=", "extra", "or", "{", "}", "try", ":", "# Get the configured Sentry hub", "hub", "=", "get_hub", "(", ")", "with", "sentry_sdk", ".", "push_scope", "(", ")", "as", "scope", ":", "for", "key", ",", "value", "in", "extra", ".", "items", "(", ")", ":", "scope", ".", "set_extra", "(", "key", ",", "value", ")", "# Send the exception.", "identifier", "=", "hub", ".", "capture_exception", "(", "error", "=", "exc_info", ")", "logger", ".", "info", "(", "\"Error captured in Sentry! Reference: %s\"", "%", "identifier", ")", "# At this point, if everything is good, the exceptions were", "# successfully sent to sentry and we can return.", "return", "except", "Exception", ":", "# Log the exception from trying to send the error to Sentry.", "logger", ".", "error", "(", "\"Unable to report error with Sentry\"", ",", "exc_info", "=", "True", ")", "# Sentry isn't configured or it's busted, so log the error we got that we", "# wanted to capture.", "logger", ".", "warning", "(", "\"Sentry DSN is not configured and an exception happened\"", ")", "logger", ".", "error", "(", "\"Exception occurred\"", ",", "exc_info", "=", "exc_info", ")" ]
https://github.com/mozilla-services/socorro/blob/8bff4a90e9e3320eabe7e067adbe0e89f6a39ba7/socorro/lib/sentry_client.py#L29-L66
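A reduced sketch of the same log-or-report pattern, assuming sentry_sdk's module-level push_scope/capture_exception API; DSN configuration and the is_enabled() gate are omitted.

import logging
import sys

import sentry_sdk

logger = logging.getLogger(__name__)

def capture_error_sketch(extra=None):
    exc_info = sys.exc_info()
    try:
        with sentry_sdk.push_scope() as scope:
            for key, value in (extra or {}).items():
                scope.set_extra(key, value)
            sentry_sdk.capture_exception(exc_info)
    except Exception:
        # Sentry unreachable or misconfigured: fall back to plain logging.
        logger.error("Unable to report error with Sentry", exc_info=True)
        logger.error("Exception occurred", exc_info=exc_info)

try:
    1 / 0
except ZeroDivisionError:
    capture_error_sketch(extra={"component": "demo"})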
sympy/sympy
d822fcba181155b85ff2b29fe525adbafb22b448
sympy/matrices/matrices.py
python
MatrixBase.irregular
(cls, ntop, *matrices, **kwargs)
return cls._new(rows)
Return a matrix filled by the given matrices which are listed in order of appearance from left to right, top to bottom as they first appear in the matrix. They must fill the matrix completely. Examples ======== >>> from sympy import ones, Matrix >>> Matrix.irregular(3, ones(2,1), ones(3,3)*2, ones(2,2)*3, ... ones(1,1)*4, ones(2,2)*5, ones(1,2)*6, ones(1,2)*7) Matrix([ [1, 2, 2, 2, 3, 3], [1, 2, 2, 2, 3, 3], [4, 2, 2, 2, 5, 5], [6, 6, 7, 7, 5, 5]])
Return a matrix filled by the given matrices which are listed in order of appearance from left to right, top to bottom as they first appear in the matrix. They must fill the matrix completely.
[ "Return", "a", "matrix", "filled", "by", "the", "given", "matrices", "which", "are", "listed", "in", "order", "of", "appearance", "from", "left", "to", "right", "top", "to", "bottom", "as", "they", "first", "appear", "in", "the", "matrix", ".", "They", "must", "fill", "the", "matrix", "completely", "." ]
def irregular(cls, ntop, *matrices, **kwargs): """Return a matrix filled by the given matrices which are listed in order of appearance from left to right, top to bottom as they first appear in the matrix. They must fill the matrix completely. Examples ======== >>> from sympy import ones, Matrix >>> Matrix.irregular(3, ones(2,1), ones(3,3)*2, ones(2,2)*3, ... ones(1,1)*4, ones(2,2)*5, ones(1,2)*6, ones(1,2)*7) Matrix([ [1, 2, 2, 2, 3, 3], [1, 2, 2, 2, 3, 3], [4, 2, 2, 2, 5, 5], [6, 6, 7, 7, 5, 5]]) """ ntop = as_int(ntop) # make sure we are working with explicit matrices b = [i.as_explicit() if hasattr(i, 'as_explicit') else i for i in matrices] q = list(range(len(b))) dat = [i.rows for i in b] active = [q.pop(0) for _ in range(ntop)] cols = sum([b[i].cols for i in active]) rows = [] while any(dat): r = [] for a, j in enumerate(active): r.extend(b[j][-dat[j], :]) dat[j] -= 1 if dat[j] == 0 and q: active[a] = q.pop(0) if len(r) != cols: raise ValueError(filldedent(''' Matrices provided do not appear to fill the space completely.''')) rows.append(r) return cls._new(rows)
[ "def", "irregular", "(", "cls", ",", "ntop", ",", "*", "matrices", ",", "*", "*", "kwargs", ")", ":", "ntop", "=", "as_int", "(", "ntop", ")", "# make sure we are working with explicit matrices", "b", "=", "[", "i", ".", "as_explicit", "(", ")", "if", "hasattr", "(", "i", ",", "'as_explicit'", ")", "else", "i", "for", "i", "in", "matrices", "]", "q", "=", "list", "(", "range", "(", "len", "(", "b", ")", ")", ")", "dat", "=", "[", "i", ".", "rows", "for", "i", "in", "b", "]", "active", "=", "[", "q", ".", "pop", "(", "0", ")", "for", "_", "in", "range", "(", "ntop", ")", "]", "cols", "=", "sum", "(", "[", "b", "[", "i", "]", ".", "cols", "for", "i", "in", "active", "]", ")", "rows", "=", "[", "]", "while", "any", "(", "dat", ")", ":", "r", "=", "[", "]", "for", "a", ",", "j", "in", "enumerate", "(", "active", ")", ":", "r", ".", "extend", "(", "b", "[", "j", "]", "[", "-", "dat", "[", "j", "]", ",", ":", "]", ")", "dat", "[", "j", "]", "-=", "1", "if", "dat", "[", "j", "]", "==", "0", "and", "q", ":", "active", "[", "a", "]", "=", "q", ".", "pop", "(", "0", ")", "if", "len", "(", "r", ")", "!=", "cols", ":", "raise", "ValueError", "(", "filldedent", "(", "'''\n Matrices provided do not appear to fill\n the space completely.'''", ")", ")", "rows", ".", "append", "(", "r", ")", "return", "cls", ".", "_new", "(", "rows", ")" ]
https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/matrices/matrices.py#L868-L907
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/databases/cremona.py
python
LargeCremonaDatabase._init_degphi
(self, ftpdata, largest_conductor=0)
Initialize the degphi table by reading the corresponding ftpdata files and importing them into the database. To create the large database from Cremona's text files, see sage.databases.cremona.build; do NOT run this method directly. EXAMPLES:: sage: d = sage.databases.cremona.LargeCremonaDatabase(name='cremona', read_only=False, rebuild=True) # not tested sage: d._init_degphi('.') # not tested
Initialize the degphi table by reading the corresponding ftpdata files and importing them into the database.
[ "Initialize", "the", "degphi", "table", "by", "reading", "the", "corresponding", "ftpdata", "files", "and", "importing", "them", "into", "the", "database", "." ]
def _init_degphi(self, ftpdata, largest_conductor=0): """ Initialize the degphi table by reading the corresponding ftpdata files and importing them into the database. To create the large database from Cremona's text files, see sage.databases.cremona.build, do NOT run this method directly. EXAMPLES:: sage: d = sage.databases.cremona.LargeCremonaDatabase(name='cremona', read_only=False, rebuild=True) # not tested sage: d._init_degphi('.') # not tested """ if self.__read_only__: raise RuntimeError("The database must not be read_only.") files = sorted(os.listdir(ftpdata)) name = "degphi" con = self.get_connection() for F in files: if not F[:len(name)] == name: continue print("Inserting", F) class_data = [] for L in open(ftpdata + "/" + F).readlines(): N, iso, num, degree, primes, curve = L.split() if largest_conductor and int(N) > largest_conductor: break class_data.append((degree,N+iso)) con.executemany('UPDATE t_class SET deg=? WHERE class=?', class_data) print("Committing...") self.commit() if largest_conductor and int(N) > largest_conductor: break
[ "def", "_init_degphi", "(", "self", ",", "ftpdata", ",", "largest_conductor", "=", "0", ")", ":", "if", "self", ".", "__read_only__", ":", "raise", "RuntimeError", "(", "\"The database must not be read_only.\"", ")", "files", "=", "sorted", "(", "os", ".", "listdir", "(", "ftpdata", ")", ")", "name", "=", "\"degphi\"", "con", "=", "self", ".", "get_connection", "(", ")", "for", "F", "in", "files", ":", "if", "not", "F", "[", ":", "len", "(", "name", ")", "]", "==", "name", ":", "continue", "print", "(", "\"Inserting\"", ",", "F", ")", "class_data", "=", "[", "]", "for", "L", "in", "open", "(", "ftpdata", "+", "\"/\"", "+", "F", ")", ".", "readlines", "(", ")", ":", "N", ",", "iso", ",", "num", ",", "degree", ",", "primes", ",", "curve", "=", "L", ".", "split", "(", ")", "if", "largest_conductor", "and", "int", "(", "N", ")", ">", "largest_conductor", ":", "break", "class_data", ".", "append", "(", "(", "degree", ",", "N", "+", "iso", ")", ")", "con", ".", "executemany", "(", "'UPDATE t_class SET deg=? WHERE class=?'", ",", "class_data", ")", "print", "(", "\"Committing...\"", ")", "self", ".", "commit", "(", ")", "if", "largest_conductor", "and", "int", "(", "N", ")", ">", "largest_conductor", ":", "break" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/databases/cremona.py#L1566-L1599
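The batch-UPDATE pattern from _init_degphi in isolation, using the standard-library sqlite3 module; table contents and degree values are illustrative, not real Cremona data.

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE t_class (class TEXT PRIMARY KEY, deg INTEGER)")
con.executemany("INSERT INTO t_class (class) VALUES (?)",
                [("11a",), ("14a",), ("15a",)])

# Accumulate (value, key) rows while parsing, then apply them in one batch.
class_data = [(1, "11a"), (2, "14a"), (4, "15a")]
con.executemany("UPDATE t_class SET deg=? WHERE class=?", class_data)
con.commit()
print(con.execute("SELECT class, deg FROM t_class ORDER BY class").fetchall())
# -> [('11a', 1), ('14a', 2), ('15a', 4)]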
dmlc/gluon-nlp
5d4bc9eba7226ea9f9aabbbd39e3b1e886547e48
scripts/machine_translation/evaluate_transformer.py
python
get_base_tokenizer
(method, lang)
The base tokenization method Parameters ---------- method lang Returns -------
The base tokenization method
[ "The", "base", "tokenization", "method" ]
def get_base_tokenizer(method, lang): """The base tokenization method Parameters ---------- method lang Returns ------- """ if method == 'moses': return tokenizers.create('moses', lang) elif method == 'whitespace': return tokenizers.create('whitespace') elif method == 'no': return None else: raise NotImplementedError
[ "def", "get_base_tokenizer", "(", "method", ",", "lang", ")", ":", "if", "method", "==", "'moses'", ":", "return", "tokenizers", ".", "create", "(", "'moses'", ",", "lang", ")", "elif", "method", "==", "'whitespace'", ":", "return", "tokenizers", ".", "create", "(", "'whitespace'", ")", "elif", "method", "==", "'no'", ":", "return", "None", "else", ":", "raise", "NotImplementedError" ]
https://github.com/dmlc/gluon-nlp/blob/5d4bc9eba7226ea9f9aabbbd39e3b1e886547e48/scripts/machine_translation/evaluate_transformer.py#L163-L183
vmware/vsphere-automation-sdk-python
ba7d4e0742f58a641dfed9538ecbbb1db4f3891e
samples/vsphere/common/vim/datastore_file.py
python
File.__init__
(self, parent=None, path=None, ftype=None)
[]
def __init__(self, parent=None, path=None, ftype=None): self._file_manager = None if isinstance(parent, vim.Datastore): # Iteratively look for the Datacenter parent self._datacenter_mo = get_datacenter_for_datastore(parent) self._datastore_mo = parent self._ftype = FOLDER if path: self._path = path else: self._path = '' elif isinstance(parent, File): self._datacenter_mo = parent._datacenter_mo self._datastore_mo = parent.datastore_mo self._ftype = ftype if parent._path == '': self._path = path else: self._path = '{}/{}'.format(parent._path, path) else: raise Exception( "Invalid type '{}' for datastore_file".format(type(parent)))
[ "def", "__init__", "(", "self", ",", "parent", "=", "None", ",", "path", "=", "None", ",", "ftype", "=", "None", ")", ":", "self", ".", "_file_manager", "=", "None", "if", "isinstance", "(", "parent", ",", "vim", ".", "Datastore", ")", ":", "# Iteratively look for the Datacenter parent", "self", ".", "_datacenter_mo", "=", "get_datacenter_for_datastore", "(", "parent", ")", "self", ".", "_datastore_mo", "=", "parent", "self", ".", "_ftype", "=", "FOLDER", "if", "path", ":", "self", ".", "_path", "=", "path", "else", ":", "self", ".", "_path", "=", "''", "elif", "isinstance", "(", "parent", ",", "File", ")", ":", "self", ".", "_datacenter_mo", "=", "parent", ".", "_datacenter_mo", "self", ".", "_datastore_mo", "=", "parent", ".", "datastore_mo", "self", ".", "_ftype", "=", "ftype", "if", "parent", ".", "_path", "==", "''", ":", "self", ".", "_path", "=", "path", "else", ":", "self", ".", "_path", "=", "'{}/{}'", ".", "format", "(", "parent", ".", "_path", ",", "path", ")", "else", ":", "raise", "Exception", "(", "\"Invalid type '{}' for datastore_file\"", ".", "format", "(", "type", "(", "parent", ")", ")", ")" ]
https://github.com/vmware/vsphere-automation-sdk-python/blob/ba7d4e0742f58a641dfed9538ecbbb1db4f3891e/samples/vsphere/common/vim/datastore_file.py#L102-L123
materialsproject/fireworks
83a907c19baf2a5c9fdcf63996f9797c3c85b785
fireworks/core/launchpad.py
python
LaunchPad.change_launch_dir
(self, launch_id, launch_dir)
Change the launch directory corresponding to the given launch id. Args: launch_id (int) launch_dir (str): path to the new launch directory.
Change the launch directory corresponding to the given launch id.
[ "Change", "the", "launch", "directory", "corresponding", "to", "the", "given", "launch", "id", "." ]
def change_launch_dir(self, launch_id, launch_dir): """ Change the launch directory corresponding to the given launch id. Args: launch_id (int) launch_dir (str): path to the new launch directory. """ m_launch = self.get_launch_by_id(launch_id) m_launch.launch_dir = launch_dir self.launches.find_one_and_replace({"launch_id": m_launch.launch_id}, m_launch.to_db_dict(), upsert=True)
[ "def", "change_launch_dir", "(", "self", ",", "launch_id", ",", "launch_dir", ")", ":", "m_launch", "=", "self", ".", "get_launch_by_id", "(", "launch_id", ")", "m_launch", ".", "launch_dir", "=", "launch_dir", "self", ".", "launches", ".", "find_one_and_replace", "(", "{", "\"launch_id\"", ":", "m_launch", ".", "launch_id", "}", ",", "m_launch", ".", "to_db_dict", "(", ")", ",", "upsert", "=", "True", ")" ]
https://github.com/materialsproject/fireworks/blob/83a907c19baf2a5c9fdcf63996f9797c3c85b785/fireworks/core/launchpad.py#L1500-L1510
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py
python
MSExecutionContext.get_result_proxy
(self)
[]
def get_result_proxy(self): if self._result_proxy: return self._result_proxy else: return engine.ResultProxy(self)
[ "def", "get_result_proxy", "(", "self", ")", ":", "if", "self", ".", "_result_proxy", ":", "return", "self", ".", "_result_proxy", "else", ":", "return", "engine", ".", "ResultProxy", "(", "self", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/base.py#L1233-L1237
mit-han-lab/data-efficient-gans
6858275f08f43a33026844c8c2ac4e703e8a07ba
DiffAugment-stylegan2/training/dataset_tool.py
python
TFRecordExporter.add_image
(self, img)
[]
def add_image(self, img): if self.print_progress and self.cur_images % self.progress_interval == 0: print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True) if self.shape is None: self.set_shape(img.shape) assert img.shape == self.shape for lod, tfr_writer in enumerate(self.tfr_writers): if lod: img = img.astype(np.float32) img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25 quant = np.rint(img).clip(0, 255).astype(np.uint8) ex = tf.train.Example(features=tf.train.Features(feature={ 'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)), 'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))})) tfr_writer.write(ex.SerializeToString()) self.cur_images += 1
[ "def", "add_image", "(", "self", ",", "img", ")", ":", "if", "self", ".", "print_progress", "and", "self", ".", "cur_images", "%", "self", ".", "progress_interval", "==", "0", ":", "print", "(", "'%d / %d\\r'", "%", "(", "self", ".", "cur_images", ",", "self", ".", "expected_images", ")", ",", "end", "=", "''", ",", "flush", "=", "True", ")", "if", "self", ".", "shape", "is", "None", ":", "self", ".", "set_shape", "(", "img", ".", "shape", ")", "assert", "img", ".", "shape", "==", "self", ".", "shape", "for", "lod", ",", "tfr_writer", "in", "enumerate", "(", "self", ".", "tfr_writers", ")", ":", "if", "lod", ":", "img", "=", "img", ".", "astype", "(", "np", ".", "float32", ")", "img", "=", "(", "img", "[", ":", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", "]", "+", "img", "[", ":", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", "]", "+", "img", "[", ":", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", "]", "+", "img", "[", ":", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", "]", ")", "*", "0.25", "quant", "=", "np", ".", "rint", "(", "img", ")", ".", "clip", "(", "0", ",", "255", ")", ".", "astype", "(", "np", ".", "uint8", ")", "ex", "=", "tf", ".", "train", ".", "Example", "(", "features", "=", "tf", ".", "train", ".", "Features", "(", "feature", "=", "{", "'shape'", ":", "tf", ".", "train", ".", "Feature", "(", "int64_list", "=", "tf", ".", "train", ".", "Int64List", "(", "value", "=", "quant", ".", "shape", ")", ")", ",", "'data'", ":", "tf", ".", "train", ".", "Feature", "(", "bytes_list", "=", "tf", ".", "train", ".", "BytesList", "(", "value", "=", "[", "quant", ".", "tostring", "(", ")", "]", ")", ")", "}", ")", ")", "tfr_writer", ".", "write", "(", "ex", ".", "SerializeToString", "(", ")", ")", "self", ".", "cur_images", "+=", "1" ]
https://github.com/mit-han-lab/data-efficient-gans/blob/6858275f08f43a33026844c8c2ac4e703e8a07ba/DiffAugment-stylegan2/training/dataset_tool.py#L74-L89
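A NumPy sketch of the stride-2 box filter add_image() uses to build each lower level of detail; the 3x8x8 CHW array is a toy input.

import numpy as np

img = np.arange(3 * 8 * 8, dtype=np.float32).reshape(3, 8, 8)

# Average each non-overlapping 2x2 block, exactly as in the loop above.
lower = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] +
         img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
print(lower.shape)                                    # (3, 4, 4)

quant = np.rint(lower).clip(0, 255).astype(np.uint8)  # quantize for storage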
amiller/HoneyBadgerBFT
78b66448a00c2ab0645d7a70a62f687e62ae27f4
core/broadcasts.py
python
shared_coin
(instance, pid, N, t, broadcast, receive)
return getCoin
A dummy version of the Shared Coin :param pid: my id number :param N: the number of parties :param t: the number of byzantine parties :param broadcast: broadcast channel :param receive: receive channel :return: yield values b
A dummy version of the Shared Coin :param pid: my id number :param N: the number of parties :param t: the number of byzantine parties :param broadcast: broadcast channel :param receive: receive channel :return: yield values b
[ "A", "dummy", "version", "of", "the", "Shared", "Coin", ":", "param", "pid", ":", "my", "id", "number", ":", "param", "N", ":", "the", "number", "of", "parties", ":", "param", "t", ":", "the", "number", "of", "byzantine", "parties", ":", "param", "broadcast", ":", "broadcast", "channel", ":", "param", "receive", ":", "receive", "channel", ":", "return", ":", "yield", "values", "b" ]
def shared_coin(instance, pid, N, t, broadcast, receive): ''' A dummy version of the Shared Coin :param pid: my id number :param N: the number of parties :param t: the number of byzantine parties :param broadcast: broadcast channel :param receive: receive channel :return: yield values b ''' received = defaultdict(set) outputQueue = defaultdict(lambda: Queue(1)) PK, SKs = getKeys() def _recv(): while True: # New shares for some round r (i, (r, sig)) = receive() assert i in range(N) assert r >= 0 received[r].add((i, serialize(sig))) # After reaching the threshold, compute the output and # make it available locally if len(received[r]) == t + 1: h = PK.hash_message(str((r, instance))) def tmpFunc(r, t): # Verify and get the combined signature s = combine_and_verify(h, dict(tuple((t, deserialize1(sig)) for t, sig in received[r])[:t+1])) outputQueue[r].put(ord(s[0]) & 1) # explicitly convert to int Greenlet( tmpFunc, r, t ).start() greenletPacker(Greenlet(_recv), 'shared_coin_dummy', (pid, N, t, broadcast, receive)).start() def getCoin(round): broadcast((round, SKs[pid].sign(PK.hash_message(str((round,instance)))))) # I have to do mapping to 1..l return outputQueue[round].get() return getCoin
[ "def", "shared_coin", "(", "instance", ",", "pid", ",", "N", ",", "t", ",", "broadcast", ",", "receive", ")", ":", "received", "=", "defaultdict", "(", "set", ")", "outputQueue", "=", "defaultdict", "(", "lambda", ":", "Queue", "(", "1", ")", ")", "PK", ",", "SKs", "=", "getKeys", "(", ")", "def", "_recv", "(", ")", ":", "while", "True", ":", "# New shares for some round r", "(", "i", ",", "(", "r", ",", "sig", ")", ")", "=", "receive", "(", ")", "assert", "i", "in", "range", "(", "N", ")", "assert", "r", ">=", "0", "received", "[", "r", "]", ".", "add", "(", "(", "i", ",", "serialize", "(", "sig", ")", ")", ")", "# After reaching the threshold, compute the output and", "# make it available locally", "if", "len", "(", "received", "[", "r", "]", ")", "==", "t", "+", "1", ":", "h", "=", "PK", ".", "hash_message", "(", "str", "(", "(", "r", ",", "instance", ")", ")", ")", "def", "tmpFunc", "(", "r", ",", "t", ")", ":", "# Verify and get the combined signature", "s", "=", "combine_and_verify", "(", "h", ",", "dict", "(", "tuple", "(", "(", "t", ",", "deserialize1", "(", "sig", ")", ")", "for", "t", ",", "sig", "in", "received", "[", "r", "]", ")", "[", ":", "t", "+", "1", "]", ")", ")", "outputQueue", "[", "r", "]", ".", "put", "(", "ord", "(", "s", "[", "0", "]", ")", "&", "1", ")", "# explicitly convert to int", "Greenlet", "(", "tmpFunc", ",", "r", ",", "t", ")", ".", "start", "(", ")", "greenletPacker", "(", "Greenlet", "(", "_recv", ")", ",", "'shared_coin_dummy'", ",", "(", "pid", ",", "N", ",", "t", ",", "broadcast", ",", "receive", ")", ")", ".", "start", "(", ")", "def", "getCoin", "(", "round", ")", ":", "broadcast", "(", "(", "round", ",", "SKs", "[", "pid", "]", ".", "sign", "(", "PK", ".", "hash_message", "(", "str", "(", "(", "round", ",", "instance", ")", ")", ")", ")", ")", ")", "# I have to do mapping to 1..l", "return", "outputQueue", "[", "round", "]", ".", "get", "(", ")", "return", "getCoin" ]
https://github.com/amiller/HoneyBadgerBFT/blob/78b66448a00c2ab0645d7a70a62f687e62ae27f4/core/broadcasts.py#L78-L117
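The record already calls this a dummy coin; the toy below strips it further to show just the interface contract (every party asking about round r sees the same bit) by hashing (instance, round) deterministically. No threshold signatures, no network, no security: illustration only.

import hashlib

def make_toy_coin(instance):
    def get_coin(round_no):
        digest = hashlib.sha256(repr((instance, round_no)).encode()).digest()
        return digest[0] & 1          # common pseudorandom bit for this round
    return get_coin

coin = make_toy_coin(instance=7)
print([coin(r) for r in range(8)])    # identical for any party sharing instance=7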
netaddr/netaddr
e84688f7034b7a88ac00a676359be57eb7a78184
netaddr/ip/__init__.py
python
smallest_matching_cidr
(ip, cidrs)
return match
Matches an IP address or subnet against a given sequence of IP addresses and subnets. :param ip: a single IP address or subnet. :param cidrs: a sequence of IP addresses and/or subnets. :return: the smallest (most specific) matching IPAddress or IPNetwork object from the provided sequence, None if there was no match.
Matches an IP address or subnet against a given sequence of IP addresses and subnets.
[ "Matches", "an", "IP", "address", "or", "subnet", "against", "a", "given", "sequence", "of", "IP", "addresses", "and", "subnets", "." ]
def smallest_matching_cidr(ip, cidrs): """ Matches an IP address or subnet against a given sequence of IP addresses and subnets. :param ip: a single IP address or subnet. :param cidrs: a sequence of IP addresses and/or subnets. :return: the smallest (most specific) matching IPAddress or IPNetwork object from the provided sequence, None if there was no match. """ match = None if not hasattr(cidrs, '__iter__'): raise TypeError('IP address/subnet sequence expected, not %r!' % (cidrs,)) ip = IPAddress(ip) for cidr in sorted([IPNetwork(cidr) for cidr in cidrs]): if ip in cidr: match = cidr else: if match is not None and cidr.network not in match: break return match
[ "def", "smallest_matching_cidr", "(", "ip", ",", "cidrs", ")", ":", "match", "=", "None", "if", "not", "hasattr", "(", "cidrs", ",", "'__iter__'", ")", ":", "raise", "TypeError", "(", "'IP address/subnet sequence expected, not %r!'", "%", "(", "cidrs", ",", ")", ")", "ip", "=", "IPAddress", "(", "ip", ")", "for", "cidr", "in", "sorted", "(", "[", "IPNetwork", "(", "cidr", ")", "for", "cidr", "in", "cidrs", "]", ")", ":", "if", "ip", "in", "cidr", ":", "match", "=", "cidr", "else", ":", "if", "match", "is", "not", "None", "and", "cidr", ".", "network", "not", "in", "match", ":", "break", "return", "match" ]
https://github.com/netaddr/netaddr/blob/e84688f7034b7a88ac00a676359be57eb7a78184/netaddr/ip/__init__.py#L1831-L1857
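A quick usage sketch for the record above; smallest_matching_cidr is exported at the netaddr top level, and the addresses are the usual documentation ranges:

    from netaddr import smallest_matching_cidr

    # 192.0.2.8 sits inside both subnets; the /28 is the more specific match.
    print(smallest_matching_cidr('192.0.2.8', ['192.0.2.0/24', '192.0.2.0/28']))
    # -> 192.0.2.0/28
    print(smallest_matching_cidr('198.51.100.1', ['192.0.2.0/24']))
    # -> None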
vivisect/vivisect
37b0b655d8dedfcf322e86b0f144b096e48d547e
vstruct/defs/dns.py
python
DnsMessage.getAuthorityRecords
(self)
return self._getResourceRecords(structure=self.section.authority)
Return a list of Authority records as (rrtype, dnsclass, ttl, fqdn, rdata) tuples. If a parser is available for the dnsclass, the 'rdata' field will be further parsed into its components (as a tuple if necessary).
Return a list of Authority records as (rrtype, dnsclass, ttl, fqdn, rdata) tuples. If a parser is available for the dnsclass, the 'rdata' field will be further parsed into its components (as a tuple if necessary).
[ "Return", "a", "list", "of", "Authority", "records", "as", "(", "rrtype", "dnsclass", "ttl", "fqdn", "rdata", ")", "tuples", ".", "If", "a", "parser", "is", "available", "for", "the", "dnsclass", "the", "rdata", "field", "will", "be", "further", "parsed", "into", "its", "components", "(", "as", "a", "tuple", "if", "necessary", ")", "." ]
def getAuthorityRecords(self): ''' Return a list of Authority records as (rrtype, dnsclass, ttl, fqdn, rdata) tuples. If a parser is available for the dnsclass, the 'rdata' field will be further parsed into its components (as a tuple if necessary). ''' return self._getResourceRecords(structure=self.section.authority)
[ "def", "getAuthorityRecords", "(", "self", ")", ":", "return", "self", ".", "_getResourceRecords", "(", "structure", "=", "self", ".", "section", ".", "authority", ")" ]
https://github.com/vivisect/vivisect/blob/37b0b655d8dedfcf322e86b0f144b096e48d547e/vstruct/defs/dns.py#L484-L491
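A hedged usage sketch for the record above; vsParse is vstruct's generic parse entry point, and raw_reply stands in for the bytes of a captured DNS response (a placeholder, not supplied here):

    from vstruct.defs.dns import DnsMessage

    msg = DnsMessage()
    msg.vsParse(raw_reply)  # raw_reply: bytes of a DNS response (placeholder)
    for rrtype, dnsclass, ttl, fqdn, rdata in msg.getAuthorityRecords():
        print(fqdn, ttl, rdata)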
iclavera/learning_to_adapt
bd7d99ba402521c96631e7d09714128f549db0f1
learning_to_adapt/mujoco_py/glfw.py
python
set_window_size
(window, width, height)
Sets the size of the client area of the specified window. Wrapper for: void glfwSetWindowSize(GLFWwindow* window, int width, int height);
Sets the size of the client area of the specified window.
[ "Sets", "the", "size", "of", "the", "client", "area", "of", "the", "specified", "window", "." ]
def set_window_size(window, width, height): ''' Sets the size of the client area of the specified window. Wrapper for: void glfwSetWindowSize(GLFWwindow* window, int width, int height); ''' _glfw.glfwSetWindowSize(window, width, height)
[ "def", "set_window_size", "(", "window", ",", "width", ",", "height", ")", ":", "_glfw", ".", "glfwSetWindowSize", "(", "window", ",", "width", ",", "height", ")" ]
https://github.com/iclavera/learning_to_adapt/blob/bd7d99ba402521c96631e7d09714128f549db0f1/learning_to_adapt/mujoco_py/glfw.py#L896-L903
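A usage sketch for the wrapper above, assuming the module also exposes the usual GLFW companions (init, create_window, terminate) under the same snake_case convention:

    from learning_to_adapt.mujoco_py import glfw  # module path per the record

    if glfw.init():  # init/create_window/terminate are assumed companions
        window = glfw.create_window(640, 480, 'demo', None, None)
        glfw.set_window_size(window, 800, 600)  # resize the client area
        glfw.terminate()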
pyscf/pyscf
0adfb464333f5ceee07b664f291d4084801bae64
pyscf/gto/mole.py
python
offset_2c_by_atom
(mol)
return aoslice_by_atom(mol, mol.ao_loc_2c())
2-component AO offset for each atom. Return a list, each item of the list gives (start-shell-id, stop-shell-id, start-AO-id, stop-AO-id)
2-component AO offset for each atom. Return a list, each item of the list gives (start-shell-id, stop-shell-id, start-AO-id, stop-AO-id)
[ "2", "-", "component", "AO", "offset", "for", "each", "atom", ".", "Return", "a", "list", "each", "item", "of", "the", "list", "gives", "(", "start", "-", "shell", "-", "id", "stop", "-", "shell", "-", "id", "start", "-", "AO", "-", "id", "stop", "-", "AO", "-", "id", ")" ]
def offset_2c_by_atom(mol): '''2-component AO offset for each atom. Return a list, each item of the list gives (start-shell-id, stop-shell-id, start-AO-id, stop-AO-id) ''' return aoslice_by_atom(mol, mol.ao_loc_2c())
[ "def", "offset_2c_by_atom", "(", "mol", ")", ":", "return", "aoslice_by_atom", "(", "mol", ",", "mol", ".", "ao_loc_2c", "(", ")", ")" ]
https://github.com/pyscf/pyscf/blob/0adfb464333f5ceee07b664f291d4084801bae64/pyscf/gto/mole.py#L1648-L1652
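A usage sketch for the record above; gto.M builds a molecule, and each returned row follows the aoslice_by_atom convention named in the docstring:

    from pyscf import gto

    mol = gto.M(atom='H 0 0 0; F 0 0 0.9', basis='sto-3g')
    # One row per atom: (start shell, stop shell, start 2c-AO, stop 2c-AO).
    for ia, (sh0, sh1, ao0, ao1) in enumerate(gto.mole.offset_2c_by_atom(mol)):
        print(ia, sh0, sh1, ao0, ao1)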
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/op2/tables/oes_stressStrain/oes.py
python
OES._oes_cbar_34
(self, data: bytes, ndata: int, dt: Any, is_magnitude_phase: bool, result_type: str, prefix: str, postfix: str)
return n, nelements, ntotal
reads stress/strain for element type: - 34 : CBAR
reads stress/strain for element type: - 34 : CBAR
[ "reads", "stress", "/", "strain", "for", "element", "type", ":", "-", "34", ":", "CBAR" ]
def _oes_cbar_34(self, data: bytes, ndata: int, dt: Any, is_magnitude_phase: bool, result_type: str, prefix: str, postfix: str) -> Tuple[int, int, int]: """ reads stress/strain for element type: - 34 : CBAR """ op2 = self.op2 #if isinstance(op2.nonlinear_factor, float): #op2.sort_bits[0] = 1 # sort2 #op2.sort_method = 2 n = 0 if op2.is_stress: result_name = prefix + 'cbar_stress' + postfix else: result_name = prefix + 'cbar_strain' + postfix if op2._results.is_not_saved(result_name): return ndata, None, None op2._results._found_result(result_name) slot = op2.get_result(result_name) if result_type == 0 and op2.num_wide == 16: # real if op2.is_stress: obj_vector_real = RealBarStressArray else: obj_vector_real = RealBarStrainArray ntotal = 64 * self.factor # 16*4 nelements = ndata // ntotal #print('CBAR nelements =', nelements) auto_return, is_vectorized = op2._create_oes_object4( nelements, result_name, slot, obj_vector_real) if auto_return: return ndata, None, None if op2.is_debug_file: op2.binary_debug.write(' [cap, element1, element2, ..., cap]\n') #op2.binary_debug.write(' cap = %i # assume 1 cap when there could have been multiple\n' % ndata) op2.binary_debug.write(' #elementi = [eid_device, s1a, s2a, s3a, s4a, axial, smaxa, smina, MSt,\n') op2.binary_debug.write(' s1b, s2b, s3b, s4b, smaxb, sminb, MSc]\n') op2.binary_debug.write(' nelements=%i; nnodes=1 # centroid\n' % nelements) obj = op2.obj if op2.use_vector and is_vectorized and op2.sort_method == 1: # self.itime = 0 # self.ielement = 0 # self.itotal = 0 #self.ntimes = 0 #self.nelements = 0 n = nelements * op2.num_wide * 4 ielement = obj.ielement ielement2 = ielement + nelements obj._times[obj.itime] = dt self.obj_set_element(obj, ielement, ielement2, data, nelements) floats = frombuffer(data, dtype=op2.fdtype8).reshape(nelements, 16) #[s1a, s2a, s3a, s4a, axial, smaxa, smina, margin_tension, # s1b, s2b, s3b, s4b, smaxb, sminb, margin_compression] obj.data[obj.itime, ielement:ielement2, :] = floats[:, 1:].copy() obj.itotal = ielement2 obj.ielement = ielement2 else: if is_vectorized and op2.use_vector: # pragma: no cover op2.log.debug('vectorize CBAR real SORT%s' % op2.sort_method) n = oes_cbar_real_16(op2, data, obj, nelements, ntotal, dt) elif result_type == 1 and op2.num_wide == 19: # imag if op2.is_stress: obj_vector_complex = ComplexBarStressArray else: obj_vector_complex = ComplexBarStrainArray ntotal = 76 * self.factor nelements = ndata // ntotal auto_return, is_vectorized = op2._create_oes_object4( nelements, result_name, slot, obj_vector_complex) if auto_return: return ndata, None, None if op2.is_debug_file: op2.binary_debug.write(' [cap, element1, element2, ..., cap]\n') #op2.binary_debug.write(' cap = %i # assume 1 cap when there could have been multiple\n' % ndata) op2.binary_debug.write(' #elementi = [eid_device, s1a, s2a, s3a, s4a, axial,\n') op2.binary_debug.write(' s1b, s2b, s3b, s4b]\n') op2.binary_debug.write(' nelements=%i; nnodes=1 # centroid\n' % nelements) obj = op2.obj if op2.use_vector and is_vectorized and op2.sort_method == 1: n = nelements * ntotal itotal = obj.itotal itotal2 = itotal + nelements ielement2 = itotal2 floats = frombuffer(data, dtype=op2.fdtype8).reshape(nelements, 19).copy() obj._times[obj.itime] = dt self.obj_set_element(obj, itotal, itotal2, data, nelements) isave1 = [1, 2, 3, 4, 5, 11, 12, 13, 14] isave2 = [6, 7, 8, 9, 10, 15, 16, 17, 18] real_imag = apply_mag_phase(floats, is_magnitude_phase, isave1, isave2) obj.data[obj.itime, itotal:itotal2, :] = real_imag obj.itotal = itotal2 
obj.ielement = ielement2 else: if is_vectorized and op2.use_vector: # pragma: no cover op2.log.debug('vectorize CBAR imag SORT%s' % op2.sort_method) n = oes_cbar_complex_19(op2, data, obj, nelements, ntotal, is_magnitude_phase) elif result_type == 2 and op2.num_wide == 19: # random strain? raise RuntimeError(op2.code_information()) elif result_type in [1, 2] and op2.num_wide == 10: # random # random stress/strain per example # # DMAP says random stress has num_wide=10 and # random strain has numwide=19, but it's wrong...maybe??? # # format_code = 1 - NO/RMS (SORT1 regardless of whether this is a SORT2 table or not) # format_code = 2 - ATO/PSD/CRM (actually SORT2) # element_id = op2.nonlinear_factor if op2.is_stress: obj_vector_random = RandomBarStressArray else: obj_vector_random = RandomBarStrainArray op2.data_code['nonlinear_factor'] = element_id ntotal = 10 * self.size nelements = ndata // ntotal #print(f'CBAR* nelements={nelements}') auto_return, is_vectorized = op2._create_oes_object4( nelements, result_name, slot, obj_vector_random) if auto_return: return ndata, None, None if op2.is_debug_file: op2.binary_debug.write(' [cap, element1, element2, ..., cap]\n') #op2.binary_debug.write(' cap = %i # assume 1 cap when there could have been multiple\n' % ndata) op2.binary_debug.write(' #elementi = [eid_device, s1a, s2a, s3a, s4a, axial,\n') op2.binary_debug.write(' s1b, s2b, s3b, s4b]\n') op2.binary_debug.write(' nelements=%i; nnodes=1 # centroid\n' % nelements) obj = op2.obj if op2.use_vector and is_vectorized and 0: # pragma: no cover # self.itime = 0 # self.ielement = 0 # self.itotal = 0 #self.ntimes = 0 #self.nelements = 0 n = nelements * ntotal ielement = obj.ielement ielement2 = ielement + nelements obj._times[obj.itime] = dt self.obj_set_element(obj, itotal, itotal2, data, nelements) floats = frombuffer(data, dtype=op2.fdtype).reshape(nelements, 10) #[s1a, s2a, s3a, s4a, axial, # s1b, s2b, s3b, s4b] obj.data[obj.itime, ielement:ielement2, :] = floats[:, 1:].copy() obj.itotal = ielement2 obj.ielement = ielement2 else: if is_vectorized and op2.use_vector and obj.itime == 0: # pragma: no cover op2.log.debug('vectorize CBAR random SORT%s' % op2.sort_method) n = oes_cbar_random_10(op2, data, obj, nelements, ntotal) else: # pragma: no cover raise RuntimeError(op2.code_information()) return n, nelements, ntotal
[ "def", "_oes_cbar_34", "(", "self", ",", "data", ":", "bytes", ",", "ndata", ":", "int", ",", "dt", ":", "Any", ",", "is_magnitude_phase", ":", "bool", ",", "result_type", ":", "str", ",", "prefix", ":", "str", ",", "postfix", ":", "str", ")", "->", "Tuple", "[", "int", ",", "int", ",", "int", "]", ":", "op2", "=", "self", ".", "op2", "#if isinstance(op2.nonlinear_factor, float):", "#op2.sort_bits[0] = 1 # sort2", "#op2.sort_method = 2", "n", "=", "0", "if", "op2", ".", "is_stress", ":", "result_name", "=", "prefix", "+", "'cbar_stress'", "+", "postfix", "else", ":", "result_name", "=", "prefix", "+", "'cbar_strain'", "+", "postfix", "if", "op2", ".", "_results", ".", "is_not_saved", "(", "result_name", ")", ":", "return", "ndata", ",", "None", ",", "None", "op2", ".", "_results", ".", "_found_result", "(", "result_name", ")", "slot", "=", "op2", ".", "get_result", "(", "result_name", ")", "if", "result_type", "==", "0", "and", "op2", ".", "num_wide", "==", "16", ":", "# real", "if", "op2", ".", "is_stress", ":", "obj_vector_real", "=", "RealBarStressArray", "else", ":", "obj_vector_real", "=", "RealBarStrainArray", "ntotal", "=", "64", "*", "self", ".", "factor", "# 16*4", "nelements", "=", "ndata", "//", "ntotal", "#print('CBAR nelements =', nelements)", "auto_return", ",", "is_vectorized", "=", "op2", ".", "_create_oes_object4", "(", "nelements", ",", "result_name", ",", "slot", ",", "obj_vector_real", ")", "if", "auto_return", ":", "return", "ndata", ",", "None", ",", "None", "if", "op2", ".", "is_debug_file", ":", "op2", ".", "binary_debug", ".", "write", "(", "' [cap, element1, element2, ..., cap]\\n'", ")", "#op2.binary_debug.write(' cap = %i # assume 1 cap when there could have been multiple\\n' % ndata)", "op2", ".", "binary_debug", ".", "write", "(", "' #elementi = [eid_device, s1a, s2a, s3a, s4a, axial, smaxa, smina, MSt,\\n'", ")", "op2", ".", "binary_debug", ".", "write", "(", "' s1b, s2b, s3b, s4b, smaxb, sminb, MSc]\\n'", ")", "op2", ".", "binary_debug", ".", "write", "(", "' nelements=%i; nnodes=1 # centroid\\n'", "%", "nelements", ")", "obj", "=", "op2", ".", "obj", "if", "op2", ".", "use_vector", "and", "is_vectorized", "and", "op2", ".", "sort_method", "==", "1", ":", "# self.itime = 0", "# self.ielement = 0", "# self.itotal = 0", "#self.ntimes = 0", "#self.nelements = 0", "n", "=", "nelements", "*", "op2", ".", "num_wide", "*", "4", "ielement", "=", "obj", ".", "ielement", "ielement2", "=", "ielement", "+", "nelements", "obj", ".", "_times", "[", "obj", ".", "itime", "]", "=", "dt", "self", ".", "obj_set_element", "(", "obj", ",", "ielement", ",", "ielement2", ",", "data", ",", "nelements", ")", "floats", "=", "frombuffer", "(", "data", ",", "dtype", "=", "op2", ".", "fdtype8", ")", ".", "reshape", "(", "nelements", ",", "16", ")", "#[s1a, s2a, s3a, s4a, axial, smaxa, smina, margin_tension,", "# s1b, s2b, s3b, s4b, smaxb, sminb, margin_compression]", "obj", ".", "data", "[", "obj", ".", "itime", ",", "ielement", ":", "ielement2", ",", ":", "]", "=", "floats", "[", ":", ",", "1", ":", "]", ".", "copy", "(", ")", "obj", ".", "itotal", "=", "ielement2", "obj", ".", "ielement", "=", "ielement2", "else", ":", "if", "is_vectorized", "and", "op2", ".", "use_vector", ":", "# pragma: no cover", "op2", ".", "log", ".", "debug", "(", "'vectorize CBAR real SORT%s'", "%", "op2", ".", "sort_method", ")", "n", "=", "oes_cbar_real_16", "(", "op2", ",", "data", ",", "obj", ",", "nelements", ",", "ntotal", ",", "dt", ")", "elif", "result_type", "==", "1", "and", "op2", ".", "num_wide", 
"==", "19", ":", "# imag", "if", "op2", ".", "is_stress", ":", "obj_vector_complex", "=", "ComplexBarStressArray", "else", ":", "obj_vector_complex", "=", "ComplexBarStrainArray", "ntotal", "=", "76", "*", "self", ".", "factor", "nelements", "=", "ndata", "//", "ntotal", "auto_return", ",", "is_vectorized", "=", "op2", ".", "_create_oes_object4", "(", "nelements", ",", "result_name", ",", "slot", ",", "obj_vector_complex", ")", "if", "auto_return", ":", "return", "ndata", ",", "None", ",", "None", "if", "op2", ".", "is_debug_file", ":", "op2", ".", "binary_debug", ".", "write", "(", "' [cap, element1, element2, ..., cap]\\n'", ")", "#op2.binary_debug.write(' cap = %i # assume 1 cap when there could have been multiple\\n' % ndata)", "op2", ".", "binary_debug", ".", "write", "(", "' #elementi = [eid_device, s1a, s2a, s3a, s4a, axial,\\n'", ")", "op2", ".", "binary_debug", ".", "write", "(", "' s1b, s2b, s3b, s4b]\\n'", ")", "op2", ".", "binary_debug", ".", "write", "(", "' nelements=%i; nnodes=1 # centroid\\n'", "%", "nelements", ")", "obj", "=", "op2", ".", "obj", "if", "op2", ".", "use_vector", "and", "is_vectorized", "and", "op2", ".", "sort_method", "==", "1", ":", "n", "=", "nelements", "*", "ntotal", "itotal", "=", "obj", ".", "itotal", "itotal2", "=", "itotal", "+", "nelements", "ielement2", "=", "itotal2", "floats", "=", "frombuffer", "(", "data", ",", "dtype", "=", "op2", ".", "fdtype8", ")", ".", "reshape", "(", "nelements", ",", "19", ")", ".", "copy", "(", ")", "obj", ".", "_times", "[", "obj", ".", "itime", "]", "=", "dt", "self", ".", "obj_set_element", "(", "obj", ",", "itotal", ",", "itotal2", ",", "data", ",", "nelements", ")", "isave1", "=", "[", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "11", ",", "12", ",", "13", ",", "14", "]", "isave2", "=", "[", "6", ",", "7", ",", "8", ",", "9", ",", "10", ",", "15", ",", "16", ",", "17", ",", "18", "]", "real_imag", "=", "apply_mag_phase", "(", "floats", ",", "is_magnitude_phase", ",", "isave1", ",", "isave2", ")", "obj", ".", "data", "[", "obj", ".", "itime", ",", "itotal", ":", "itotal2", ",", ":", "]", "=", "real_imag", "obj", ".", "itotal", "=", "itotal2", "obj", ".", "ielement", "=", "ielement2", "else", ":", "if", "is_vectorized", "and", "op2", ".", "use_vector", ":", "# pragma: no cover", "op2", ".", "log", ".", "debug", "(", "'vectorize CBAR imag SORT%s'", "%", "op2", ".", "sort_method", ")", "n", "=", "oes_cbar_complex_19", "(", "op2", ",", "data", ",", "obj", ",", "nelements", ",", "ntotal", ",", "is_magnitude_phase", ")", "elif", "result_type", "==", "2", "and", "op2", ".", "num_wide", "==", "19", ":", "# random strain?", "raise", "RuntimeError", "(", "op2", ".", "code_information", "(", ")", ")", "elif", "result_type", "in", "[", "1", ",", "2", "]", "and", "op2", ".", "num_wide", "==", "10", ":", "# random", "# random stress/strain per example", "#", "# DMAP says random stress has num_wide=10 and", "# random strain has numwide=19, but it's wrong...maybe???", "#", "# format_code = 1 - NO/RMS (SORT1 regardless of whether this is a SORT2 table or not)", "# format_code = 2 - ATO/PSD/CRM (actually SORT2)", "#", "element_id", "=", "op2", ".", "nonlinear_factor", "if", "op2", ".", "is_stress", ":", "obj_vector_random", "=", "RandomBarStressArray", "else", ":", "obj_vector_random", "=", "RandomBarStrainArray", "op2", ".", "data_code", "[", "'nonlinear_factor'", "]", "=", "element_id", "ntotal", "=", "10", "*", "self", ".", "size", "nelements", "=", "ndata", "//", "ntotal", "#print(f'CBAR* nelements={nelements}')", 
"auto_return", ",", "is_vectorized", "=", "op2", ".", "_create_oes_object4", "(", "nelements", ",", "result_name", ",", "slot", ",", "obj_vector_random", ")", "if", "auto_return", ":", "return", "ndata", ",", "None", ",", "None", "if", "op2", ".", "is_debug_file", ":", "op2", ".", "binary_debug", ".", "write", "(", "' [cap, element1, element2, ..., cap]\\n'", ")", "#op2.binary_debug.write(' cap = %i # assume 1 cap when there could have been multiple\\n' % ndata)", "op2", ".", "binary_debug", ".", "write", "(", "' #elementi = [eid_device, s1a, s2a, s3a, s4a, axial,\\n'", ")", "op2", ".", "binary_debug", ".", "write", "(", "' s1b, s2b, s3b, s4b]\\n'", ")", "op2", ".", "binary_debug", ".", "write", "(", "' nelements=%i; nnodes=1 # centroid\\n'", "%", "nelements", ")", "obj", "=", "op2", ".", "obj", "if", "op2", ".", "use_vector", "and", "is_vectorized", "and", "0", ":", "# pragma: no cover", "# self.itime = 0", "# self.ielement = 0", "# self.itotal = 0", "#self.ntimes = 0", "#self.nelements = 0", "n", "=", "nelements", "*", "ntotal", "ielement", "=", "obj", ".", "ielement", "ielement2", "=", "ielement", "+", "nelements", "obj", ".", "_times", "[", "obj", ".", "itime", "]", "=", "dt", "self", ".", "obj_set_element", "(", "obj", ",", "itotal", ",", "itotal2", ",", "data", ",", "nelements", ")", "floats", "=", "frombuffer", "(", "data", ",", "dtype", "=", "op2", ".", "fdtype", ")", ".", "reshape", "(", "nelements", ",", "10", ")", "#[s1a, s2a, s3a, s4a, axial,", "# s1b, s2b, s3b, s4b]", "obj", ".", "data", "[", "obj", ".", "itime", ",", "ielement", ":", "ielement2", ",", ":", "]", "=", "floats", "[", ":", ",", "1", ":", "]", ".", "copy", "(", ")", "obj", ".", "itotal", "=", "ielement2", "obj", ".", "ielement", "=", "ielement2", "else", ":", "if", "is_vectorized", "and", "op2", ".", "use_vector", "and", "obj", ".", "itime", "==", "0", ":", "# pragma: no cover", "op2", ".", "log", ".", "debug", "(", "'vectorize CBAR random SORT%s'", "%", "op2", ".", "sort_method", ")", "n", "=", "oes_cbar_random_10", "(", "op2", ",", "data", ",", "obj", ",", "nelements", ",", "ntotal", ")", "else", ":", "# pragma: no cover", "raise", "RuntimeError", "(", "op2", ".", "code_information", "(", ")", ")", "return", "n", ",", "nelements", ",", "ntotal" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/op2/tables/oes_stressStrain/oes.py#L2661-L2835
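The vectorized branches above share one pattern: reinterpret the raw record bytes as num_wide floats per element, then split off the leading eid_device column. A standalone sketch of that pattern on synthetic data (plain numpy, not the pyNastran API):

    import numpy as np

    num_wide, nelements = 16, 3
    # Synthetic packed records: [eid_device, 15 result floats] per element.
    data = np.arange(num_wide * nelements, dtype='float32').tobytes()
    floats = np.frombuffer(data, dtype='float32').reshape(nelements, num_wide)
    eids = floats[:, 0].astype('int32')  # device-coded element ids
    results = floats[:, 1:].copy()       # s1a..MSc result columns
    print(eids, results.shape)           # [ 0 16 32] (3, 15)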
rsmusllp/termineter
9311d6d995a7bf0f80853a00a115a8fa16aa0727
lib/c1219/data.py
python
get_history_entry_record
(endianess, hist_date_time_flag, tm_format, event_number_flag, hist_seq_nbr_flag, data)
return rcd
Return data formatted into a log entry. :param str endianess: The endianess to use when packing values ('>' or '<') :param bool hist_date_time_flag: Whether or not a time stamp is included. :param int tm_format: The format that the data is packed in, this typically corresponds with the value in the GEN_CONFIG_TBL (table #0) (1 <= tm_format <= 4) :param bool event_number_flag: Whether or not an event number is included. :param bool hist_seq_nbr_flag: Whether or not an history sequence number is included. :param str data: The packed and machine-formatted data to parse :rtype: dict
Return data formatted into a log entry.
[ "Return", "data", "formatted", "into", "a", "log", "entry", "." ]
def get_history_entry_record(endianess, hist_date_time_flag, tm_format, event_number_flag, hist_seq_nbr_flag, data): """ Return data formatted into a log entry. :param str endianess: The endianess to use when packing values ('>' or '<') :param bool hist_date_time_flag: Whether or not a time stamp is included. :param int tm_format: The format that the data is packed in, this typically corresponds with the value in the GEN_CONFIG_TBL (table #0) (1 <= tm_format <= 4) :param bool event_number_flag: Whether or not an event number is included. :param bool hist_seq_nbr_flag: Whether or not an history sequence number is included. :param str data: The packed and machine-formatted data to parse :rtype: dict """ rcd = {} if hist_date_time_flag: tmstmp = format_ltime(endianess, tm_format, data[0:LTIME_LENGTH.get(tm_format)]) if tmstmp: rcd['Time'] = tmstmp data = data[LTIME_LENGTH.get(tm_format):] if event_number_flag: rcd['Event Number'] = struct.unpack(endianess + 'H', data[:2])[0] data = data[2:] if hist_seq_nbr_flag: rcd['History Sequence Number'] = struct.unpack(endianess + 'H', data[:2])[0] data = data[2:] rcd['User ID'] = struct.unpack(endianess + 'H', data[:2])[0] rcd['Procedure Number'], rcd['Std vs Mfg'] = get_table_idbb_field(endianess, data[2:4])[:2] rcd['Arguments'] = data[4:] return rcd
[ "def", "get_history_entry_record", "(", "endianess", ",", "hist_date_time_flag", ",", "tm_format", ",", "event_number_flag", ",", "hist_seq_nbr_flag", ",", "data", ")", ":", "rcd", "=", "{", "}", "if", "hist_date_time_flag", ":", "tmstmp", "=", "format_ltime", "(", "endianess", ",", "tm_format", ",", "data", "[", "0", ":", "LTIME_LENGTH", ".", "get", "(", "tm_format", ")", "]", ")", "if", "tmstmp", ":", "rcd", "[", "'Time'", "]", "=", "tmstmp", "data", "=", "data", "[", "LTIME_LENGTH", ".", "get", "(", "tm_format", ")", ":", "]", "if", "event_number_flag", ":", "rcd", "[", "'Event Number'", "]", "=", "struct", ".", "unpack", "(", "endianess", "+", "'H'", ",", "data", "[", ":", "2", "]", ")", "[", "0", "]", "data", "=", "data", "[", "2", ":", "]", "if", "hist_seq_nbr_flag", ":", "rcd", "[", "'History Sequence Number'", "]", "=", "struct", ".", "unpack", "(", "endianess", "+", "'H'", ",", "data", "[", ":", "2", "]", ")", "[", "0", "]", "data", "=", "data", "[", "2", ":", "]", "rcd", "[", "'User ID'", "]", "=", "struct", ".", "unpack", "(", "endianess", "+", "'H'", ",", "data", "[", ":", "2", "]", ")", "[", "0", "]", "rcd", "[", "'Procedure Number'", "]", ",", "rcd", "[", "'Std vs Mfg'", "]", "=", "get_table_idbb_field", "(", "endianess", ",", "data", "[", "2", ":", "4", "]", ")", "[", ":", "2", "]", "rcd", "[", "'Arguments'", "]", "=", "data", "[", "4", ":", "]", "return", "rcd" ]
https://github.com/rsmusllp/termineter/blob/9311d6d995a7bf0f80853a00a115a8fa16aa0727/lib/c1219/data.py#L82-L111
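A small round-trip sketch for the record above with every optional flag off, leaving only the fixed 4-byte tail (User ID, then the procedure IDB field) plus arguments; the import path is inferred from the file location and the payload is synthetic:

    import struct
    from c1219.data import get_history_entry_record  # path per the record

    payload = struct.pack('<HH', 7, 0x0803) + b'\x01\x02\x03'
    rcd = get_history_entry_record('<', False, 2, False, False, payload)
    print(rcd['User ID'], rcd['Arguments'])  # 7 b'\x01\x02\x03'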
choasup/SIN
4851efb7b1c64180026e51ab8abcd95265c0602c
lib/rpn_msr/proposal_target_layer_tf.py
python
_compute_targets
(ex_rois, gt_rois, labels)
return np.hstack( (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
Compute bounding-box regression targets for an image.
Compute bounding-box regression targets for an image.
[ "Compute", "bounding", "-", "box", "regression", "targets", "for", "an", "image", "." ]
def _compute_targets(ex_rois, gt_rois, labels): """Compute bounding-box regression targets for an image.""" assert ex_rois.shape[0] == gt_rois.shape[0] assert ex_rois.shape[1] == 4 assert gt_rois.shape[1] == 4 targets = bbox_transform(ex_rois, gt_rois) if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED: # Optionally normalize targets by a precomputed mean and stdev targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)) / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS)) return np.hstack( (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
[ "def", "_compute_targets", "(", "ex_rois", ",", "gt_rois", ",", "labels", ")", ":", "assert", "ex_rois", ".", "shape", "[", "0", "]", "==", "gt_rois", ".", "shape", "[", "0", "]", "assert", "ex_rois", ".", "shape", "[", "1", "]", "==", "4", "assert", "gt_rois", ".", "shape", "[", "1", "]", "==", "4", "targets", "=", "bbox_transform", "(", "ex_rois", ",", "gt_rois", ")", "if", "cfg", ".", "TRAIN", ".", "BBOX_NORMALIZE_TARGETS_PRECOMPUTED", ":", "# Optionally normalize targets by a precomputed mean and stdev", "targets", "=", "(", "(", "targets", "-", "np", ".", "array", "(", "cfg", ".", "TRAIN", ".", "BBOX_NORMALIZE_MEANS", ")", ")", "/", "np", ".", "array", "(", "cfg", ".", "TRAIN", ".", "BBOX_NORMALIZE_STDS", ")", ")", "return", "np", ".", "hstack", "(", "(", "labels", "[", ":", ",", "np", ".", "newaxis", "]", ",", "targets", ")", ")", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")" ]
https://github.com/choasup/SIN/blob/4851efb7b1c64180026e51ab8abcd95265c0602c/lib/rpn_msr/proposal_target_layer_tf.py#L94-L107
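bbox_transform is imported rather than shown in the record above; the sketch below spells out the conventional Fast R-CNN target parameterization it computes (center offsets scaled by anchor size, log size ratios), a reconstruction of the usual formula rather than the project's exact file:

    import numpy as np

    def bbox_transform_sketch(ex_rois, gt_rois):
        # Conventional R-CNN regression targets: (dx, dy, dw, dh).
        ex_w = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
        ex_h = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
        ex_cx = ex_rois[:, 0] + 0.5 * ex_w
        ex_cy = ex_rois[:, 1] + 0.5 * ex_h
        gt_w = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
        gt_h = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
        gt_cx = gt_rois[:, 0] + 0.5 * gt_w
        gt_cy = gt_rois[:, 1] + 0.5 * gt_h
        return np.vstack([(gt_cx - ex_cx) / ex_w,
                          (gt_cy - ex_cy) / ex_h,
                          np.log(gt_w / ex_w),
                          np.log(gt_h / ex_h)]).transpose()

    ex = np.array([[0.0, 0.0, 9.0, 9.0]])
    gt = np.array([[1.0, 1.0, 12.0, 12.0]])
    print(bbox_transform_sketch(ex, gt))  # [[0.2 0.2 0.182... 0.182...]]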
kamalgill/flask-appengine-template
11760f83faccbb0d0afe416fc58e67ecfb4643c2
src/lib/click/types.py
python
Path.convert
(self, value, param, ctx)
return self.coerce_path_result(rv)
[]
def convert(self, value, param, ctx): rv = value is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') if not is_dash: if self.resolve_path: rv = os.path.realpath(rv) try: st = os.stat(rv) except OSError: if not self.exists: return self.coerce_path_result(rv) self.fail('%s "%s" does not exist.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if not self.file_okay and stat.S_ISREG(st.st_mode): self.fail('%s "%s" is a file.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if not self.dir_okay and stat.S_ISDIR(st.st_mode): self.fail('%s "%s" is a directory.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if self.writable and not os.access(value, os.W_OK): self.fail('%s "%s" is not writable.' % ( self.path_type, filename_to_ui(value) ), param, ctx) if self.readable and not os.access(value, os.R_OK): self.fail('%s "%s" is not readable.' % ( self.path_type, filename_to_ui(value) ), param, ctx) return self.coerce_path_result(rv)
[ "def", "convert", "(", "self", ",", "value", ",", "param", ",", "ctx", ")", ":", "rv", "=", "value", "is_dash", "=", "self", ".", "file_okay", "and", "self", ".", "allow_dash", "and", "rv", "in", "(", "b'-'", ",", "'-'", ")", "if", "not", "is_dash", ":", "if", "self", ".", "resolve_path", ":", "rv", "=", "os", ".", "path", ".", "realpath", "(", "rv", ")", "try", ":", "st", "=", "os", ".", "stat", "(", "rv", ")", "except", "OSError", ":", "if", "not", "self", ".", "exists", ":", "return", "self", ".", "coerce_path_result", "(", "rv", ")", "self", ".", "fail", "(", "'%s \"%s\" does not exist.'", "%", "(", "self", ".", "path_type", ",", "filename_to_ui", "(", "value", ")", ")", ",", "param", ",", "ctx", ")", "if", "not", "self", ".", "file_okay", "and", "stat", ".", "S_ISREG", "(", "st", ".", "st_mode", ")", ":", "self", ".", "fail", "(", "'%s \"%s\" is a file.'", "%", "(", "self", ".", "path_type", ",", "filename_to_ui", "(", "value", ")", ")", ",", "param", ",", "ctx", ")", "if", "not", "self", ".", "dir_okay", "and", "stat", ".", "S_ISDIR", "(", "st", ".", "st_mode", ")", ":", "self", ".", "fail", "(", "'%s \"%s\" is a directory.'", "%", "(", "self", ".", "path_type", ",", "filename_to_ui", "(", "value", ")", ")", ",", "param", ",", "ctx", ")", "if", "self", ".", "writable", "and", "not", "os", ".", "access", "(", "value", ",", "os", ".", "W_OK", ")", ":", "self", ".", "fail", "(", "'%s \"%s\" is not writable.'", "%", "(", "self", ".", "path_type", ",", "filename_to_ui", "(", "value", ")", ")", ",", "param", ",", "ctx", ")", "if", "self", ".", "readable", "and", "not", "os", ".", "access", "(", "value", ",", "os", ".", "R_OK", ")", ":", "self", ".", "fail", "(", "'%s \"%s\" is not readable.'", "%", "(", "self", ".", "path_type", ",", "filename_to_ui", "(", "value", ")", ")", ",", "param", ",", "ctx", ")", "return", "self", ".", "coerce_path_result", "(", "rv", ")" ]
https://github.com/kamalgill/flask-appengine-template/blob/11760f83faccbb0d0afe416fc58e67ecfb4643c2/src/lib/click/types.py#L402-L442
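Path.convert runs when click coerces a command-line value, so the natural usage example is declarative: a click.Path parameter on a command, using the standard click API:

    import click

    @click.command()
    @click.argument('src', type=click.Path(exists=True, dir_okay=False, readable=True))
    def head(src):
        """Print the first line of SRC; click.Path validated it already."""
        with open(src) as fh:
            click.echo(fh.readline().rstrip())

    if __name__ == '__main__':
        head()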
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/serverless/v1/service/function/__init__.py
python
FunctionInstance.account_sid
(self)
return self._properties['account_sid']
:returns: The SID of the Account that created the Function resource :rtype: unicode
:returns: The SID of the Account that created the Function resource :rtype: unicode
[ ":", "returns", ":", "The", "SID", "of", "the", "Account", "that", "created", "the", "Function", "resource", ":", "rtype", ":", "unicode" ]
def account_sid(self): """ :returns: The SID of the Account that created the Function resource :rtype: unicode """ return self._properties['account_sid']
[ "def", "account_sid", "(", "self", ")", ":", "return", "self", ".", "_properties", "[", "'account_sid'", "]" ]
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/serverless/v1/service/function/__init__.py#L354-L359
mayank93/Twitter-Sentiment-Analysis
f095c6ca6bf69787582b5dabb140fefaf278eb37
front-end/web2py/applications/TSAA/modules/PhraseLevel/replaceExpand.py
python
replaceEmoticons
(emoticonsDict,tweet,token)
return tweet,token
replaces the emoticons present in tweet with its polarity takes as input a emoticons dict which has emoticons as key and polarity as value and a list which contains words in tweet and return list of words in tweet after replacement
replaces the emoticons present in tweet with its polarity takes as input a emoticons dict which has emoticons as key and polarity as value and a list which contains words in tweet and return list of words in tweet after replacement
[ "replaces", "the", "emoticons", "present", "in", "tweet", "with", "its", "polarity", "takes", "as", "input", "a", "emoticons", "dict", "which", "has", "emoticons", "as", "key", "and", "polarity", "as", "value", "and", "a", "list", "which", "contains", "words", "in", "tweet", "and", "return", "list", "of", "words", "in", "tweet", "after", "replacement" ]
def replaceEmoticons(emoticonsDict,tweet,token): """replaces the emoticons present in tweet with its polarity takes as input a emoticons dict which has emoticons as key and polarity as value and a list which contains words in tweet and return list of words in tweet after replacement""" for i in range(len(tweet)): if tweet[i] in emoticonsDict: tweet[i]=emoticonsDict[tweet[i]] token[i]='E' return tweet,token
[ "def", "replaceEmoticons", "(", "emoticonsDict", ",", "tweet", ",", "token", ")", ":", "for", "i", "in", "range", "(", "len", "(", "tweet", ")", ")", ":", "if", "tweet", "[", "i", "]", "in", "emoticonsDict", ":", "tweet", "[", "i", "]", "=", "emoticonsDict", "[", "tweet", "[", "i", "]", "]", "token", "[", "i", "]", "=", "'E'", "return", "tweet", ",", "token" ]
https://github.com/mayank93/Twitter-Sentiment-Analysis/blob/f095c6ca6bf69787582b5dabb140fefaf278eb37/front-end/web2py/applications/TSAA/modules/PhraseLevel/replaceExpand.py#L51-L60
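The function body is shown in full above, so this usage sketch runs against it as pasted; the polarity strings and the 'W' word tags are made-up placeholders:

    emoticonsDict = {':)': '||pos||', ':(': '||neg||'}
    tweet = ['great', 'game', ':)']
    token = ['W', 'W', 'W']  # one placeholder tag per word
    tweet, token = replaceEmoticons(emoticonsDict, tweet, token)
    print(tweet)  # ['great', 'game', '||pos||']
    print(token)  # ['W', 'W', 'E']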
CalebBell/thermo
572a47d1b03d49fe609b8d5f826fa6a7cde00828
thermo/property_package.py
python
WilsonPP.d2GE_dTdxs
(self, T, xs)
return d2GE_dTdxs
r''' .. math:: \frac{\partial^2 G^E}{\partial x_k \partial T} = -R\left[T\left( \sum_i \left(\frac{x_i \frac{\partial \Lambda_{ik}}{\partial T}}{\sum_j x_j \Lambda_{ij}} - \frac{x_i \Lambda_{ik} (\sum_j x_j \frac{\partial \Lambda_{ij}}{\partial T} )}{\left(\sum_j x_j \Lambda_{ij}\right)^2} \right) + \frac{\sum_i x_i \frac{\partial \Lambda_{ki}}{\partial T}}{\sum_j x_j \Lambda_{kj}} \right) + \ln\left(\sum_i x_i \Lambda_{ki}\right) + \sum_i \frac{x_i \Lambda_{ik}}{\sum_j x_j \Lambda_{ij}} \right]
r'''
[ "r" ]
def d2GE_dTdxs(self, T, xs): r''' .. math:: \frac{\partial^2 G^E}{\partial x_k \partial T} = -R\left[T\left( \sum_i \left(\frac{x_i \frac{\partial \Lambda_{ik}}{\partial T}}{\sum_j x_j \Lambda_{ij}} - \frac{x_i \Lambda_{ik} (\sum_j x_j \frac{\partial \Lambda_{ij}}{\partial T} )}{\left(\sum_j x_j \Lambda_{ij}\right)^2} \right) + \frac{\sum_i x_i \frac{\partial \Lambda_{ki}}{\partial T}}{\sum_j x_j \Lambda_{kj}} \right) + \ln\left(\sum_i x_i \Lambda_{ki}\right) + \sum_i \frac{x_i \Lambda_{ik}}{\sum_j x_j \Lambda_{ij}} \right] ''' cmps = self.cmps lambdas = self.lambdas(T) dlambdas_dT = self.dlambdas_dT(T) d2lambdas_dT2 = self.d2lambdas_dT2(T) xj_Lambdas_ijs = [] for i in cmps: tot = 0.0 for j in cmps: tot += xs[j]*lambdas[i][j] xj_Lambdas_ijs.append(tot) xj_dLambdas_dTijs = [] for i in cmps: tot = 0.0 for j in cmps: tot += xs[j]*dlambdas_dT[i][j] xj_dLambdas_dTijs.append(tot) d2GE_dTdxs = [] for k in cmps: tot1 = 0.0 for i in cmps: tot1 += (xs[i]*dlambdas_dT[i][k]/xj_Lambdas_ijs[i] - xs[i]*xj_dLambdas_dTijs[i]*lambdas[i][k]/(xj_Lambdas_ijs[i]*xj_Lambdas_ijs[i])) tot1 += xj_dLambdas_dTijs[k]/xj_Lambdas_ijs[k] tot2 = 0.0 for i in cmps: tot2 += xs[i]*lambdas[i][k]/xj_Lambdas_ijs[i] dG = -R*(T*tot1 + log(xj_Lambdas_ijs[k]) + tot2) d2GE_dTdxs.append(dG) return d2GE_dTdxs
[ "def", "d2GE_dTdxs", "(", "self", ",", "T", ",", "xs", ")", ":", "cmps", "=", "self", ".", "cmps", "lambdas", "=", "self", ".", "lambdas", "(", "T", ")", "dlambdas_dT", "=", "self", ".", "dlambdas_dT", "(", "T", ")", "d2lambdas_dT2", "=", "self", ".", "d2lambdas_dT2", "(", "T", ")", "xj_Lambdas_ijs", "=", "[", "]", "for", "i", "in", "cmps", ":", "tot", "=", "0.0", "for", "j", "in", "cmps", ":", "tot", "+=", "xs", "[", "j", "]", "*", "lambdas", "[", "i", "]", "[", "j", "]", "xj_Lambdas_ijs", ".", "append", "(", "tot", ")", "xj_dLambdas_dTijs", "=", "[", "]", "for", "i", "in", "cmps", ":", "tot", "=", "0.0", "for", "j", "in", "cmps", ":", "tot", "+=", "xs", "[", "j", "]", "*", "dlambdas_dT", "[", "i", "]", "[", "j", "]", "xj_dLambdas_dTijs", ".", "append", "(", "tot", ")", "d2GE_dTdxs", "=", "[", "]", "for", "k", "in", "cmps", ":", "tot1", "=", "0.0", "for", "i", "in", "cmps", ":", "tot1", "+=", "(", "xs", "[", "i", "]", "*", "dlambdas_dT", "[", "i", "]", "[", "k", "]", "/", "xj_Lambdas_ijs", "[", "i", "]", "-", "xs", "[", "i", "]", "*", "xj_dLambdas_dTijs", "[", "i", "]", "*", "lambdas", "[", "i", "]", "[", "k", "]", "/", "(", "xj_Lambdas_ijs", "[", "i", "]", "*", "xj_Lambdas_ijs", "[", "i", "]", ")", ")", "tot1", "+=", "xj_dLambdas_dTijs", "[", "k", "]", "/", "xj_Lambdas_ijs", "[", "k", "]", "tot2", "=", "0.0", "for", "i", "in", "cmps", ":", "tot2", "+=", "xs", "[", "i", "]", "*", "lambdas", "[", "i", "]", "[", "k", "]", "/", "xj_Lambdas_ijs", "[", "i", "]", "dG", "=", "-", "R", "*", "(", "T", "*", "tot1", "+", "log", "(", "xj_Lambdas_ijs", "[", "k", "]", ")", "+", "tot2", ")", "d2GE_dTdxs", ".", "append", "(", "dG", ")", "return", "d2GE_dTdxs" ]
https://github.com/CalebBell/thermo/blob/572a47d1b03d49fe609b8d5f826fa6a7cde00828/thermo/property_package.py#L3706-L3756
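A mixed partial like this is easy to cross-check numerically: central-difference the composition gradient in T and compare with the analytic values. The helper below assumes the same object also exposes a dGE_dxs(T, xs) method, which is an assumption; the record shows only d2GE_dTdxs:

    def d2GE_dTdxs_fd(pp, T, xs, dT=1e-5):
        # Central finite difference of dGE/dxs in T; each entry should
        # agree with pp.d2GE_dTdxs(T, xs) to roughly first order in dT.
        lo = pp.dGE_dxs(T - dT, xs)  # assumed sibling method
        hi = pp.dGE_dxs(T + dT, xs)
        return [(h - l) / (2.0 * dT) for h, l in zip(hi, lo)]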
ethereum/trinity
6383280c5044feb06695ac2f7bc1100b7bcf4fe0
p2p/qualifiers.py
python
BaseQualifier.__or__
(self, other: QualifierFn)
return OrQualifier(self, other)
[]
def __or__(self, other: QualifierFn) -> 'BaseQualifier': return OrQualifier(self, other)
[ "def", "__or__", "(", "self", ",", "other", ":", "QualifierFn", ")", "->", "'BaseQualifier'", ":", "return", "OrQualifier", "(", "self", ",", "other", ")" ]
https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/p2p/qualifiers.py#L23-L24
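OrQualifier is not shown in the record, so here is a stripped-down re-creation of the combinator pattern this __or__ enables, with illustrative classes rather than the trinity ones:

    class Qual:
        def __init__(self, fn):
            self.fn = fn
        def __call__(self, candidate):
            return self.fn(candidate)
        def __or__(self, other):
            # OR-combination: accept when either qualifier accepts.
            return Qual(lambda c: self(c) or other(c))

    is_even = Qual(lambda n: n % 2 == 0)
    is_big = Qual(lambda n: n > 100)
    print((is_even | is_big)(7), (is_even | is_big)(101))  # False True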
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/requests/utils.py
python
address_in_network
(ip, net)
return (ipaddr & netmask) == (network & netmask)
This function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool
This function allows you to check if an IP belongs to a network subnet
[ "This", "function", "allows", "you", "to", "check", "if", "an", "IP", "belongs", "to", "a", "network", "subnet" ]
def address_in_network(ip, net): """This function allows you to check if an IP belongs to a network subnet Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 :rtype: bool """ ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] netaddr, bits = net.split('/') netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask return (ipaddr & netmask) == (network & netmask)
[ "def", "address_in_network", "(", "ip", ",", "net", ")", ":", "ipaddr", "=", "struct", ".", "unpack", "(", "'=L'", ",", "socket", ".", "inet_aton", "(", "ip", ")", ")", "[", "0", "]", "netaddr", ",", "bits", "=", "net", ".", "split", "(", "'/'", ")", "netmask", "=", "struct", ".", "unpack", "(", "'=L'", ",", "socket", ".", "inet_aton", "(", "dotted_netmask", "(", "int", "(", "bits", ")", ")", ")", ")", "[", "0", "]", "network", "=", "struct", ".", "unpack", "(", "'=L'", ",", "socket", ".", "inet_aton", "(", "netaddr", ")", ")", "[", "0", "]", "&", "netmask", "return", "(", "ipaddr", "&", "netmask", ")", "==", "(", "network", "&", "netmask", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/requests/utils.py#L546-L558
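Usage matches the examples already given in the docstring; the function is importable from requests.utils:

    from requests.utils import address_in_network

    print(address_in_network('192.168.1.1', '192.168.1.0/24'))    # True
    print(address_in_network('192.168.1.1', '192.168.100.0/24'))  # False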
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/database/database_client.py
python
DatabaseClient.list_exadata_infrastructures
(self, compartment_id, **kwargs)
Lists the Exadata infrastructure resources in the specified compartment. Applies to Exadata Cloud@Customer instances only. To list the Exadata Cloud Service infrastructure resources in a compartment, use the :func:`list_cloud_exadata_infrastructures` operation. :param str compartment_id: (required) The compartment `OCID`__. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param int limit: (optional) The maximum number of items to return per page. :param str page: (optional) The pagination token to continue listing from. :param str opc_request_id: (optional) Unique identifier for the request. :param str sort_by: (optional) The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME sort order is case sensitive. Allowed values are: "TIMECREATED", "DISPLAYNAME" :param str sort_order: (optional) The sort order to use, either ascending (`ASC`) or descending (`DESC`). Allowed values are: "ASC", "DESC" :param str lifecycle_state: (optional) A filter to return only resources that match the given lifecycle state exactly. Allowed values are: "CREATING", "REQUIRES_ACTIVATION", "ACTIVATING", "ACTIVE", "ACTIVATION_FAILED", "FAILED", "UPDATING", "DELETING", "DELETED", "DISCONNECTED", "MAINTENANCE_IN_PROGRESS" :param str display_name: (optional) A filter to return only resources that match the entire display name given. The match is not case sensitive. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.database.models.ExadataInfrastructureSummary` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/database/list_exadata_infrastructures.py.html>`__ to see an example of how to use list_exadata_infrastructures API.
Lists the Exadata infrastructure resources in the specified compartment. Applies to Exadata Cloud@Customer instances only. To list the Exadata Cloud Service infrastructure resources in a compartment, use the :func:`list_cloud_exadata_infrastructures` operation.
[ "Lists", "the", "Exadata", "infrastructure", "resources", "in", "the", "specified", "compartment", ".", "Applies", "to", "Exadata", "Cloud@Customer", "instances", "only", ".", "To", "list", "the", "Exadata", "Cloud", "Service", "infrastructure", "resources", "in", "a", "compartment", "use", "the", ":", "func", ":", "list_cloud_exadata_infrastructures", "operation", "." ]
def list_exadata_infrastructures(self, compartment_id, **kwargs): """ Lists the Exadata infrastructure resources in the specified compartment. Applies to Exadata Cloud@Customer instances only. To list the Exadata Cloud Service infrastructure resources in a compartment, use the :func:`list_cloud_exadata_infrastructures` operation. :param str compartment_id: (required) The compartment `OCID`__. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param int limit: (optional) The maximum number of items to return per page. :param str page: (optional) The pagination token to continue listing from. :param str opc_request_id: (optional) Unique identifier for the request. :param str sort_by: (optional) The field to sort by. You can provide one sort order (`sortOrder`). Default order for TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME sort order is case sensitive. Allowed values are: "TIMECREATED", "DISPLAYNAME" :param str sort_order: (optional) The sort order to use, either ascending (`ASC`) or descending (`DESC`). Allowed values are: "ASC", "DESC" :param str lifecycle_state: (optional) A filter to return only resources that match the given lifecycle state exactly. Allowed values are: "CREATING", "REQUIRES_ACTIVATION", "ACTIVATING", "ACTIVE", "ACTIVATION_FAILED", "FAILED", "UPDATING", "DELETING", "DELETED", "DISCONNECTED", "MAINTENANCE_IN_PROGRESS" :param str display_name: (optional) A filter to return only resources that match the entire display name given. The match is not case sensitive. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.database.models.ExadataInfrastructureSummary` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/database/list_exadata_infrastructures.py.html>`__ to see an example of how to use list_exadata_infrastructures API. 
""" resource_path = "/exadataInfrastructures" method = "GET" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "limit", "page", "opc_request_id", "sort_by", "sort_order", "lifecycle_state", "display_name" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "list_exadata_infrastructures got unknown kwargs: {!r}".format(extra_kwargs)) if 'sort_by' in kwargs: sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"] if kwargs['sort_by'] not in sort_by_allowed_values: raise ValueError( "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values) ) if 'sort_order' in kwargs: sort_order_allowed_values = ["ASC", "DESC"] if kwargs['sort_order'] not in sort_order_allowed_values: raise ValueError( "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values) ) if 'lifecycle_state' in kwargs: lifecycle_state_allowed_values = ["CREATING", "REQUIRES_ACTIVATION", "ACTIVATING", "ACTIVE", "ACTIVATION_FAILED", "FAILED", "UPDATING", "DELETING", "DELETED", "DISCONNECTED", "MAINTENANCE_IN_PROGRESS"] if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values: raise ValueError( "Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values) ) query_params = { "compartmentId": compartment_id, "limit": kwargs.get("limit", missing), "page": kwargs.get("page", missing), "sortBy": kwargs.get("sort_by", missing), "sortOrder": kwargs.get("sort_order", missing), "lifecycleState": kwargs.get("lifecycle_state", missing), "displayName": kwargs.get("display_name", missing) } query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None} header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.base_client.get_preferred_retry_strategy( operation_retry_strategy=kwargs.get('retry_strategy'), client_retry_strategy=self.retry_strategy ) if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_client_retries_header(header_params) retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[ExadataInfrastructureSummary]") else: return self.base_client.call_api( resource_path=resource_path, method=method, query_params=query_params, header_params=header_params, response_type="list[ExadataInfrastructureSummary]")
[ "def", "list_exadata_infrastructures", "(", "self", ",", "compartment_id", ",", "*", "*", "kwargs", ")", ":", "resource_path", "=", "\"/exadataInfrastructures\"", "method", "=", "\"GET\"", "# Don't accept unknown kwargs", "expected_kwargs", "=", "[", "\"retry_strategy\"", ",", "\"limit\"", ",", "\"page\"", ",", "\"opc_request_id\"", ",", "\"sort_by\"", ",", "\"sort_order\"", ",", "\"lifecycle_state\"", ",", "\"display_name\"", "]", "extra_kwargs", "=", "[", "_key", "for", "_key", "in", "six", ".", "iterkeys", "(", "kwargs", ")", "if", "_key", "not", "in", "expected_kwargs", "]", "if", "extra_kwargs", ":", "raise", "ValueError", "(", "\"list_exadata_infrastructures got unknown kwargs: {!r}\"", ".", "format", "(", "extra_kwargs", ")", ")", "if", "'sort_by'", "in", "kwargs", ":", "sort_by_allowed_values", "=", "[", "\"TIMECREATED\"", ",", "\"DISPLAYNAME\"", "]", "if", "kwargs", "[", "'sort_by'", "]", "not", "in", "sort_by_allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `sort_by`, must be one of {0}\"", ".", "format", "(", "sort_by_allowed_values", ")", ")", "if", "'sort_order'", "in", "kwargs", ":", "sort_order_allowed_values", "=", "[", "\"ASC\"", ",", "\"DESC\"", "]", "if", "kwargs", "[", "'sort_order'", "]", "not", "in", "sort_order_allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `sort_order`, must be one of {0}\"", ".", "format", "(", "sort_order_allowed_values", ")", ")", "if", "'lifecycle_state'", "in", "kwargs", ":", "lifecycle_state_allowed_values", "=", "[", "\"CREATING\"", ",", "\"REQUIRES_ACTIVATION\"", ",", "\"ACTIVATING\"", ",", "\"ACTIVE\"", ",", "\"ACTIVATION_FAILED\"", ",", "\"FAILED\"", ",", "\"UPDATING\"", ",", "\"DELETING\"", ",", "\"DELETED\"", ",", "\"DISCONNECTED\"", ",", "\"MAINTENANCE_IN_PROGRESS\"", "]", "if", "kwargs", "[", "'lifecycle_state'", "]", "not", "in", "lifecycle_state_allowed_values", ":", "raise", "ValueError", "(", "\"Invalid value for `lifecycle_state`, must be one of {0}\"", ".", "format", "(", "lifecycle_state_allowed_values", ")", ")", "query_params", "=", "{", "\"compartmentId\"", ":", "compartment_id", ",", "\"limit\"", ":", "kwargs", ".", "get", "(", "\"limit\"", ",", "missing", ")", ",", "\"page\"", ":", "kwargs", ".", "get", "(", "\"page\"", ",", "missing", ")", ",", "\"sortBy\"", ":", "kwargs", ".", "get", "(", "\"sort_by\"", ",", "missing", ")", ",", "\"sortOrder\"", ":", "kwargs", ".", "get", "(", "\"sort_order\"", ",", "missing", ")", ",", "\"lifecycleState\"", ":", "kwargs", ".", "get", "(", "\"lifecycle_state\"", ",", "missing", ")", ",", "\"displayName\"", ":", "kwargs", ".", "get", "(", "\"display_name\"", ",", "missing", ")", "}", "query_params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "query_params", ")", "if", "v", "is", "not", "missing", "and", "v", "is", "not", "None", "}", "header_params", "=", "{", "\"accept\"", ":", "\"application/json\"", ",", "\"content-type\"", ":", "\"application/json\"", ",", "\"opc-request-id\"", ":", "kwargs", ".", "get", "(", "\"opc_request_id\"", ",", "missing", ")", "}", "header_params", "=", "{", "k", ":", "v", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "header_params", ")", "if", "v", "is", "not", "missing", "and", "v", "is", "not", "None", "}", "retry_strategy", "=", "self", ".", "base_client", ".", "get_preferred_retry_strategy", "(", "operation_retry_strategy", "=", "kwargs", ".", "get", "(", "'retry_strategy'", ")", ",", "client_retry_strategy", "=", "self", ".", 
"retry_strategy", ")", "if", "retry_strategy", ":", "if", "not", "isinstance", "(", "retry_strategy", ",", "retry", ".", "NoneRetryStrategy", ")", ":", "self", ".", "base_client", ".", "add_opc_client_retries_header", "(", "header_params", ")", "retry_strategy", ".", "add_circuit_breaker_callback", "(", "self", ".", "circuit_breaker_callback", ")", "return", "retry_strategy", ".", "make_retrying_call", "(", "self", ".", "base_client", ".", "call_api", ",", "resource_path", "=", "resource_path", ",", "method", "=", "method", ",", "query_params", "=", "query_params", ",", "header_params", "=", "header_params", ",", "response_type", "=", "\"list[ExadataInfrastructureSummary]\"", ")", "else", ":", "return", "self", ".", "base_client", ".", "call_api", "(", "resource_path", "=", "resource_path", ",", "method", "=", "method", ",", "query_params", "=", "query_params", ",", "header_params", "=", "header_params", ",", "response_type", "=", "\"list[ExadataInfrastructureSummary]\"", ")" ]
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/database/database_client.py#L16978-L17110
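A usage sketch following the standard OCI SDK pattern; the compartment OCID below is a placeholder:

    import oci

    config = oci.config.from_file()  # reads ~/.oci/config by default
    client = oci.database.DatabaseClient(config)
    resp = client.list_exadata_infrastructures(
        compartment_id='ocid1.compartment.oc1..example',  # placeholder OCID
        lifecycle_state='ACTIVE',
        sort_by='TIMECREATED',
    )
    for infra in resp.data:
        print(infra.display_name, infra.lifecycle_state)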
henkelis/sonospy
841f52010fd6e1e932d8f1a8896ad4e5a0667b8a
sonospy/brisa/upnp/control_point/msearch.py
python
MSearch.start
(self, interval=DEFAULT_SEARCH_TIME, search_type=DEFAULT_SEARCH_TYPE)
Starts the search. @param interval: interval between searchs. Default is 600.0 seconds @param search_type: type of the search, default is "ssdp:all" @type interval: float @type search_type: string
Starts the search.
[ "Starts", "the", "search", "." ]
def start(self, interval=DEFAULT_SEARCH_TIME, search_type=DEFAULT_SEARCH_TYPE): """ Starts the search. @param interval: interval between searchs. Default is 600.0 seconds @param search_type: type of the search, default is "ssdp:all" @type interval: float @type search_type: string """ # interval = 30.0 if not self.is_running(): self.search_type = search_type self.listen_udp.start() # print ">>>>>>>>> interval: " + str(interval) self.loopcall.start(interval, now=True) log.debug('MSearch started') else: log.warning(self.msg_already_started)
[ "def", "start", "(", "self", ",", "interval", "=", "DEFAULT_SEARCH_TIME", ",", "search_type", "=", "DEFAULT_SEARCH_TYPE", ")", ":", "# interval = 30.0", "if", "not", "self", ".", "is_running", "(", ")", ":", "self", ".", "search_type", "=", "search_type", "self", ".", "listen_udp", ".", "start", "(", ")", "# print \">>>>>>>>> interval: \" + str(interval)", "self", ".", "loopcall", ".", "start", "(", "interval", ",", "now", "=", "True", ")", "log", ".", "debug", "(", "'MSearch started'", ")", "else", ":", "log", ".", "warning", "(", "self", ".", "msg_already_started", ")" ]
https://github.com/henkelis/sonospy/blob/841f52010fd6e1e932d8f1a8896ad4e5a0667b8a/sonospy/brisa/upnp/control_point/msearch.py#L71-L93
stopstalk/stopstalk-deployment
10c3ab44c4ece33ae515f6888c15033db2004bb1
aws_lambda/spoj_aws_lambda_function/lambda_code/setuptools/dep_util.py
python
newer_pairwise_group
(sources_groups, targets)
return n_sources, n_targets
Walk both arguments in parallel, testing if each source group is newer than its corresponding target. Returns a pair of lists (sources_groups, targets) where sources is newer than target, according to the semantics of 'newer_group()'.
Walk both arguments in parallel, testing if each source group is newer than its corresponding target. Returns a pair of lists (sources_groups, targets) where sources is newer than target, according to the semantics of 'newer_group()'.
[ "Walk", "both", "arguments", "in", "parallel", "testing", "if", "each", "source", "group", "is", "newer", "than", "its", "corresponding", "target", ".", "Returns", "a", "pair", "of", "lists", "(", "sources_groups", "targets", ")", "where", "sources", "is", "newer", "than", "target", "according", "to", "the", "semantics", "of", "newer_group", "()", "." ]
def newer_pairwise_group(sources_groups, targets): """Walk both arguments in parallel, testing if each source group is newer than its corresponding target. Returns a pair of lists (sources_groups, targets) where sources is newer than target, according to the semantics of 'newer_group()'. """ if len(sources_groups) != len(targets): raise ValueError("'sources_group' and 'targets' must be the same length") # build a pair of lists (sources_groups, targets) where source is newer n_sources = [] n_targets = [] for i in range(len(sources_groups)): if newer_group(sources_groups[i], targets[i]): n_sources.append(sources_groups[i]) n_targets.append(targets[i]) return n_sources, n_targets
[ "def", "newer_pairwise_group", "(", "sources_groups", ",", "targets", ")", ":", "if", "len", "(", "sources_groups", ")", "!=", "len", "(", "targets", ")", ":", "raise", "ValueError", "(", "\"'sources_group' and 'targets' must be the same length\"", ")", "# build a pair of lists (sources_groups, targets) where source is newer", "n_sources", "=", "[", "]", "n_targets", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "sources_groups", ")", ")", ":", "if", "newer_group", "(", "sources_groups", "[", "i", "]", ",", "targets", "[", "i", "]", ")", ":", "n_sources", ".", "append", "(", "sources_groups", "[", "i", "]", ")", "n_targets", ".", "append", "(", "targets", "[", "i", "]", ")", "return", "n_sources", ",", "n_targets" ]
https://github.com/stopstalk/stopstalk-deployment/blob/10c3ab44c4ece33ae515f6888c15033db2004bb1/aws_lambda/spoj_aws_lambda_function/lambda_code/setuptools/dep_util.py#L6-L23
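A self-contained check of the rebuild semantics, using the vendored module path from the record (newer setuptools releases dropped dep_util, so the import is tied to that vintage):

    import os, time
    from setuptools.dep_util import newer_pairwise_group

    for name in ('a.c', 'a.h', 'a.o'):
        open(name, 'w').close()
    past = time.time() - 60
    os.utime('a.o', (past, past))  # make the target older than its sources
    print(newer_pairwise_group([['a.c', 'a.h']], ['a.o']))
    # -> ([['a.c', 'a.h']], ['a.o']) : this group needs rebuilding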
cylc/cylc-flow
5ec221143476c7c616c156b74158edfbcd83794a
cylc/flow/network/graphql.py
python
AstDocArguments.__init__
(self, schema, document_ast, variable_values)
[]
def __init__(self, schema, document_ast, variable_values): self.schema = schema self.operation_defs = {} self.fragment_defs = {} self.visited_fragments = set() for defn in document_ast.definitions: if isinstance(defn, ast.OperationDefinition): root_type = get_operation_root_type(schema, defn) definition_variables = defn.variable_definitions or [] if definition_variables: def_var_names = { v.variable.name.value for v in definition_variables } var_names_diff = def_var_names.difference({ k for k in variable_values if k in def_var_names }) # check if we are missing some of the definition variables if var_names_diff: msg = (f'Please check your query variables. The ' f'following variables are missing: ' f'[{", ".join(var_names_diff)}]') raise ValueError(msg) self.operation_defs[getattr(defn.name, 'value', root_type)] = { 'definition': defn, 'parent_type': root_type, 'variables': get_variable_values( schema, definition_variables, variable_values ), } elif isinstance(defn, ast.FragmentDefinition): self.fragment_defs[defn.name.value] = defn
[ "def", "__init__", "(", "self", ",", "schema", ",", "document_ast", ",", "variable_values", ")", ":", "self", ".", "schema", "=", "schema", "self", ".", "operation_defs", "=", "{", "}", "self", ".", "fragment_defs", "=", "{", "}", "self", ".", "visited_fragments", "=", "set", "(", ")", "for", "defn", "in", "document_ast", ".", "definitions", ":", "if", "isinstance", "(", "defn", ",", "ast", ".", "OperationDefinition", ")", ":", "root_type", "=", "get_operation_root_type", "(", "schema", ",", "defn", ")", "definition_variables", "=", "defn", ".", "variable_definitions", "or", "[", "]", "if", "definition_variables", ":", "def_var_names", "=", "{", "v", ".", "variable", ".", "name", ".", "value", "for", "v", "in", "definition_variables", "}", "var_names_diff", "=", "def_var_names", ".", "difference", "(", "{", "k", "for", "k", "in", "variable_values", "if", "k", "in", "def_var_names", "}", ")", "# check if we are missing some of the definition variables", "if", "var_names_diff", ":", "msg", "=", "(", "f'Please check your query variables. The '", "f'following variables are missing: '", "f'[{\", \".join(var_names_diff)}]'", ")", "raise", "ValueError", "(", "msg", ")", "self", ".", "operation_defs", "[", "getattr", "(", "defn", ".", "name", ",", "'value'", ",", "root_type", ")", "]", "=", "{", "'definition'", ":", "defn", ",", "'parent_type'", ":", "root_type", ",", "'variables'", ":", "get_variable_values", "(", "schema", ",", "definition_variables", ",", "variable_values", ")", ",", "}", "elif", "isinstance", "(", "defn", ",", "ast", ".", "FragmentDefinition", ")", ":", "self", ".", "fragment_defs", "[", "defn", ".", "name", ".", "value", "]", "=", "defn" ]
https://github.com/cylc/cylc-flow/blob/5ec221143476c7c616c156b74158edfbcd83794a/cylc/flow/network/graphql.py#L146-L182
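The missing-variable guard in the constructor above boils down to a set difference between declared and supplied variable names; a standalone sketch of just that step (the names are illustrative, not part of cylc's schema):

declared = {"workflowId", "taskId"}    # from defn.variable_definitions
provided = {"workflowId": "w/1"}       # caller-supplied variable_values
missing = declared - provided.keys()
print(missing)  # {'taskId'} -> this is what would trigger the ValueError above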
aneisch/home-assistant-config
86e381fde9609cb8871c439c433c12989e4e225d
custom_components/alexa_media/switch.py
python
AlexaMediaSwitch.device_info
(self)
return { "identifiers": {(ALEXA_DOMAIN, self._client.unique_id)}, "via_device": (ALEXA_DOMAIN, self._client.unique_id), }
Return device_info for device registry.
Return device_info for device registry.
[ "Return", "device_info", "for", "device", "registry", "." ]
def device_info(self): """Return device_info for device registry.""" return { "identifiers": {(ALEXA_DOMAIN, self._client.unique_id)}, "via_device": (ALEXA_DOMAIN, self._client.unique_id), }
[ "def", "device_info", "(", "self", ")", ":", "return", "{", "\"identifiers\"", ":", "{", "(", "ALEXA_DOMAIN", ",", "self", ".", "_client", ".", "unique_id", ")", "}", ",", "\"via_device\"", ":", "(", "ALEXA_DOMAIN", ",", "self", ".", "_client", ".", "unique_id", ")", ",", "}" ]
https://github.com/aneisch/home-assistant-config/blob/86e381fde9609cb8871c439c433c12989e4e225d/custom_components/alexa_media/switch.py#L280-L285
Esri/ArcREST
ab240fde2b0200f61d4a5f6df033516e53f2f416
src/arcrest/ags/_geoprocessing.py
python
GPService.resultMapServerName
(self)
return self._resultMapServerName
returns the result mapserver name
returns the result mapserver name
[ "returns", "the", "result", "mapserver", "name" ]
def resultMapServerName(self): """ returns the result mapserver name """ if self._resultMapServerName is None: self.__init() return self._resultMapServerName
[ "def", "resultMapServerName", "(", "self", ")", ":", "if", "self", ".", "_resultMapServerName", "is", "None", ":", "self", ".", "__init", "(", ")", "return", "self", ".", "_resultMapServerName" ]
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_geoprocessing.py#L125-L129
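The getter above is a lazy-initialization pattern: the private self.__init() fetch runs only when the cached value is still None. A generic standalone sketch of the same pattern (class and names are illustrative):

class Service:
    def __init__(self):
        self._result_map_server_name = None    # not fetched yet

    def _load_metadata(self):
        # stand-in for the service round-trip done by GPService.__init
        self._result_map_server_name = "ResultMapService"

    @property
    def result_map_server_name(self):
        if self._result_map_server_name is None:
            self._load_metadata()              # fetch once, then cache
        return self._result_map_server_name

print(Service().result_map_server_name)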
snapcore/snapcraft
b81550376df7f2d0dfe65f7bfb006a3107252450
snapcraft/internal/repo/apt_sources_manager.py
python
AptSourcesManager._install_sources_ppa
( self, *, package_repo: package_repository.PackageRepositoryAptPpa )
return self._install_sources( components=["main"], formats=["deb"], name=f"ppa-{owner}_{name}", suites=[codename], url=f"http://ppa.launchpad.net/{owner}/{name}/ubuntu", )
Install PPA formatted repository. Create a sources list config by: - Looking up the codename of the host OS and using it as the "suites" entry. - Formulate deb URL to point to PPA. - Enable only "deb" formats. :returns: True if source configuration was changed.
Install PPA formatted repository.
[ "Install", "PPA", "formatted", "repository", "." ]
def _install_sources_ppa( self, *, package_repo: package_repository.PackageRepositoryAptPpa ) -> bool: """Install PPA formatted repository. Create a sources list config by: - Looking up the codename of the host OS and using it as the "suites" entry. - Formulate deb URL to point to PPA. - Enable only "deb" formats. :returns: True if source configuration was changed. """ owner, name = apt_ppa.split_ppa_parts(ppa=package_repo.ppa) codename = os_release.OsRelease().version_codename() return self._install_sources( components=["main"], formats=["deb"], name=f"ppa-{owner}_{name}", suites=[codename], url=f"http://ppa.launchpad.net/{owner}/{name}/ubuntu", )
[ "def", "_install_sources_ppa", "(", "self", ",", "*", ",", "package_repo", ":", "package_repository", ".", "PackageRepositoryAptPpa", ")", "->", "bool", ":", "owner", ",", "name", "=", "apt_ppa", ".", "split_ppa_parts", "(", "ppa", "=", "package_repo", ".", "ppa", ")", "codename", "=", "os_release", ".", "OsRelease", "(", ")", ".", "version_codename", "(", ")", "return", "self", ".", "_install_sources", "(", "components", "=", "[", "\"main\"", "]", ",", "formats", "=", "[", "\"deb\"", "]", ",", "name", "=", "f\"ppa-{owner}_{name}\"", ",", "suites", "=", "[", "codename", "]", ",", "url", "=", "f\"http://ppa.launchpad.net/{owner}/{name}/ubuntu\"", ",", ")" ]
https://github.com/snapcore/snapcraft/blob/b81550376df7f2d0dfe65f7bfb006a3107252450/snapcraft/internal/repo/apt_sources_manager.py#L204-L226
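To make the PPA-to-sources mapping concrete, here is a standalone sketch of the same transformation; splitting the "ppa:owner/name" string and supplying the host codename are done inline instead of through the repo's apt_ppa and os_release helpers, and the returned dict stands in for the private _install_sources call.

def ppa_to_source(ppa, codename):
    # "ppa:owner/name" (or bare "owner/name") -> apt source components
    owner, name = ppa.replace("ppa:", "", 1).split("/", 1)
    return {
        "name": f"ppa-{owner}_{name}",
        "url": f"http://ppa.launchpad.net/{owner}/{name}/ubuntu",
        "suites": [codename],
        "components": ["main"],
        "formats": ["deb"],
    }

print(ppa_to_source("ppa:deadsnakes/ppa", "focal"))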
gramaziokohler/roslibpy
92b97d6daa78d30384e3a347d46be51c6e7fbd01
src/roslibpy/core.py
python
Time.nsecs
(self)
return self.data['nsecs']
Nanoseconds since seconds.
Nanoseconds since seconds.
[ "Nanoseconds", "since", "seconds", "." ]
def nsecs(self): """Nanoseconds since seconds.""" return self.data['nsecs']
[ "def", "nsecs", "(", "self", ")", ":", "return", "self", ".", "data", "[", "'nsecs'", "]" ]
https://github.com/gramaziokohler/roslibpy/blob/92b97d6daa78d30384e3a347d46be51c6e7fbd01/src/roslibpy/core.py#L56-L58
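A tiny follow-up to the property above: a ROS time message keeps secs and nsecs apart, and collapsing them into float seconds is the usual next step. A sketch over the same {'secs', 'nsecs'} dict shape the class wraps:

stamp = {'secs': 1700000000, 'nsecs': 250000000}
seconds = stamp['secs'] + stamp['nsecs'] * 1e-9  # combine the two fields
print(seconds)  # 1700000000.25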
DataBiosphere/toil
2e148eee2114ece8dcc3ec8a83f36333266ece0d
src/toil/job.py
python
Job.checkJobGraphAcylic
(self)
:raises toil.job.JobGraphDeadlockException: if the connected component \ of jobs containing this job contains any cycles of child/followOn dependencies \ in the *augmented job graph* (see below). Such cycles are not allowed \ in valid job graphs. A follow-on edge (A, B) between two jobs A and B is equivalent \ to adding a child edge to B from (1) A, (2) from each child of A, \ and (3) from the successors of each child of A. We call each such edge \ an edge an "implied" edge. The augmented job graph is a job graph including \ all the implied edges. For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \ a graph with no follow-ons. The former follow-on case could be improved! Only deals with jobs created here, rather than loaded from the job store.
:raises toil.job.JobGraphDeadlockException: if the connected component \ of jobs containing this job contains any cycles of child/followOn dependencies \ in the *augmented job graph* (see below). Such cycles are not allowed \ in valid job graphs.
[ ":", "raises", "toil", ".", "job", ".", "JobGraphDeadlockException", ":", "if", "the", "connected", "component", "\\", "of", "jobs", "containing", "this", "job", "contains", "any", "cycles", "of", "child", "/", "followOn", "dependencies", "\\", "in", "the", "*", "augmented", "job", "graph", "*", "(", "see", "below", ")", ".", "Such", "cycles", "are", "not", "allowed", "\\", "in", "valid", "job", "graphs", "." ]
def checkJobGraphAcylic(self): """ :raises toil.job.JobGraphDeadlockException: if the connected component \ of jobs containing this job contains any cycles of child/followOn dependencies \ in the *augmented job graph* (see below). Such cycles are not allowed \ in valid job graphs. A follow-on edge (A, B) between two jobs A and B is equivalent \ to adding a child edge to B from (1) A, (2) from each child of A, \ and (3) from the successors of each child of A. We call each such edge \ an edge an "implied" edge. The augmented job graph is a job graph including \ all the implied edges. For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \ a graph with no follow-ons. The former follow-on case could be improved! Only deals with jobs created here, rather than loaded from the job store. """ #Get the root jobs roots = self.getRootJobs() if len(roots) == 0: raise JobGraphDeadlockException("Graph contains no root jobs due to cycles") #Get implied edges extraEdges = self._getImpliedEdges(roots) #Check for directed cycles in the augmented graph visited = set() for root in roots: root._checkJobGraphAcylicDFS([], visited, extraEdges)
[ "def", "checkJobGraphAcylic", "(", "self", ")", ":", "#Get the root jobs", "roots", "=", "self", ".", "getRootJobs", "(", ")", "if", "len", "(", "roots", ")", "==", "0", ":", "raise", "JobGraphDeadlockException", "(", "\"Graph contains no root jobs due to cycles\"", ")", "#Get implied edges", "extraEdges", "=", "self", ".", "_getImpliedEdges", "(", "roots", ")", "#Check for directed cycles in the augmented graph", "visited", "=", "set", "(", ")", "for", "root", "in", "roots", ":", "root", ".", "_checkJobGraphAcylicDFS", "(", "[", "]", ",", "visited", ",", "extraEdges", ")" ]
https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/job.py#L1585-L1614
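The DFS that checkJobGraphAcylic delegates to can be illustrated without Toil; this sketch applies the same ancestor-stack idea to a plain adjacency dict (the graph shape and the ValueError are illustrative, not Toil's JobGraphDeadlockException machinery).

def check_acyclic(graph):
    # Raise if any directed cycle is reachable; graph maps node -> children.
    visited = set()
    def dfs(node, stack):
        if node in stack:
            raise ValueError(f"cycle detected through {node!r}")
        if node in visited:
            return
        visited.add(node)
        for child in graph.get(node, ()):
            dfs(child, stack + [node])
    for root in graph:
        dfs(root, [])

check_acyclic({'A': ['B'], 'B': ['C'], 'C': []})   # passes silently
# check_acyclic({'A': ['B'], 'B': ['A']})          # would raise ValueError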
iGio90/Dwarf
bb3011cdffd209c7e3f5febe558053bf649ca69c
dwarf_debugger/ui/panels/panel_objc_inspector.py
python
ObjCInspector._on_module_contextmenu
(self, pos)
Module ContextMenu
Module ContextMenu
[ "Module", "ContextMenu" ]
def _on_module_contextmenu(self, pos): """ Module ContextMenu """ index = self._ObjC_modules.indexAt(pos).row() glbl_pt = self._ObjC_modules.mapToGlobal(pos) context_menu = QMenu(self) context_menu.addAction('Refresh', self._enumerate_objc_modules) context_menu.exec_(glbl_pt)
[ "def", "_on_module_contextmenu", "(", "self", ",", "pos", ")", ":", "index", "=", "self", ".", "_ObjC_modules", ".", "indexAt", "(", "pos", ")", ".", "row", "(", ")", "glbl_pt", "=", "self", ".", "_ObjC_modules", ".", "mapToGlobal", "(", "pos", ")", "context_menu", "=", "QMenu", "(", "self", ")", "context_menu", ".", "addAction", "(", "'Refresh'", ",", "self", ".", "_enumerate_objc_modules", ")", "context_menu", ".", "exec_", "(", "glbl_pt", ")" ]
https://github.com/iGio90/Dwarf/blob/bb3011cdffd209c7e3f5febe558053bf649ca69c/dwarf_debugger/ui/panels/panel_objc_inspector.py#L257-L265
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/email/header.py
python
decode_header
(header)
return collapsed
Decode a message header value without converting charset. Returns a list of (string, charset) pairs containing each of the decoded parts of the header. Charset is None for non-encoded parts of the header, otherwise a lower-case string containing the name of the character set specified in the encoded string. header may be a string that may or may not contain RFC2047 encoded words, or it may be a Header object. An email.errors.HeaderParseError may be raised when certain decoding error occurs (e.g. a base64 decoding exception).
Decode a message header value without converting charset.
[ "Decode", "a", "message", "header", "value", "without", "converting", "charset", "." ]
def decode_header(header): """Decode a message header value without converting charset. Returns a list of (string, charset) pairs containing each of the decoded parts of the header. Charset is None for non-encoded parts of the header, otherwise a lower-case string containing the name of the character set specified in the encoded string. header may be a string that may or may not contain RFC2047 encoded words, or it may be a Header object. An email.errors.HeaderParseError may be raised when certain decoding error occurs (e.g. a base64 decoding exception). """ # If it is a Header object, we can just return the encoded chunks. if hasattr(header, '_chunks'): return [(_charset._encode(string, str(charset)), str(charset)) for string, charset in header._chunks] # If no encoding, just return the header with no charset. if not ecre.search(header): return [(header, None)] # First step is to parse all the encoded parts into triplets of the form # (encoded_string, encoding, charset). For unencoded strings, the last # two parts will be None. words = [] for line in header.splitlines(): parts = ecre.split(line) first = True while parts: unencoded = parts.pop(0) if first: unencoded = unencoded.lstrip() first = False if unencoded: words.append((unencoded, None, None)) if parts: charset = parts.pop(0).lower() encoding = parts.pop(0).lower() encoded = parts.pop(0) words.append((encoded, encoding, charset)) # Now loop over words and remove words that consist of whitespace # between two encoded strings. droplist = [] for n, w in enumerate(words): if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace(): droplist.append(n-1) for d in reversed(droplist): del words[d] # The next step is to decode each encoded word by applying the reverse # base64 or quopri transformation. decoded_words is now a list of the # form (decoded_word, charset). decoded_words = [] for encoded_string, encoding, charset in words: if encoding is None: # This is an unencoded word. decoded_words.append((encoded_string, charset)) elif encoding == 'q': word = email.quoprimime.header_decode(encoded_string) decoded_words.append((word, charset)) elif encoding == 'b': paderr = len(encoded_string) % 4 # Postel's law: add missing padding if paderr: encoded_string += '==='[:4 - paderr] try: word = email.base64mime.decode(encoded_string) except binascii.Error: raise HeaderParseError('Base64 decoding error') else: decoded_words.append((word, charset)) else: raise AssertionError('Unexpected encoding: ' + encoding) # Now convert all words to bytes and collapse consecutive runs of # similarly encoded words. collapsed = [] last_word = last_charset = None for word, charset in decoded_words: if isinstance(word, str): word = bytes(word, 'raw-unicode-escape') if last_word is None: last_word = word last_charset = charset elif charset != last_charset: collapsed.append((last_word, last_charset)) last_word = word last_charset = charset elif last_charset is None: last_word += BSPACE + word else: last_word += word collapsed.append((last_word, last_charset)) return collapsed
[ "def", "decode_header", "(", "header", ")", ":", "# If it is a Header object, we can just return the encoded chunks.", "if", "hasattr", "(", "header", ",", "'_chunks'", ")", ":", "return", "[", "(", "_charset", ".", "_encode", "(", "string", ",", "str", "(", "charset", ")", ")", ",", "str", "(", "charset", ")", ")", "for", "string", ",", "charset", "in", "header", ".", "_chunks", "]", "# If no encoding, just return the header with no charset.", "if", "not", "ecre", ".", "search", "(", "header", ")", ":", "return", "[", "(", "header", ",", "None", ")", "]", "# First step is to parse all the encoded parts into triplets of the form", "# (encoded_string, encoding, charset). For unencoded strings, the last", "# two parts will be None.", "words", "=", "[", "]", "for", "line", "in", "header", ".", "splitlines", "(", ")", ":", "parts", "=", "ecre", ".", "split", "(", "line", ")", "first", "=", "True", "while", "parts", ":", "unencoded", "=", "parts", ".", "pop", "(", "0", ")", "if", "first", ":", "unencoded", "=", "unencoded", ".", "lstrip", "(", ")", "first", "=", "False", "if", "unencoded", ":", "words", ".", "append", "(", "(", "unencoded", ",", "None", ",", "None", ")", ")", "if", "parts", ":", "charset", "=", "parts", ".", "pop", "(", "0", ")", ".", "lower", "(", ")", "encoding", "=", "parts", ".", "pop", "(", "0", ")", ".", "lower", "(", ")", "encoded", "=", "parts", ".", "pop", "(", "0", ")", "words", ".", "append", "(", "(", "encoded", ",", "encoding", ",", "charset", ")", ")", "# Now loop over words and remove words that consist of whitespace", "# between two encoded strings.", "droplist", "=", "[", "]", "for", "n", ",", "w", "in", "enumerate", "(", "words", ")", ":", "if", "n", ">", "1", "and", "w", "[", "1", "]", "and", "words", "[", "n", "-", "2", "]", "[", "1", "]", "and", "words", "[", "n", "-", "1", "]", "[", "0", "]", ".", "isspace", "(", ")", ":", "droplist", ".", "append", "(", "n", "-", "1", ")", "for", "d", "in", "reversed", "(", "droplist", ")", ":", "del", "words", "[", "d", "]", "# The next step is to decode each encoded word by applying the reverse", "# base64 or quopri transformation. 
decoded_words is now a list of the", "# form (decoded_word, charset).", "decoded_words", "=", "[", "]", "for", "encoded_string", ",", "encoding", ",", "charset", "in", "words", ":", "if", "encoding", "is", "None", ":", "# This is an unencoded word.", "decoded_words", ".", "append", "(", "(", "encoded_string", ",", "charset", ")", ")", "elif", "encoding", "==", "'q'", ":", "word", "=", "email", ".", "quoprimime", ".", "header_decode", "(", "encoded_string", ")", "decoded_words", ".", "append", "(", "(", "word", ",", "charset", ")", ")", "elif", "encoding", "==", "'b'", ":", "paderr", "=", "len", "(", "encoded_string", ")", "%", "4", "# Postel's law: add missing padding", "if", "paderr", ":", "encoded_string", "+=", "'==='", "[", ":", "4", "-", "paderr", "]", "try", ":", "word", "=", "email", ".", "base64mime", ".", "decode", "(", "encoded_string", ")", "except", "binascii", ".", "Error", ":", "raise", "HeaderParseError", "(", "'Base64 decoding error'", ")", "else", ":", "decoded_words", ".", "append", "(", "(", "word", ",", "charset", ")", ")", "else", ":", "raise", "AssertionError", "(", "'Unexpected encoding: '", "+", "encoding", ")", "# Now convert all words to bytes and collapse consecutive runs of", "# similarly encoded words.", "collapsed", "=", "[", "]", "last_word", "=", "last_charset", "=", "None", "for", "word", ",", "charset", "in", "decoded_words", ":", "if", "isinstance", "(", "word", ",", "str", ")", ":", "word", "=", "bytes", "(", "word", ",", "'raw-unicode-escape'", ")", "if", "last_word", "is", "None", ":", "last_word", "=", "word", "last_charset", "=", "charset", "elif", "charset", "!=", "last_charset", ":", "collapsed", ".", "append", "(", "(", "last_word", ",", "last_charset", ")", ")", "last_word", "=", "word", "last_charset", "=", "charset", "elif", "last_charset", "is", "None", ":", "last_word", "+=", "BSPACE", "+", "word", "else", ":", "last_word", "+=", "word", "collapsed", ".", "append", "(", "(", "last_word", ",", "last_charset", ")", ")", "return", "collapsed" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/email/header.py#L61-L152
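Because this record is the stdlib email.header module, the function can be exercised directly; a short usage example with one RFC 2047 base64 encoded word followed by a plain tail:

from email.header import decode_header

parts = decode_header('=?utf-8?b?SGVsbG8sIHdvcmxk?= (plain tail)')
print(parts)  # [(b'Hello, world', 'utf-8'), (b' (plain tail)', None)]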
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/site.py
python
addusersitepackages
(known_paths)
return known_paths
Add a per user site-package to sys.path Each user has its own python directory with site-packages in the home directory.
Add a per user site-package to sys.path
[ "Add", "a", "per", "user", "site", "-", "package", "to", "sys", ".", "path" ]
def addusersitepackages(known_paths): """Add a per user site-package to sys.path Each user has its own python directory with site-packages in the home directory. """ # get the per user site-package path # this call will also make sure USER_BASE and USER_SITE are set user_site = getusersitepackages() if ENABLE_USER_SITE and os.path.isdir(user_site): addsitedir(user_site, known_paths) return known_paths
[ "def", "addusersitepackages", "(", "known_paths", ")", ":", "# get the per user site-package path", "# this call will also make sure USER_BASE and USER_SITE are set", "user_site", "=", "getusersitepackages", "(", ")", "if", "ENABLE_USER_SITE", "and", "os", ".", "path", ".", "isdir", "(", "user_site", ")", ":", "addsitedir", "(", "user_site", ",", "known_paths", ")", "return", "known_paths" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site.py#L265-L277
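The function above is normally invoked by site.py during interpreter start-up, but the per-user directory it adds is easy to inspect after the fact:

import site, sys

user_site = site.getusersitepackages()
print(user_site)
print(user_site in sys.path)  # True when user site-packages is enabled and the dir exists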
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/full/gzip.py
python
main
()
[]
def main(): from argparse import ArgumentParser parser = ArgumentParser(description= "A simple command line interface for the gzip module: act like gzip, " "but do not delete the input file.") group = parser.add_mutually_exclusive_group() group.add_argument('--fast', action='store_true', help='compress faster') group.add_argument('--best', action='store_true', help='compress better') group.add_argument("-d", "--decompress", action="store_true", help="act like gunzip instead of gzip") parser.add_argument("args", nargs="*", default=["-"], metavar='file') args = parser.parse_args() compresslevel = _COMPRESS_LEVEL_TRADEOFF if args.fast: compresslevel = _COMPRESS_LEVEL_FAST elif args.best: compresslevel = _COMPRESS_LEVEL_BEST for arg in args.args: if args.decompress: if arg == "-": f = GzipFile(filename="", mode="rb", fileobj=sys.stdin.buffer) g = sys.stdout.buffer else: if arg[-3:] != ".gz": sys.exit(f"filename doesn't end in .gz: {arg!r}") f = open(arg, "rb") g = builtins.open(arg[:-3], "wb") else: if arg == "-": f = sys.stdin.buffer g = GzipFile(filename="", mode="wb", fileobj=sys.stdout.buffer, compresslevel=compresslevel) else: f = builtins.open(arg, "rb") g = open(arg + ".gz", "wb") while True: chunk = f.read(io.DEFAULT_BUFFER_SIZE) if not chunk: break g.write(chunk) if g is not sys.stdout.buffer: g.close() if f is not sys.stdin.buffer: f.close()
[ "def", "main", "(", ")", ":", "from", "argparse", "import", "ArgumentParser", "parser", "=", "ArgumentParser", "(", "description", "=", "\"A simple command line interface for the gzip module: act like gzip, \"", "\"but do not delete the input file.\"", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "group", ".", "add_argument", "(", "'--fast'", ",", "action", "=", "'store_true'", ",", "help", "=", "'compress faster'", ")", "group", ".", "add_argument", "(", "'--best'", ",", "action", "=", "'store_true'", ",", "help", "=", "'compress better'", ")", "group", ".", "add_argument", "(", "\"-d\"", ",", "\"--decompress\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"act like gunzip instead of gzip\"", ")", "parser", ".", "add_argument", "(", "\"args\"", ",", "nargs", "=", "\"*\"", ",", "default", "=", "[", "\"-\"", "]", ",", "metavar", "=", "'file'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "compresslevel", "=", "_COMPRESS_LEVEL_TRADEOFF", "if", "args", ".", "fast", ":", "compresslevel", "=", "_COMPRESS_LEVEL_FAST", "elif", "args", ".", "best", ":", "compresslevel", "=", "_COMPRESS_LEVEL_BEST", "for", "arg", "in", "args", ".", "args", ":", "if", "args", ".", "decompress", ":", "if", "arg", "==", "\"-\"", ":", "f", "=", "GzipFile", "(", "filename", "=", "\"\"", ",", "mode", "=", "\"rb\"", ",", "fileobj", "=", "sys", ".", "stdin", ".", "buffer", ")", "g", "=", "sys", ".", "stdout", ".", "buffer", "else", ":", "if", "arg", "[", "-", "3", ":", "]", "!=", "\".gz\"", ":", "sys", ".", "exit", "(", "f\"filename doesn't end in .gz: {arg!r}\"", ")", "f", "=", "open", "(", "arg", ",", "\"rb\"", ")", "g", "=", "builtins", ".", "open", "(", "arg", "[", ":", "-", "3", "]", ",", "\"wb\"", ")", "else", ":", "if", "arg", "==", "\"-\"", ":", "f", "=", "sys", ".", "stdin", ".", "buffer", "g", "=", "GzipFile", "(", "filename", "=", "\"\"", ",", "mode", "=", "\"wb\"", ",", "fileobj", "=", "sys", ".", "stdout", ".", "buffer", ",", "compresslevel", "=", "compresslevel", ")", "else", ":", "f", "=", "builtins", ".", "open", "(", "arg", ",", "\"rb\"", ")", "g", "=", "open", "(", "arg", "+", "\".gz\"", ",", "\"wb\"", ")", "while", "True", ":", "chunk", "=", "f", ".", "read", "(", "io", ".", "DEFAULT_BUFFER_SIZE", ")", "if", "not", "chunk", ":", "break", "g", ".", "write", "(", "chunk", ")", "if", "g", "is", "not", "sys", ".", "stdout", ".", "buffer", ":", "g", ".", "close", "(", ")", "if", "f", "is", "not", "sys", ".", "stdin", ".", "buffer", ":", "f", ".", "close", "(", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/gzip.py#L560-L606
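The CLI above mirrors ordinary programmatic use of the module; a minimal round-trip sketch with gzip.open (the file name is hypothetical):

import gzip

data = b"hello " * 1000
with gzip.open("demo.txt.gz", "wb", compresslevel=9) as g:  # like --best
    g.write(data)
with gzip.open("demo.txt.gz", "rb") as g:                   # like --decompress
    assert g.read() == data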
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/rcsetup.py
python
validate_bool_maybe_none
(b)
Convert b to a boolean or raise
Convert b to a boolean or raise
[ "Convert", "b", "to", "a", "boolean", "or", "raise" ]
def validate_bool_maybe_none(b): 'Convert b to a boolean or raise' if type(b) is str: b = b.lower() if b == 'none': return None if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False else: raise ValueError('Could not convert "%s" to boolean' % b)
[ "def", "validate_bool_maybe_none", "(", "b", ")", ":", "if", "type", "(", "b", ")", "is", "str", ":", "b", "=", "b", ".", "lower", "(", ")", "if", "b", "==", "'none'", ":", "return", "None", "if", "b", "in", "(", "'t'", ",", "'y'", ",", "'yes'", ",", "'on'", ",", "'true'", ",", "'1'", ",", "1", ",", "True", ")", ":", "return", "True", "elif", "b", "in", "(", "'f'", ",", "'n'", ",", "'no'", ",", "'off'", ",", "'false'", ",", "'0'", ",", "0", ",", "False", ")", ":", "return", "False", "else", ":", "raise", "ValueError", "(", "'Could not convert \"%s\" to boolean'", "%", "b", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/rcsetup.py#L85-L96
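A behavioural usage example for the validator above; the expected results follow directly from the branches shown, and the import assumes a matplotlib version that still exposes the validator from rcsetup (this record's module).

from matplotlib.rcsetup import validate_bool_maybe_none as v  # assumption: still public

assert v('None') is None    # case-insensitive 'none' short-circuits
assert v('YES') is True
assert v(0) is False
# v('maybe') would raise ValueError('Could not convert "maybe" to boolean')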
Runbook/runbook
7b68622f75ef09f654046f0394540025f3ee7445
src/actions/actions/digitalocean-new-droplet/__init__.py
python
CallDO
(api_key, data)
return True
[]
def CallDO(api_key, data): headers = { 'Authorization': 'Bearer ' + api_key, 'Content-Type': 'application/json', } payload = json.dumps(data) r = requests.post(_DO_CREATE_DROPLET_URL, timeout=_HTTP_TIMEOUT, data=payload, headers=headers, verify=True) assert r.status_code >= 200 and r.status_code < 400, \ 'Invalid HTTP status code received: %d - %s.' % (r.status_code, r.text) return True
[ "def", "CallDO", "(", "api_key", ",", "data", ")", ":", "headers", "=", "{", "'Authorization'", ":", "'Bearer '", "+", "api_key", ",", "'Content-Type'", ":", "'application/json'", ",", "}", "payload", "=", "json", ".", "dumps", "(", "data", ")", "r", "=", "requests", ".", "post", "(", "_DO_CREATE_DROPLET_URL", ",", "timeout", "=", "_HTTP_TIMEOUT", ",", "data", "=", "payload", ",", "headers", "=", "headers", ",", "verify", "=", "True", ")", "assert", "r", ".", "status_code", ">=", "200", "and", "r", ".", "status_code", "<", "400", ",", "'Invalid HTTP status code received: %d - %s.'", "%", "(", "r", ".", "status_code", ",", "r", ".", "text", ")", "return", "True" ]
https://github.com/Runbook/runbook/blob/7b68622f75ef09f654046f0394540025f3ee7445/src/actions/actions/digitalocean-new-droplet/__init__.py#L39-L49
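A hedged sketch of a data payload for the helper above: the fields follow DigitalOcean's documented v2 droplet-create body, but every value here (and the API key) is a placeholder, not something taken from this repo.

payload = {
    "name": "example-droplet",     # placeholder values throughout
    "region": "nyc3",
    "size": "s-1vcpu-1gb",
    "image": "ubuntu-20-04-x64",
}
# CallDO("YOUR_API_KEY", payload)  # POSTs the JSON with a Bearer token header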
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/django/contrib/gis/maps/google/zoom.py
python
GoogleZoom.lonlat_to_pixel
(self, lonlat, zoom)
return (px_x, px_y)
Converts a longitude, latitude coordinate pair for the given zoom level.
Converts a longitude, latitude coordinate pair for the given zoom level.
[ "Converts", "a", "longitude", "latitude", "coordinate", "pair", "for", "the", "given", "zoom", "level", "." ]
def lonlat_to_pixel(self, lonlat, zoom): "Converts a longitude, latitude coordinate pair for the given zoom level." # Setting up, unpacking the longitude, latitude values and getting the # number of pixels for the given zoom level. lon, lat = self.get_lon_lat(lonlat) npix = self._npix[zoom] # Calculating the pixel x coordinate by multiplying the longitude value # with the number of degrees/pixel at the given zoom level. px_x = round(npix + (lon * self._degpp[zoom])) # Creating the factor, and ensuring that 1 or -1 is not passed in as the # base to the logarithm. Here's why: # if fac = -1, we'll get log(0) which is undefined; # if fac = 1, our logarithm base will be divided by 0, also undefined. fac = min(max(sin(DTOR * lat), -0.9999), 0.9999) # Calculating the pixel y coordinate. px_y = round(npix + (0.5 * log((1 + fac) / (1 - fac)) * (-1.0 * self._radpp[zoom]))) # Returning the pixel x, y to the caller of the function. return (px_x, px_y)
[ "def", "lonlat_to_pixel", "(", "self", ",", "lonlat", ",", "zoom", ")", ":", "# Setting up, unpacking the longitude, latitude values and getting the", "# number of pixels for the given zoom level.", "lon", ",", "lat", "=", "self", ".", "get_lon_lat", "(", "lonlat", ")", "npix", "=", "self", ".", "_npix", "[", "zoom", "]", "# Calculating the pixel x coordinate by multiplying the longitude value", "# with the number of degrees/pixel at the given zoom level.", "px_x", "=", "round", "(", "npix", "+", "(", "lon", "*", "self", ".", "_degpp", "[", "zoom", "]", ")", ")", "# Creating the factor, and ensuring that 1 or -1 is not passed in as the", "# base to the logarithm. Here's why:", "# if fac = -1, we'll get log(0) which is undefined;", "# if fac = 1, our logarithm base will be divided by 0, also undefined.", "fac", "=", "min", "(", "max", "(", "sin", "(", "DTOR", "*", "lat", ")", ",", "-", "0.9999", ")", ",", "0.9999", ")", "# Calculating the pixel y coordinate.", "px_y", "=", "round", "(", "npix", "+", "(", "0.5", "*", "log", "(", "(", "1", "+", "fac", ")", "/", "(", "1", "-", "fac", ")", ")", "*", "(", "-", "1.0", "*", "self", ".", "_radpp", "[", "zoom", "]", ")", ")", ")", "# Returning the pixel x, y to the caller of the function.", "return", "(", "px_x", ",", "px_y", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/contrib/gis/maps/google/zoom.py#L67-L88
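The pixel-y expression above is the Mercator projection: the clamped fac term feeds 0.5*ln((1+sin(lat))/(1-sin(lat))), i.e. atanh(sin(lat)). A standalone sketch of the same projection for a 256*2^zoom-pixel world (the common Web-Mercator formulation, not Django's precomputed per-zoom tables):

import math

def lonlat_to_pixel(lon, lat, zoom, tile=256):
    n = tile * (2 ** zoom)                                      # world width in pixels
    x = (lon + 180.0) / 360.0 * n
    s = min(max(math.sin(math.radians(lat)), -0.9999), 0.9999)  # same clamping trick
    y = (0.5 - math.atanh(s) / (2 * math.pi)) * n
    return round(x), round(y)

print(lonlat_to_pixel(0.0, 0.0, 1))  # centre of a 512px world -> (256, 256)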
tdeboissiere/Kaggle
10ea42a001e55186e11d065a9156d8420e9ff6a5
Ponpare/Ponpare_submission/supervised_learning.py
python
fit_SVM
()
return d_user_pred, list_user, list_coupon
Fit an SVMRank model (from pysofia) returns: d_user_pred, list_user, list_coupon list_coupon = list of test coupons list_user = list of user ID d_user_pred : key = user, value = predicted ranking of coupons in list_coupon
Fit an SVMRank model (from pysofia)
[ "Fit", "an", "SVMRank", "model", "(", "from", "pysofia", ")" ]
def fit_SVM(): """Fit an SVMRank model (from pysofia) returns: d_user_pred, list_user, list_coupon list_coupon = list of test coupons list_user = list of user ID d_user_pred : key = user, value = predicted ranking of coupons in list_coupon """ #Get data for classification X_train, y_train, d_info = prepare_data() list_user = d_info["list_user"] list_coupon = d_info["list_coupon"] no_cpt = d_info["no_cpt"] mapper = d_info["mapper"] list_col_xgb = d_info["list_col_xgb"] list_col_mapper = d_info["list_col_mapper"] # Create RankSVM RSVM = RankSVM(max_iter=10, alpha = 1) RSVM.fit(X_train, y_train) #Store predictions in a dict d_user_pred = {} #Load test data by chunks to avoid memory issues for index, test in enumerate(pd.read_csv("../Data/Data_translated/test_supervised_learning.csv", chunksize=1000*no_cpt)) : sys.stdout.write("\rProcessing row " + str(index*1000*no_cpt)+" to row "+str((index+1)*1000*no_cpt)) sys.stdout.flush() test = test.fillna(-1) temp_list_user = test["USER_ID_hash"].drop_duplicates().values test = mapper.transform(test) test = pd.DataFrame(test, index = None, columns = list_col_mapper ) test = test[list_col_xgb] X_test = test.values y_test = RSVM.rank(X_test) for i in range(min(1000, len(temp_list_user))): user = temp_list_user[i] d_user_pred[user] = y_test[i*no_cpt: (i+1)*no_cpt] print # Compute score for users, apply MinMaxScaler for blending later on for i, user in enumerate(list_user) : list_pred = d_user_pred[user] MMS = MinMaxScaler() pred = MMS.fit_transform(np.ravel(list_pred).astype(float)) d_user_pred[user] = pred # Pickle the predictions for future use d_pred = {"list_coupon" : list_coupon.tolist(), "d_user_pred" : d_user_pred} with open("../Data/Data_translated/d_pred_SVM.pickle", "w") as f: pickle.dump(d_pred, f, protocol = pickle.HIGHEST_PROTOCOL) return d_user_pred, list_user, list_coupon
[ "def", "fit_SVM", "(", ")", ":", "#Get data for classification", "X_train", ",", "y_train", ",", "d_info", "=", "prepare_data", "(", ")", "list_user", "=", "d_info", "[", "\"list_user\"", "]", "list_coupon", "=", "d_info", "[", "\"list_coupon\"", "]", "no_cpt", "=", "d_info", "[", "\"no_cpt\"", "]", "mapper", "=", "d_info", "[", "\"mapper\"", "]", "list_col_xgb", "=", "d_info", "[", "\"list_col_xgb\"", "]", "list_col_mapper", "=", "d_info", "[", "\"list_col_mapper\"", "]", "# Create RankSVM", "RSVM", "=", "RankSVM", "(", "max_iter", "=", "10", ",", "alpha", "=", "1", ")", "RSVM", ".", "fit", "(", "X_train", ",", "y_train", ")", "#Store predictions in a dict", "d_user_pred", "=", "{", "}", "#Load test data by chunks to avoid memory issues", "for", "index", ",", "test", "in", "enumerate", "(", "pd", ".", "read_csv", "(", "\"../Data/Data_translated/test_supervised_learning.csv\"", ",", "chunksize", "=", "1000", "*", "no_cpt", ")", ")", ":", "sys", ".", "stdout", ".", "write", "(", "\"\\rProcessing row \"", "+", "str", "(", "index", "*", "1000", "*", "no_cpt", ")", "+", "\" to row \"", "+", "str", "(", "(", "index", "+", "1", ")", "*", "1000", "*", "no_cpt", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "test", "=", "test", ".", "fillna", "(", "-", "1", ")", "temp_list_user", "=", "test", "[", "\"USER_ID_hash\"", "]", ".", "drop_duplicates", "(", ")", ".", "values", "test", "=", "mapper", ".", "transform", "(", "test", ")", "test", "=", "pd", ".", "DataFrame", "(", "test", ",", "index", "=", "None", ",", "columns", "=", "list_col_mapper", ")", "test", "=", "test", "[", "list_col_xgb", "]", "X_test", "=", "test", ".", "values", "y_test", "=", "RSVM", ".", "rank", "(", "X_test", ")", "for", "i", "in", "range", "(", "min", "(", "1000", ",", "len", "(", "temp_list_user", ")", ")", ")", ":", "user", "=", "temp_list_user", "[", "i", "]", "d_user_pred", "[", "user", "]", "=", "y_test", "[", "i", "*", "no_cpt", ":", "(", "i", "+", "1", ")", "*", "no_cpt", "]", "print", "# Compute score for users, apply MinMaxScaler for blending later on", "for", "i", ",", "user", "in", "enumerate", "(", "list_user", ")", ":", "list_pred", "=", "d_user_pred", "[", "user", "]", "MMS", "=", "MinMaxScaler", "(", ")", "pred", "=", "MMS", ".", "fit_transform", "(", "np", ".", "ravel", "(", "list_pred", ")", ".", "astype", "(", "float", ")", ")", "d_user_pred", "[", "user", "]", "=", "pred", "# Pickle the predictions for future use", "d_pred", "=", "{", "\"list_coupon\"", ":", "list_coupon", ".", "tolist", "(", ")", ",", "\"d_user_pred\"", ":", "d_user_pred", "}", "with", "open", "(", "\"../Data/Data_translated/d_pred_SVM.pickle\"", ",", "\"w\"", ")", "as", "f", ":", "pickle", ".", "dump", "(", "d_pred", ",", "f", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")", "return", "d_user_pred", ",", "list_user", ",", "list_coupon" ]
https://github.com/tdeboissiere/Kaggle/blob/10ea42a001e55186e11d065a9156d8420e9ff6a5/Ponpare/Ponpare_submission/supervised_learning.py#L273-L327
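The chunked-prediction pattern in the record is worth isolating: pandas' chunksize iterator keeps memory bounded while predictions accumulate. A minimal sketch with a hypothetical CSV path and scoring callable (the USER_ID_hash column name comes from the record above):

import pandas as pd

def score_in_chunks(csv_path, predict, chunksize=10000):
    # Stream the CSV so only one chunk is resident at a time.
    preds = {}
    for chunk in pd.read_csv(csv_path, chunksize=chunksize):
        chunk = chunk.fillna(-1)
        for user, rows in chunk.groupby("USER_ID_hash"):
            preds[user] = predict(rows)
    return preds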
CalebBell/thermo
572a47d1b03d49fe609b8d5f826fa6a7cde00828
thermo/mixture.py
python
Mixture.JTls
(self)
return [i.JTl for i in self.Chemicals]
r'''Pure component Joule Thomson coefficients of the chemicals in the mixture in the liquid phase at its current temperature and pressure, in units of [K/Pa]. .. math:: \mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p} \left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right] = \frac{V}{C_p}\left(\beta T-1\right) Examples -------- >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).JTls [-3.8633730709853161e-07, -3.464395792560331e-07]
r'''Pure component Joule Thomson coefficients of the chemicals in the mixture in the liquid phase at its current temperature and pressure, in units of [K/Pa].
[ "r", "Pure", "component", "Joule", "Thomson", "coefficients", "of", "the", "chemicals", "in", "the", "mixture", "in", "the", "liquid", "phase", "at", "its", "current", "temperature", "and", "pressure", "in", "units", "of", "[", "K", "/", "Pa", "]", "." ]
def JTls(self): r'''Pure component Joule Thomson coefficients of the chemicals in the mixture in the liquid phase at its current temperature and pressure, in units of [K/Pa]. .. math:: \mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p} \left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right] = \frac{V}{C_p}\left(\beta T-1\right) Examples -------- >>> Mixture(['benzene', 'hexane'], ws=[0.5, 0.5], T=320).JTls [-3.8633730709853161e-07, -3.464395792560331e-07] ''' return [i.JTl for i in self.Chemicals]
[ "def", "JTls", "(", "self", ")", ":", "return", "[", "i", ".", "JTl", "for", "i", "in", "self", ".", "Chemicals", "]" ]
https://github.com/CalebBell/thermo/blob/572a47d1b03d49fe609b8d5f826fa6a7cde00828/thermo/mixture.py#L2040-L2055
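The per-component formula in the docstring is easy to evaluate in isolation; a worked sketch with illustrative liquid-like numbers (not benzene or hexane property data):

# mu_JT = (V / Cp) * (beta * T - 1); all values below are illustrative.
V, Cp, beta, T = 9.0e-5, 140.0, 1.2e-3, 320.0  # m^3/mol, J/(mol*K), 1/K, K
mu_JT = (V / Cp) * (beta * T - 1.0)
print(mu_JT)  # ~ -4e-07 K/Pa: negative, as for the liquids in the docstring example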
dmlc/gluon-cv
709bc139919c02f7454cb411311048be188cde64
gluoncv/auto/data/dataset.py
python
ImageClassificationDataset.show_images
(self, indices=None, nsample=16, ncol=4, shuffle=True, resize=224, fontsize=20)
r"""Display images in dataset. Parameters ---------- indices : iterable of int, optional The image indices to be displayed, if `None`, will generate `nsample` indices. If `shuffle` == `True`(default), the indices are random numbers. nsample : int, optional The number of samples to be displayed. ncol : int, optional The column size of ploted image matrix. shuffle : bool, optional If `shuffle` is False, will always sample from the begining. resize : int, optional The image will be resized to (resize, resize) for better visual experience. fontsize : int, optional The fontsize for the title
r"""Display images in dataset.
[ "r", "Display", "images", "in", "dataset", "." ]
def show_images(self, indices=None, nsample=16, ncol=4, shuffle=True, resize=224, fontsize=20): r"""Display images in dataset. Parameters ---------- indices : iterable of int, optional The image indices to be displayed, if `None`, will generate `nsample` indices. If `shuffle` == `True`(default), the indices are random numbers. nsample : int, optional The number of samples to be displayed. ncol : int, optional The column size of ploted image matrix. shuffle : bool, optional If `shuffle` is False, will always sample from the begining. resize : int, optional The image will be resized to (resize, resize) for better visual experience. fontsize : int, optional The fontsize for the title """ df = self.reset_index(drop=True) if indices is None: if not shuffle: indices = range(nsample) else: indices = list(range(len(df))) np.random.shuffle(indices) indices = indices[:min(nsample, len(indices))] images = [cv2.cvtColor(cv2.resize(cv2.imread(df.at[idx, df.IMG_COL]), (resize, resize), \ interpolation=cv2.INTER_AREA), cv2.COLOR_BGR2RGB) for idx in indices if idx < len(df)] titles = None if df.LABEL_COL in df.columns: if df.classes: titles = [df.classes[int(df.at[idx, df.LABEL_COL])] + ': ' + str(df.at[idx, df.LABEL_COL]) \ for idx in indices if idx < len(df)] else: titles = [str(df.at[idx, df.LABEL_COL]) for idx in indices if idx < len(df)] _show_images(images, cols=ncol, titles=titles, fontsize=fontsize)
[ "def", "show_images", "(", "self", ",", "indices", "=", "None", ",", "nsample", "=", "16", ",", "ncol", "=", "4", ",", "shuffle", "=", "True", ",", "resize", "=", "224", ",", "fontsize", "=", "20", ")", ":", "df", "=", "self", ".", "reset_index", "(", "drop", "=", "True", ")", "if", "indices", "is", "None", ":", "if", "not", "shuffle", ":", "indices", "=", "range", "(", "nsample", ")", "else", ":", "indices", "=", "list", "(", "range", "(", "len", "(", "df", ")", ")", ")", "np", ".", "random", ".", "shuffle", "(", "indices", ")", "indices", "=", "indices", "[", ":", "min", "(", "nsample", ",", "len", "(", "indices", ")", ")", "]", "images", "=", "[", "cv2", ".", "cvtColor", "(", "cv2", ".", "resize", "(", "cv2", ".", "imread", "(", "df", ".", "at", "[", "idx", ",", "df", ".", "IMG_COL", "]", ")", ",", "(", "resize", ",", "resize", ")", ",", "interpolation", "=", "cv2", ".", "INTER_AREA", ")", ",", "cv2", ".", "COLOR_BGR2RGB", ")", "for", "idx", "in", "indices", "if", "idx", "<", "len", "(", "df", ")", "]", "titles", "=", "None", "if", "df", ".", "LABEL_COL", "in", "df", ".", "columns", ":", "if", "df", ".", "classes", ":", "titles", "=", "[", "df", ".", "classes", "[", "int", "(", "df", ".", "at", "[", "idx", ",", "df", ".", "LABEL_COL", "]", ")", "]", "+", "': '", "+", "str", "(", "df", ".", "at", "[", "idx", ",", "df", ".", "LABEL_COL", "]", ")", "for", "idx", "in", "indices", "if", "idx", "<", "len", "(", "df", ")", "]", "else", ":", "titles", "=", "[", "str", "(", "df", ".", "at", "[", "idx", ",", "df", ".", "LABEL_COL", "]", ")", "for", "idx", "in", "indices", "if", "idx", "<", "len", "(", "df", ")", "]", "_show_images", "(", "images", ",", "cols", "=", "ncol", ",", "titles", "=", "titles", ",", "fontsize", "=", "fontsize", ")" ]
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/auto/data/dataset.py#L144-L180
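The grid-plotting helper _show_images that the method delegates to is internal to gluon-cv; a minimal matplotlib sketch of the same idea (RGB arrays laid out ncol across, optional titles) could look like:

import math
import matplotlib.pyplot as plt

def show_grid(images, titles=None, ncol=4, fontsize=20):
    nrow = math.ceil(len(images) / ncol)
    fig, axes = plt.subplots(nrow, ncol, squeeze=False)
    for ax in axes.flat:
        ax.axis("off")                         # hide empty cells too
    for i, img in enumerate(images):
        ax = axes[i // ncol][i % ncol]
        ax.imshow(img)
        if titles:
            ax.set_title(titles[i], fontsize=fontsize)
    plt.show()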
playframework/play1
0ecac3bc2421ae2dbec27a368bf671eda1c9cba5
python/Lib/email/charset.py
python
add_alias
(alias, canonical)
Add a character set alias. alias is the alias name, e.g. latin-1 canonical is the character set's canonical name, e.g. iso-8859-1
Add a character set alias.
[ "Add", "a", "character", "set", "alias", "." ]
def add_alias(alias, canonical): """Add a character set alias. alias is the alias name, e.g. latin-1 canonical is the character set's canonical name, e.g. iso-8859-1 """ ALIASES[alias] = canonical
[ "def", "add_alias", "(", "alias", ",", "canonical", ")", ":", "ALIASES", "[", "alias", "]", "=", "canonical" ]
https://github.com/playframework/play1/blob/0ecac3bc2421ae2dbec27a368bf671eda1c9cba5/python/Lib/email/charset.py#L136-L142
axcore/tartube
36dd493642923fe8b9190a41db596c30c043ae90
tartube/config.py
python
SystemPrefWin.on_data_dir_move_down_button_clicked
(self, button, treeview, \ liststore, button2)
Called from callback in self.setup_files_database_tab(). Moves the selected data directory down one position in the list of alternative data directories. Args: button (Gtk.Button): The widget that was clicked (the down button) treeview (Gtk.TreeView): The widget in which a line was selected liststore (Gtk.ListStore): The treeview's liststore button2 (Gtk.Button): The up button
Called from callback in self.setup_files_database_tab().
[ "Called", "from", "callback", "in", "self", ".", "setup_files_database_tab", "()", "." ]
def on_data_dir_move_down_button_clicked(self, button, treeview, \ liststore, button2): """Called from callback in self.setup_files_database_tab(). Moves the selected data directory down one position in the list of alternative data directories. Args: button (Gtk.Button): The widget that was clicked (the down button) treeview (Gtk.TreeView): The widget in which a line was selected liststore (Gtk.ListStore): The treeview's liststore button2 (Gtk.Button): The up button """ selection = treeview.get_selection() (model, path_list) = selection.get_selected_rows() if not path_list: # Nothing selected return # (Keeping track of the first/last selected items helps us to # (de)sensitise buttons, in a moment) first_item = None last_item = None path_list.reverse() for path in path_list: this_iter = model.get_iter(path) last_item = model[this_iter][0] if first_item is None: first_item = model[this_iter][0] if model.iter_next(this_iter): liststore.move_after( this_iter, model.iter_next(this_iter), ) else: # If the first item won't move up, then successive items will # be moved above this one (which is not what we want) break # Update the IV dir_list = [] for row in liststore: dir_list.append(row[0]) self.app_obj.set_data_dir_alt_list(dir_list) # (De)sensitise the button(s), if required if dir_list.index(first_item) == 0: button2.set_sensitive(False) else: button2.set_sensitive(True) if dir_list.index(last_item) == (len(dir_list) - 1): button.set_sensitive(False) else: button.set_sensitive(True)
[ "def", "on_data_dir_move_down_button_clicked", "(", "self", ",", "button", ",", "treeview", ",", "liststore", ",", "button2", ")", ":", "selection", "=", "treeview", ".", "get_selection", "(", ")", "(", "model", ",", "path_list", ")", "=", "selection", ".", "get_selected_rows", "(", ")", "if", "not", "path_list", ":", "# Nothing selected", "return", "# (Keeping track of the first/last selected items helps us to", "# (de)sensitise buttons, in a moment)", "first_item", "=", "None", "last_item", "=", "None", "path_list", ".", "reverse", "(", ")", "for", "path", "in", "path_list", ":", "this_iter", "=", "model", ".", "get_iter", "(", "path", ")", "last_item", "=", "model", "[", "this_iter", "]", "[", "0", "]", "if", "first_item", "is", "None", ":", "first_item", "=", "model", "[", "this_iter", "]", "[", "0", "]", "if", "model", ".", "iter_next", "(", "this_iter", ")", ":", "liststore", ".", "move_after", "(", "this_iter", ",", "model", ".", "iter_next", "(", "this_iter", ")", ",", ")", "else", ":", "# If the first item won't move up, then successive items will", "# be moved above this one (which is not what we want)", "break", "# Update the IV", "dir_list", "=", "[", "]", "for", "row", "in", "liststore", ":", "dir_list", ".", "append", "(", "row", "[", "0", "]", ")", "self", ".", "app_obj", ".", "set_data_dir_alt_list", "(", "dir_list", ")", "# (De)sensitise the button(s), if required", "if", "dir_list", ".", "index", "(", "first_item", ")", "==", "0", ":", "button2", ".", "set_sensitive", "(", "False", ")", "else", ":", "button2", ".", "set_sensitive", "(", "True", ")", "if", "dir_list", ".", "index", "(", "last_item", ")", "==", "(", "len", "(", "dir_list", ")", "-", "1", ")", ":", "button", ".", "set_sensitive", "(", "False", ")", "else", ":", "button", ".", "set_sensitive", "(", "True", ")" ]
https://github.com/axcore/tartube/blob/36dd493642923fe8b9190a41db596c30c043ae90/tartube/config.py#L24662-L24732
dmlc/gluon-cv
709bc139919c02f7454cb411311048be188cde64
gluoncv/model_zoo/icnet.py
python
get_icnet_resnet50_mhpv1
(**kwargs)
return get_icnet(dataset='mhpv1', backbone='resnet50', **kwargs)
r"""Image Cascade Network Parameters ---------- dataset : str, default citys The dataset that model pretrained on. (default: cityscapes) backbone : string Pre-trained dilated backbone network type (default:'resnet50').
r"""Image Cascade Network
[ "r", "Image", "Cascade", "Network" ]
def get_icnet_resnet50_mhpv1(**kwargs): r"""Image Cascade Network Parameters ---------- dataset : str, default citys The dataset that model pretrained on. (default: cityscapes) backbone : string Pre-trained dilated backbone network type (default:'resnet50'). """ return get_icnet(dataset='mhpv1', backbone='resnet50', **kwargs)
[ "def", "get_icnet_resnet50_mhpv1", "(", "*", "*", "kwargs", ")", ":", "return", "get_icnet", "(", "dataset", "=", "'mhpv1'", ",", "backbone", "=", "'resnet50'", ",", "*", "*", "kwargs", ")" ]
https://github.com/dmlc/gluon-cv/blob/709bc139919c02f7454cb411311048be188cde64/gluoncv/model_zoo/icnet.py#L377-L388
HymanLiuTS/flaskTs
286648286976e85d9b9a5873632331efcafe0b21
flasky/lib/python2.7/site-packages/bleach/__init__.py
python
_render
(tree)
return force_unicode(_serialize(tree))
Try rendering as HTML, then XML, then give up.
Try rendering as HTML, then XML, then give up.
[ "Try", "rendering", "as", "HTML", "then", "XML", "then", "give", "up", "." ]
def _render(tree): """Try rendering as HTML, then XML, then give up.""" return force_unicode(_serialize(tree))
[ "def", "_render", "(", "tree", ")", ":", "return", "force_unicode", "(", "_serialize", "(", "tree", ")", ")" ]
https://github.com/HymanLiuTS/flaskTs/blob/286648286976e85d9b9a5873632331efcafe0b21/flasky/lib/python2.7/site-packages/bleach/__init__.py#L428-L430
deepmind/acme
9880719d9def1d87a194377b394a414a17d11064
acme/utils/loggers/base.py
python
Logger.write
(self, data: LoggingData)
Writes `data` to destination (file, terminal, database, etc).
Writes `data` to destination (file, terminal, database, etc).
[ "Writes", "data", "to", "destination", "(", "file", "terminal", "database", "etc", ")", "." ]
def write(self, data: LoggingData) -> None: """Writes `data` to destination (file, terminal, database, etc)."""
[ "def", "write", "(", "self", ",", "data", ":", "LoggingData", ")", "->", "None", ":" ]
https://github.com/deepmind/acme/blob/9880719d9def1d87a194377b394a414a17d11064/acme/utils/loggers/base.py#L30-L31
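The record above is only the abstract interface; a minimal concrete logger satisfying it (plain stdout printing, not one of Acme's shipped loggers) could be:

class PrintLogger:
    def write(self, data):
        # data is a mapping of names to scalar values (Acme's LoggingData).
        print(" | ".join(f"{k}={v}" for k, v in sorted(data.items())))

PrintLogger().write({"step": 10, "loss": 0.42})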
biolab/orange2
db40a9449cb45b507d63dcd5739b223f9cffb8e6
Orange/OrangeCanvas/scheme/signalmanager.py
python
SignalManager.stop
(self)
Stop the update loop. .. note:: If the `SignalManager` is currently in `process_queues` it will still update all current pending signals, but will not re-enter until `start()` is called again
Stop the update loop.
[ "Stop", "the", "update", "loop", "." ]
def stop(self): """ Stop the update loop. .. note:: If the `SignalManager` is currently in `process_queues` it will still update all current pending signals, but will not re-enter until `start()` is called again """ if self.__state != SignalManager.Stoped: self.__state = SignalManager.Stoped self.stateChanged.emit(SignalManager.Stoped)
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "__state", "!=", "SignalManager", ".", "Stoped", ":", "self", ".", "__state", "=", "SignalManager", ".", "Stoped", "self", ".", "stateChanged", ".", "emit", "(", "SignalManager", ".", "Stoped", ")" ]
https://github.com/biolab/orange2/blob/db40a9449cb45b507d63dcd5739b223f9cffb8e6/Orange/OrangeCanvas/scheme/signalmanager.py#L113-L124
kubernetes-client/python
47b9da9de2d02b2b7a34fbe05afb44afd130d73a
kubernetes/client/models/v1_custom_resource_definition_status.py
python
V1CustomResourceDefinitionStatus.accepted_names
(self)
return self._accepted_names
Gets the accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501 :return: The accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501 :rtype: V1CustomResourceDefinitionNames
Gets the accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501
[ "Gets", "the", "accepted_names", "of", "this", "V1CustomResourceDefinitionStatus", ".", "#", "noqa", ":", "E501" ]
def accepted_names(self): """Gets the accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501 :return: The accepted_names of this V1CustomResourceDefinitionStatus. # noqa: E501 :rtype: V1CustomResourceDefinitionNames """ return self._accepted_names
[ "def", "accepted_names", "(", "self", ")", ":", "return", "self", ".", "_accepted_names" ]
https://github.com/kubernetes-client/python/blob/47b9da9de2d02b2b7a34fbe05afb44afd130d73a/kubernetes/client/models/v1_custom_resource_definition_status.py#L66-L73