Dataset schema:

  column            type           lengths / classes
  ----------------  -------------  --------------------------------
  repo              string         7 to 54 chars
  path              string         4 to 192 chars
  url               string         87 to 284 chars
  code              string         78 to 104k chars
  code_tokens       sequence
  docstring         string         1 to 46.9k chars
  docstring_tokens  sequence
  language          stringclasses  1 value (python)
  partition         stringclasses  3 values (train / valid / test)
repo: PmagPy/PmagPy
path: pmagpy/ipmag.py
url: https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L6411-L6424
def parse_fits(self, fit_name):
    '''USE PARSE_ALL_FITS unless otherwise necessary
    Isolate fits by the name of the fit; we also set 'specimen_tilt_correction'
    to zero in order to only include data in geographic coordinates
    - THIS NEEDS TO BE GENERALIZED
    '''
    fits = self.fits.loc[self.fits.specimen_comp_name == fit_name].loc[self.fits.specimen_tilt_correction == 0]
    fits.reset_index(inplace=True)
    means = self.means.loc[self.means.site_comp_name == fit_name].loc[self.means.site_tilt_correction == 0]
    means.reset_index(inplace=True)
    mean_name = str(fit_name) + "_mean"
    setattr(self, fit_name, fits)
    setattr(self, mean_name, means)
[ "def", "parse_fits", "(", "self", ",", "fit_name", ")", ":", "fits", "=", "self", ".", "fits", ".", "loc", "[", "self", ".", "fits", ".", "specimen_comp_name", "==", "fit_name", "]", ".", "loc", "[", "self", ".", "fits", ".", "specimen_tilt_correction", "==", "0", "]", "fits", ".", "reset_index", "(", "inplace", "=", "True", ")", "means", "=", "self", ".", "means", ".", "loc", "[", "self", ".", "means", ".", "site_comp_name", "==", "fit_name", "]", ".", "loc", "[", "self", ".", "means", ".", "site_tilt_correction", "==", "0", "]", "means", ".", "reset_index", "(", "inplace", "=", "True", ")", "mean_name", "=", "str", "(", "fit_name", ")", "+", "\"_mean\"", "setattr", "(", "self", ",", "fit_name", ",", "fits", ")", "setattr", "(", "self", ",", "mean_name", ",", "means", ")" ]
USE PARSE_ALL_FITS unless otherwise necessary Isolate fits by the name of the fit; we also set 'specimen_tilt_correction' to zero in order to only include data in geographic coordinates - THIS NEEDS TO BE GENERALIZED
[ "USE", "PARSE_ALL_FITS", "unless", "otherwise", "necessary", "Isolate", "fits", "by", "the", "name", "of", "the", "fit", ";", "we", "also", "set", "specimen_tilt_correction", "to", "zero", "in", "order", "to", "only", "include", "data", "in", "geographic", "coordinates", "-", "THIS", "NEEDS", "TO", "BE", "GENERALIZED" ]
language: python
partition: train
repo: hustcc/wrapcache
path: wrapcache/__init__.py
url: https://github.com/hustcc/wrapcache/blob/3c6f52bb81a278e1dd60c27abe87d169cb4395aa/wrapcache/__init__.py#L41-L48
def get(key, adapter=MemoryAdapter):
    '''
    get the cache value
    '''
    try:
        return pickle.loads(adapter().get(key))
    except CacheExpiredException:
        return None
[ "def", "get", "(", "key", ",", "adapter", "=", "MemoryAdapter", ")", ":", "try", ":", "return", "pickle", ".", "loads", "(", "adapter", "(", ")", ".", "get", "(", "key", ")", ")", "except", "CacheExpiredException", ":", "return", "None" ]
get the cache value
[ "get", "the", "cache", "value" ]
language: python
partition: train
repo: satellogic/telluric
path: telluric/georaster.py
url: https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1723-L1731
def mask_by_value(self, nodata):
    """
    Return raster with a mask calculated based on provided value.
    Only pixels with value=nodata will be masked.

    :param nodata: value of the pixels that should be masked
    :return: GeoRaster2
    """
    return self.copy_with(image=np.ma.masked_array(self.image.data,
                                                   mask=self.image.data == nodata))
[ "def", "mask_by_value", "(", "self", ",", "nodata", ")", ":", "return", "self", ".", "copy_with", "(", "image", "=", "np", ".", "ma", ".", "masked_array", "(", "self", ".", "image", ".", "data", ",", "mask", "=", "self", ".", "image", ".", "data", "==", "nodata", ")", ")" ]
Return raster with a mask calculated based on provided value. Only pixels with value=nodata will be masked. :param nodata: value of the pixels that should be masked :return: GeoRaster2
[ "Return", "raster", "with", "a", "mask", "calculated", "based", "on", "provided", "value", ".", "Only", "pixels", "with", "value", "=", "nodata", "will", "be", "masked", "." ]
language: python
partition: train
repo: cherrypy/cheroot
path: cheroot/ssl/pyopenssl.py
url: https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/ssl/pyopenssl.py#L139-L145
def send(self, *args, **kwargs):
    """Send some part of message to the socket."""
    return self._safe_call(
        False,
        super(SSLFileobjectMixin, self).send,
        *args, **kwargs
    )
[ "def", "send", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_safe_call", "(", "False", ",", "super", "(", "SSLFileobjectMixin", ",", "self", ")", ".", "send", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Send some part of message to the socket.
[ "Send", "some", "part", "of", "message", "to", "the", "socket", "." ]
language: python
partition: train
repo: shoebot/shoebot
path: shoebot/data/bezier.py
url: https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/shoebot/data/bezier.py#L372-L408
def point(self, t, segments=None):
    """
    Returns the PathElement at time t (0.0-1.0) on the path.

    Returns coordinates for point at t on the path.
    Gets the length of the path, based on the length of each curve and line
    in the path. Determines in what segment t falls. Gets the point on that
    segment.

    When you supply the list of segment lengths yourself, as returned from
    length(path, segmented=True), point() works about thirty times faster
    in a for-loop since it doesn't need to recalculate the length during
    each iteration.
    """
    # Originally from nodebox-gl
    if len(self._elements) == 0:
        raise PathError("The given path is empty")

    if self._segments is None:
        self._segments = self._get_length(segmented=True, precision=10)

    i, t, closeto = self._locate(t, segments=self._segments)

    x0, y0 = self[i].x, self[i].y
    p1 = self[i + 1]

    if p1.cmd == CLOSE:
        x, y = self._linepoint(t, x0, y0, closeto.x, closeto.y)
        return PathElement(LINETO, x, y)
    elif p1.cmd in (LINETO, MOVETO):
        x1, y1 = p1.x, p1.y
        x, y = self._linepoint(t, x0, y0, x1, y1)
        return PathElement(LINETO, x, y)
    elif p1.cmd == CURVETO:
        # Note: the handles need to be interpreted differently than in a BezierPath.
        # In a BezierPath, ctrl1 is how the curve started, and ctrl2 how it arrives in this point.
        # Here, ctrl1 is how the curve arrives, and ctrl2 how it continues to the next point.
        x3, y3, x1, y1, x2, y2 = p1.x, p1.y, p1.ctrl1.x, p1.ctrl1.y, p1.ctrl2.x, p1.ctrl2.y
        x, y, c1x, c1y, c2x, c2y = self._curvepoint(t, x0, y0, x1, y1, x2, y2, x3, y3)
        return PathElement(CURVETO, c1x, c1y, c2x, c2y, x, y)
    else:
        raise PathError("Unknown cmd '%s' for p1 %s" % (p1.cmd, p1))
[ "def", "point", "(", "self", ",", "t", ",", "segments", "=", "None", ")", ":", "# Originally from nodebox-gl", "if", "len", "(", "self", ".", "_elements", ")", "==", "0", ":", "raise", "PathError", "(", "\"The given path is empty\"", ")", "if", "self", ".", "_segments", "is", "None", ":", "self", ".", "_segments", "=", "self", ".", "_get_length", "(", "segmented", "=", "True", ",", "precision", "=", "10", ")", "i", ",", "t", ",", "closeto", "=", "self", ".", "_locate", "(", "t", ",", "segments", "=", "self", ".", "_segments", ")", "x0", ",", "y0", "=", "self", "[", "i", "]", ".", "x", ",", "self", "[", "i", "]", ".", "y", "p1", "=", "self", "[", "i", "+", "1", "]", "if", "p1", ".", "cmd", "==", "CLOSE", ":", "x", ",", "y", "=", "self", ".", "_linepoint", "(", "t", ",", "x0", ",", "y0", ",", "closeto", ".", "x", ",", "closeto", ".", "y", ")", "return", "PathElement", "(", "LINETO", ",", "x", ",", "y", ")", "elif", "p1", ".", "cmd", "in", "(", "LINETO", ",", "MOVETO", ")", ":", "x1", ",", "y1", "=", "p1", ".", "x", ",", "p1", ".", "y", "x", ",", "y", "=", "self", ".", "_linepoint", "(", "t", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ")", "return", "PathElement", "(", "LINETO", ",", "x", ",", "y", ")", "elif", "p1", ".", "cmd", "==", "CURVETO", ":", "# Note: the handles need to be interpreted differenty than in a BezierPath.", "# In a BezierPath, ctrl1 is how the curve started, and ctrl2 how it arrives in this point.", "# Here, ctrl1 is how the curve arrives, and ctrl2 how it continues to the next point.", "x3", ",", "y3", ",", "x1", ",", "y1", ",", "x2", ",", "y2", "=", "p1", ".", "x", ",", "p1", ".", "y", ",", "p1", ".", "ctrl1", ".", "x", ",", "p1", ".", "ctrl1", ".", "y", ",", "p1", ".", "ctrl2", ".", "x", ",", "p1", ".", "ctrl2", ".", "y", "x", ",", "y", ",", "c1x", ",", "c1y", ",", "c2x", ",", "c2y", "=", "self", ".", "_curvepoint", "(", "t", ",", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "x3", ",", "y3", ")", "return", "PathElement", "(", "CURVETO", ",", "c1x", ",", "c1y", ",", "c2x", ",", "c2y", ",", "x", ",", "y", ")", "else", ":", "raise", "PathError", "(", "\"Unknown cmd '%s' for p1 %s\"", "%", "(", "p1", ".", "cmd", ",", "p1", ")", ")" ]
Returns the PathElement at time t (0.0-1.0) on the path. Returns coordinates for point at t on the path. Gets the length of the path, based on the length of each curve and line in the path. Determines in what segment t falls. Gets the point on that segment. When you supply the list of segment lengths yourself, as returned from length(path, segmented=True), point() works about thirty times faster in a for-loop since it doesn't need to recalculate the length during each iteration.
[ "Returns", "the", "PathElement", "at", "time", "t", "(", "0", ".", "0", "-", "1", ".", "0", ")", "on", "the", "path", "." ]
language: python
partition: valid
repo: luckydonald/pytgbot
path: pytgbot/bot.py
url: https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/bot.py#L3286-L3392
def answer_inline_query(self, inline_query_id, results, cache_time=None, is_personal=None,
                        next_offset=None, switch_pm_text=None, switch_pm_parameter=None):
    """
    Use this method to send answers to an inline query. On success, True is returned.
    No more than 50 results per query are allowed.

    https://core.telegram.org/bots/api#answerinlinequery

    Parameters:

    :param inline_query_id: Unique identifier for the answered query
    :type  inline_query_id: str|unicode

    :param results: A JSON-serialized array of results for the inline query
    :type  results: list of pytgbot.api_types.sendable.inline.InlineQueryResult

    Optional keyword parameters:

    :param cache_time: The maximum amount of time in seconds that the result of the
                       inline query may be cached on the server. Defaults to 300.
    :type  cache_time: int

    :param is_personal: Pass True, if results may be cached on the server side only for
                        the user that sent the query. By default, results may be returned
                        to any user who sends the same query
    :type  is_personal: bool

    :param next_offset: Pass the offset that a client should send in the next query with
                        the same text to receive more results. Pass an empty string if
                        there are no more results or if you don’t support pagination.
                        Offset length can’t exceed 64 bytes.
    :type  next_offset: str|unicode

    :param switch_pm_text: If passed, clients will display a button with specified text
                           that switches the user to a private chat with the bot and sends
                           the bot a start message with the parameter switch_pm_parameter
    :type  switch_pm_text: str|unicode

    :param switch_pm_parameter: Deep-linking parameter for the /start message sent to the
                                bot when user presses the switch button. 1-64 characters,
                                only `A-Z`, `a-z`, `0-9`, `_` and `-` are allowed.
                                Example: An inline bot that sends YouTube videos can ask
                                the user to connect the bot to their YouTube account to
                                adapt search results accordingly. To do this, it displays
                                a "Connect your YouTube account" button above the results,
                                or even before showing any. The user presses the button,
                                switches to a private chat with the bot and, in doing so,
                                passes a start parameter that instructs the bot to return
                                an oauth link. Once done, the bot can offer a switch_inline
                                button so that the user can easily return to the chat where
                                they wanted to use the bot's inline capabilities.
    :type  switch_pm_parameter: str|unicode

    Returns:

    :return: On success, True is returned
    :rtype:  bool
    """
    assert_type_or_raise(inline_query_id, int, unicode_type, parameter_name="inline_query_id")
    if isinstance(inline_query_id, int):
        inline_query_id = u(inline_query_id)
    # end if
    assert isinstance(inline_query_id, unicode_type)
    assert_type_or_raise(results, list, tuple, InlineQueryResult, parameter_name="results")
    if isinstance(results, InlineQueryResult):
        results = [results]
    assert(isinstance(results, (list, tuple)))  # list of InlineQueryResult
    result_objects = []
    for result in results:
        if not isinstance(result, InlineQueryResult):  # checks all elements of results
            raise ValueError("Parameter results is not list of InlineQueryResult")
        # end if
        result_objects.append(result.to_array())
    # end for results
    assert_type_or_raise(cache_time, None, int, parameter_name="cache_time")
    assert_type_or_raise(is_personal, None, bool, parameter_name="is_personal")
    assert_type_or_raise(next_offset, None, unicode_type, str, int, parameter_name="next_offset")
    if next_offset is not None:
        assert(isinstance(next_offset, (str, unicode_type, int)))
        next_offset = u(next_offset)
    # end if
    assert_type_or_raise(switch_pm_text, None, unicode_type, parameter_name="switch_pm_text")
    assert_type_or_raise(switch_pm_parameter, None, unicode_type, parameter_name="switch_pm_parameter")
    result = self.do(
        "answerInlineQuery", inline_query_id=inline_query_id, results=json.dumps(result_objects),
        cache_time=cache_time, is_personal=is_personal, next_offset=next_offset,
        switch_pm_text=switch_pm_text, switch_pm_parameter=switch_pm_parameter
    )
    if self.return_python_objects:
        logger.debug("Trying to parse {data}".format(data=repr(result)))
        try:
            return from_array_list(bool, result, list_level=0, is_builtin=True)
        except TgApiParseException:
            logger.debug("Failed parsing as primitive bool", exc_info=True)
        # end try
        # no valid parsing so far
        raise TgApiParseException("Could not parse result.")  # See debug log for details!
    # end if return_python_objects
    return result
[ "def", "answer_inline_query", "(", "self", ",", "inline_query_id", ",", "results", ",", "cache_time", "=", "None", ",", "is_personal", "=", "None", ",", "next_offset", "=", "None", ",", "switch_pm_text", "=", "None", ",", "switch_pm_parameter", "=", "None", ")", ":", "assert_type_or_raise", "(", "inline_query_id", ",", "int", ",", "unicode_type", ",", "parameter_name", "=", "\"inline_query_id\"", ")", "if", "isinstance", "(", "inline_query_id", ",", "int", ")", ":", "inline_query_id", "=", "u", "(", "inline_query_id", ")", "# end if", "assert", "isinstance", "(", "inline_query_id", ",", "unicode_type", ")", "assert_type_or_raise", "(", "results", ",", "list", ",", "tuple", ",", "InlineQueryResult", ",", "parameter_name", "=", "\"results\"", ")", "if", "isinstance", "(", "results", ",", "InlineQueryResult", ")", ":", "results", "=", "[", "results", "]", "assert", "(", "isinstance", "(", "results", ",", "(", "list", ",", "tuple", ")", ")", ")", "# list of InlineQueryResult", "result_objects", "=", "[", "]", "for", "result", "in", "results", ":", "if", "not", "isinstance", "(", "result", ",", "InlineQueryResult", ")", ":", "# checks all elements of results", "raise", "ValueError", "(", "\"Parameter results is not list of InlineQueryResult\"", ")", "# end if", "result_objects", ".", "append", "(", "result", ".", "to_array", "(", ")", ")", "# end for results", "assert_type_or_raise", "(", "cache_time", ",", "None", ",", "int", ",", "parameter_name", "=", "\"cache_time\"", ")", "assert_type_or_raise", "(", "is_personal", ",", "None", ",", "bool", ",", "parameter_name", "=", "\"is_personal\"", ")", "assert_type_or_raise", "(", "next_offset", ",", "None", ",", "unicode_type", ",", "str", ",", "int", ",", "parameter_name", "=", "\"next_offset\"", ")", "if", "next_offset", "is", "not", "None", ":", "assert", "(", "isinstance", "(", "next_offset", ",", "(", "str", ",", "unicode_type", ",", "int", ")", ")", ")", "next_offset", "=", "u", "(", "next_offset", ")", "# end if", "assert_type_or_raise", "(", "switch_pm_text", ",", "None", ",", "unicode_type", ",", "parameter_name", "=", "\"switch_pm_text\"", ")", "assert_type_or_raise", "(", "switch_pm_parameter", ",", "None", ",", "unicode_type", ",", "parameter_name", "=", "\"switch_pm_parameter\"", ")", "result", "=", "self", ".", "do", "(", "\"answerInlineQuery\"", ",", "inline_query_id", "=", "inline_query_id", ",", "results", "=", "json", ".", "dumps", "(", "result_objects", ")", ",", "cache_time", "=", "cache_time", ",", "is_personal", "=", "is_personal", ",", "next_offset", "=", "next_offset", ",", "switch_pm_text", "=", "switch_pm_text", ",", "switch_pm_parameter", "=", "switch_pm_parameter", ")", "if", "self", ".", "return_python_objects", ":", "logger", ".", "debug", "(", "\"Trying to parse {data}\"", ".", "format", "(", "data", "=", "repr", "(", "result", ")", ")", ")", "try", ":", "return", "from_array_list", "(", "bool", ",", "result", ",", "list_level", "=", "0", ",", "is_builtin", "=", "True", ")", "except", "TgApiParseException", ":", "logger", ".", "debug", "(", "\"Failed parsing as primitive bool\"", ",", "exc_info", "=", "True", ")", "# end try", "# no valid parsing so far", "raise", "TgApiParseException", "(", "\"Could not parse result.\"", ")", "# See debug log for details!", "# end if return_python_objects", "return", "result" ]
Use this method to send answers to an inline query. On success, True is returned. No more than 50 results per query are allowed. https://core.telegram.org/bots/api#answerinlinequery Parameters: :param inline_query_id: Unique identifier for the answered query :type inline_query_id: str|unicode :param results: A JSON-serialized array of results for the inline query :type results: list of pytgbot.api_types.sendable.inline.InlineQueryResult Optional keyword parameters: :param cache_time: The maximum amount of time in seconds that the result of the inline query may be cached on the server. Defaults to 300. :type cache_time: int :param is_personal: Pass True, if results may be cached on the server side only for the user that sent the query. By default, results may be returned to any user who sends the same query :type is_personal: bool :param next_offset: Pass the offset that a client should send in the next query with the same text to receive more results. Pass an empty string if there are no more results or if you don‘t support pagination. Offset length can’t exceed 64 bytes. :type next_offset: str|unicode :param switch_pm_text: If passed, clients will display a button with specified text that switches the user to a private chat with the bot and sends the bot a start message with the parameter switch_pm_parameter :type switch_pm_text: str|unicode :param switch_pm_parameter: Deep-linking parameter for the /start message sent to the bot when user presses the switch button. 1-64 characters, only `A-Z`, `a-z`, `0-9`, `_` and `-` are allowed. Example: An inline bot that sends YouTube videos can ask the user to connect the bot to their YouTube account to adapt search results accordingly. To do this, it displays a "Connect your YouTube account" button above the results, or even before showing any. The user presses the button, switches to a private chat with the bot and, in doing so, passes a start parameter that instructs the bot to return an oauth link. Once done, the bot can offer a switch_inline button so that the user can easily return to the chat where they wanted to use the bot's inline capabilities. :type switch_pm_parameter: str|unicode Returns: :return: On success, True is returned :rtype: bool
[ "Use", "this", "method", "to", "send", "answers", "to", "an", "inline", "query", ".", "On", "success", "True", "is", "returned", ".", "No", "more", "than", "50", "results", "per", "query", "are", "allowed", "." ]
language: python
partition: train
repo: amsehili/auditok
path: auditok/io.py
url: https://github.com/amsehili/auditok/blob/df6eb1d80f8cd9034be47b24869ce59b74f5f4db/auditok/io.py#L261-L274
def append_data(self, data_buffer):
    """
    Append data to this audio stream

    :Parameters:

        `data_buffer` : str, basestring, Bytes
            a buffer with a length multiple of (sample_width * channels)
    """
    if len(data_buffer) % (self.sample_width * self.channels) != 0:
        raise ValueError("length of data_buffer must be a multiple of (sample_width * channels)")
    self._buffer += data_buffer
    self._left += len(data_buffer)
[ "def", "append_data", "(", "self", ",", "data_buffer", ")", ":", "if", "len", "(", "data_buffer", ")", "%", "(", "self", ".", "sample_width", "*", "self", ".", "channels", ")", "!=", "0", ":", "raise", "ValueError", "(", "\"length of data_buffer must be a multiple of (sample_width * channels)\"", ")", "self", ".", "_buffer", "+=", "data_buffer", "self", ".", "_left", "+=", "len", "(", "data_buffer", ")" ]
Append data to this audio stream :Parameters: `data_buffer` : str, basestring, Bytes a buffer with a length multiple of (sample_width * channels)
[ "Append", "data", "to", "this", "audio", "stream" ]
language: python
partition: train
repo: althonos/pronto
path: pronto/parser/owl.py
url: https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/owl.py#L164-L171
def _extract_obo_relation(cls, rawterm):
    """Extract the relationships defined in the rawterm.
    """
    relations = {}
    if 'subClassOf' in rawterm:
        relations[Relationship('is_a')] = l = []
        l.extend(map(cls._get_id_from_url, rawterm.pop('subClassOf')))
    return relations
[ "def", "_extract_obo_relation", "(", "cls", ",", "rawterm", ")", ":", "relations", "=", "{", "}", "if", "'subClassOf'", "in", "rawterm", ":", "relations", "[", "Relationship", "(", "'is_a'", ")", "]", "=", "l", "=", "[", "]", "l", ".", "extend", "(", "map", "(", "cls", ".", "_get_id_from_url", ",", "rawterm", ".", "pop", "(", "'subClassOf'", ")", ")", ")", "return", "relations" ]
Extract the relationships defined in the rawterm.
[ "Extract", "the", "relationships", "defined", "in", "the", "rawterm", "." ]
language: python
partition: train
repo: bwhite/hadoopy
path: hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py
url: https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py#L884-L887
def update_resources(self, dstpath, names=None, languages=None):
    """ Update or add manifest resource in dll/exe file dstpath """
    UpdateManifestResourcesFromXML(dstpath, self.toprettyxml(), names,
                                   languages)
[ "def", "update_resources", "(", "self", ",", "dstpath", ",", "names", "=", "None", ",", "languages", "=", "None", ")", ":", "UpdateManifestResourcesFromXML", "(", "dstpath", ",", "self", ".", "toprettyxml", "(", ")", ",", "names", ",", "languages", ")" ]
Update or add manifest resource in dll/exe file dstpath
[ "Update", "or", "add", "manifest", "resource", "in", "dll", "/", "exe", "file", "dstpath" ]
language: python
partition: train
repo: cltk/cltk
path: cltk/corpus/greek/tlg/parse_tlg_indices.py
url: https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/greek/tlg/parse_tlg_indices.py#L43-L50
def select_authors_by_epithet(query):
    """Pass exact name (case insensitive) of epithet name, return ordered set
    of author ids.
    """
    for epithet, ids in AUTHOR_EPITHET.items():
        if epithet.casefold() == query.casefold():
            return set(ids)
[ "def", "select_authors_by_epithet", "(", "query", ")", ":", "for", "epithet", ",", "ids", "in", "AUTHOR_EPITHET", ".", "items", "(", ")", ":", "if", "epithet", ".", "casefold", "(", ")", "==", "query", ".", "casefold", "(", ")", ":", "return", "set", "(", "ids", ")" ]
Pass exact name (case insensitive) of epithet name, return ordered set of author ids.
[ "Pass", "exact", "name", "(", "case", "insensitive", ")", "of", "epithet", "name", "return", "ordered", "set", "of", "author", "ids", "." ]
language: python
partition: train
repo: danielhrisca/asammdf
path: asammdf/blocks/utils.py
url: https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L503-L555
def fmt_to_datatype_v4(fmt, shape, array=False):
    """convert numpy dtype format string to mdf version 4 channel data
    type and size

    Parameters
    ----------
    fmt : numpy.dtype
        numpy data type
    shape : tuple
        numpy array shape
    array : bool
        disambiguate between bytearray and channel array

    Returns
    -------
    data_type, size : int, int
        integer data type as defined by ASAM MDF and bit size

    """
    size = fmt.itemsize * 8

    if not array and shape[1:] and fmt.itemsize == 1 and fmt.kind == "u":
        data_type = v4c.DATA_TYPE_BYTEARRAY
        for dim in shape[1:]:
            size *= dim
    else:
        if fmt.kind == "u":
            if fmt.byteorder in "=<|":
                data_type = v4c.DATA_TYPE_UNSIGNED_INTEL
            else:
                data_type = v4c.DATA_TYPE_UNSIGNED_MOTOROLA
        elif fmt.kind == "i":
            if fmt.byteorder in "=<|":
                data_type = v4c.DATA_TYPE_SIGNED_INTEL
            else:
                data_type = v4c.DATA_TYPE_SIGNED_MOTOROLA
        elif fmt.kind == "f":
            if fmt.byteorder in "=<":
                data_type = v4c.DATA_TYPE_REAL_INTEL
            else:
                data_type = v4c.DATA_TYPE_REAL_MOTOROLA
        elif fmt.kind in "SV":
            data_type = v4c.DATA_TYPE_STRING_LATIN_1
        elif fmt.kind == "b":
            data_type = v4c.DATA_TYPE_UNSIGNED_INTEL
            size = 1
        else:
            message = f"Unknown type: dtype={fmt}, shape={shape}"
            logger.exception(message)
            raise MdfException(message)

    return data_type, size
[ "def", "fmt_to_datatype_v4", "(", "fmt", ",", "shape", ",", "array", "=", "False", ")", ":", "size", "=", "fmt", ".", "itemsize", "*", "8", "if", "not", "array", "and", "shape", "[", "1", ":", "]", "and", "fmt", ".", "itemsize", "==", "1", "and", "fmt", ".", "kind", "==", "\"u\"", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_BYTEARRAY", "for", "dim", "in", "shape", "[", "1", ":", "]", ":", "size", "*=", "dim", "else", ":", "if", "fmt", ".", "kind", "==", "\"u\"", ":", "if", "fmt", ".", "byteorder", "in", "\"=<|\"", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_UNSIGNED_INTEL", "else", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_UNSIGNED_MOTOROLA", "elif", "fmt", ".", "kind", "==", "\"i\"", ":", "if", "fmt", ".", "byteorder", "in", "\"=<|\"", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_SIGNED_INTEL", "else", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_SIGNED_MOTOROLA", "elif", "fmt", ".", "kind", "==", "\"f\"", ":", "if", "fmt", ".", "byteorder", "in", "\"=<\"", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_REAL_INTEL", "else", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_REAL_MOTOROLA", "elif", "fmt", ".", "kind", "in", "\"SV\"", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_STRING_LATIN_1", "elif", "fmt", ".", "kind", "==", "\"b\"", ":", "data_type", "=", "v4c", ".", "DATA_TYPE_UNSIGNED_INTEL", "size", "=", "1", "else", ":", "message", "=", "f\"Unknown type: dtype={fmt}, shape={shape}\"", "logger", ".", "exception", "(", "message", ")", "raise", "MdfException", "(", "message", ")", "return", "data_type", ",", "size" ]
convert numpy dtype format string to mdf version 4 channel data type and size Parameters ---------- fmt : numpy.dtype numpy data type shape : tuple numpy array shape array : bool disambiguate between bytearray and channel array Returns ------- data_type, size : int, int integer data type as defined by ASAM MDF and bit size
[ "convert", "numpy", "dtype", "format", "string", "to", "mdf", "version", "4", "channel", "data", "type", "and", "size" ]
language: python
partition: train
repo: docker/docker-py
path: docker/models/containers.py
url: https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/containers.py#L98-L112
def attach_socket(self, **kwargs):
    """
    Like :py:meth:`attach`, but returns the underlying socket-like object
    for the HTTP request.

    Args:
        params (dict): Dictionary of request parameters (e.g. ``stdout``,
            ``stderr``, ``stream``).
        ws (bool): Use websockets instead of raw HTTP.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self.client.api.attach_socket(self.id, **kwargs)
[ "def", "attach_socket", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "client", ".", "api", ".", "attach_socket", "(", "self", ".", "id", ",", "*", "*", "kwargs", ")" ]
Like :py:meth:`attach`, but returns the underlying socket-like object for the HTTP request. Args: params (dict): Dictionary of request parameters (e.g. ``stdout``, ``stderr``, ``stream``). ws (bool): Use websockets instead of raw HTTP. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
[ "Like", ":", "py", ":", "meth", ":", "attach", "but", "returns", "the", "underlying", "socket", "-", "like", "object", "for", "the", "HTTP", "request", "." ]
language: python
partition: train
repo: tamasgal/km3pipe
path: km3pipe/db.py
url: https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L894-L902
def base(self, du):
    """Return the base CLB for a given DU"""
    parameter = 'base'
    if parameter not in self._by:
        self._by[parameter] = {}
        for clb in self.upi.values():
            if clb.floor == 0:
                self._by[parameter][clb.du] = clb
    return self._by[parameter][du]
[ "def", "base", "(", "self", ",", "du", ")", ":", "parameter", "=", "'base'", "if", "parameter", "not", "in", "self", ".", "_by", ":", "self", ".", "_by", "[", "parameter", "]", "=", "{", "}", "for", "clb", "in", "self", ".", "upi", ".", "values", "(", ")", ":", "if", "clb", ".", "floor", "==", "0", ":", "self", ".", "_by", "[", "parameter", "]", "[", "clb", ".", "du", "]", "=", "clb", "return", "self", ".", "_by", "[", "parameter", "]", "[", "du", "]" ]
Return the base CLB for a given DU
[ "Return", "the", "base", "CLB", "for", "a", "given", "DU" ]
language: python
partition: train
repo: zetaops/pyoko
path: pyoko/db/adapter/db_riak.py
url: https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/db/adapter/db_riak.py#L182-L196
def _clear(self, wait):
    """
    Clears out all the content of the current bucket.
    Only for development purposes.
    """
    i = 0
    t1 = time.time()
    for k in self.bucket.get_keys():
        i += 1
        self.bucket.get(k).delete()
    print("\nDELETION TOOK: %s" % round(time.time() - t1, 2))
    if wait:
        while self._model_class.objects.count():
            time.sleep(0.3)
    return i
[ "def", "_clear", "(", "self", ",", "wait", ")", ":", "i", "=", "0", "t1", "=", "time", ".", "time", "(", ")", "for", "k", "in", "self", ".", "bucket", ".", "get_keys", "(", ")", ":", "i", "+=", "1", "self", ".", "bucket", ".", "get", "(", "k", ")", ".", "delete", "(", ")", "print", "(", "\"\\nDELETION TOOK: %s\"", "%", "round", "(", "time", ".", "time", "(", ")", "-", "t1", ",", "2", ")", ")", "if", "wait", ":", "while", "self", ".", "_model_class", ".", "objects", ".", "count", "(", ")", ":", "time", ".", "sleep", "(", "0.3", ")", "return", "i" ]
clear outs the all content of current bucket only for development purposes
[ "clear", "outs", "the", "all", "content", "of", "current", "bucket", "only", "for", "development", "purposes" ]
language: python
partition: train
repo: JamesPHoughton/pysd
path: pysd/py_backend/utils.py
url: https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/utils.py#L138-L291
def make_python_identifier(string, namespace=None, reserved_words=None,
                           convert='drop', handle='force'):
    """
    Takes an arbitrary string and creates a valid Python identifier.

    If the input string is in the namespace, return its value.

    If the python identifier created is already in the namespace,
    but the input string is not (ie, two similar strings resolve to
    the same python identifier)

    or if the identifier is a reserved word in the reserved_words
    list, or is a python default reserved word,
    adds _1, or if _1 is in the namespace, _2, etc.

    Parameters
    ----------
    string : <basestring>
        The text to be converted into a valid python identifier
    namespace : <dictionary>
        Map of existing translations into python safe identifiers.
        This is to ensure that two strings are not translated into
        the same python identifier
    reserved_words : <list of strings>
        List of words that are reserved (because they have other meanings
        in this particular program, such as also being the names of
        libraries, etc.
    convert : <string>
        Tells the function what to do with characters that are not
        valid in python identifiers
        - 'hex' implies that they will be converted to their hexadecimal
          representation. This is handy if you have variables that have
          a lot of reserved characters, or you don't want the name to be
          dependent on when things were added to the namespace
        - 'drop' implies that they will just be dropped altogether
    handle : <string>
        Tells the function how to deal with namespace conflicts
        - 'force' will create a representation which is not in conflict
          by appending _n to the resulting variable where n is the lowest
          number necessary to avoid a conflict
        - 'throw' will raise an exception

    Returns
    -------
    identifier : <string>
        A valid python identifier based on the input string
    namespace : <dictionary>
        An updated map of the translations of words to python identifiers,
        including the passed in 'string'.

    Examples
    --------
    >>> make_python_identifier('Capital')
    ('capital', {'Capital': 'capital'})

    >>> make_python_identifier('multiple words')
    ('multiple_words', {'multiple words': 'multiple_words'})

    >>> make_python_identifier('multiple     spaces')
    ('multiple_spaces', {'multiple     spaces': 'multiple_spaces'})

    When the name is a python keyword, add '_1' to differentiate it
    >>> make_python_identifier('for')
    ('for_1', {'for': 'for_1'})

    Remove leading and trailing whitespace
    >>> make_python_identifier('  whitespace  ')
    ('whitespace', {'  whitespace  ': 'whitespace'})

    Remove most special characters outright:
    >>> make_python_identifier('H@t tr!ck')
    ('ht_trck', {'H@t tr!ck': 'ht_trck'})

    Replace special characters with their hex representations
    >>> make_python_identifier('H@t tr!ck', convert='hex')
    ('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'})

    remove leading digits
    >>> make_python_identifier('123abc')
    ('abc', {'123abc': 'abc'})

    already in namespace
    >>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'})
    ('variable', {'Variable$': 'variable'})

    namespace conflicts
    >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'})
    ('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'})

    >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable',
    >>>                                                'Variable%': 'variable_1'})
    ('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1',
    'Variable$': 'variable_2'})

    throw exception instead
    >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw')
    Traceback (most recent call last):
     ...
    NameError: variable already exists in namespace or is a reserved word

    References
    ----------
    Identifiers must follow the convention outlined here:
        https://docs.python.org/2/reference/lexical_analysis.html#identifiers
    """
    if namespace is None:
        namespace = dict()

    if reserved_words is None:
        reserved_words = list()

    if string in namespace:
        return namespace[string], namespace

    # create a working copy (and make it lowercase, while we're at it)
    s = string.lower()

    # remove leading and trailing whitespace
    s = s.strip()

    # Make spaces into underscores
    s = re.sub('[\\s\\t\\n]+', '_', s)

    if convert == 'hex':
        # Convert invalid characters to hex. Note: \p{l} designates all
        # Unicode letter characters (any language), \p{m} designates all
        # mark symbols (e.g., vowel marks in Indian scripts, such as the
        # final) and \p{n} designates all numbers. We allow any of these
        # to be present in the regex. (The \p{...} classes require the
        # third-party 'regex' module; the stdlib 're' module does not
        # support them.)
        s = ''.join([c.encode("hex") if re.findall('[^\p{l}\p{m}\p{n}_]', c)
                     else c for c in s])
    elif convert == 'drop':
        # Remove invalid characters
        s = re.sub('[^\p{l}\p{m}\p{n}_]', '', s)

    # Remove leading characters until we find a letter or underscore.
    # Only letters can be leading characters.
    s = re.sub('^[^\p{l}_]+', '', s)

    # Check that the string is not a python identifier
    while (s in keyword.kwlist or
           s in namespace.values() or
           s in reserved_words):
        if handle == 'throw':
            raise NameError(s + ' already exists in namespace or is a reserved word')
        if handle == 'force':
            if re.match(".*?_\d+$", s):
                i = re.match(".*?_(\d+)$", s).groups()[0]
                s = s.strip('_' + i) + '_' + str(int(i) + 1)
            else:
                s += '_1'

    namespace[string] = s

    return s, namespace
[ "def", "make_python_identifier", "(", "string", ",", "namespace", "=", "None", ",", "reserved_words", "=", "None", ",", "convert", "=", "'drop'", ",", "handle", "=", "'force'", ")", ":", "if", "namespace", "is", "None", ":", "namespace", "=", "dict", "(", ")", "if", "reserved_words", "is", "None", ":", "reserved_words", "=", "list", "(", ")", "if", "string", "in", "namespace", ":", "return", "namespace", "[", "string", "]", ",", "namespace", "# create a working copy (and make it lowercase, while we're at it)", "s", "=", "string", ".", "lower", "(", ")", "# remove leading and trailing whitespace", "s", "=", "s", ".", "strip", "(", ")", "# Make spaces into underscores", "s", "=", "re", ".", "sub", "(", "'[\\\\s\\\\t\\\\n]+'", ",", "'_'", ",", "s", ")", "if", "convert", "==", "'hex'", ":", "# Convert invalid characters to hex. Note: \\p{l} designates all Unicode letter characters (any language),", "# \\p{m} designates all mark symbols (e.g., vowel marks in Indian scrips, such as the final)", "# and \\p{n} designates all numbers. We allow any of these to be present in the regex.", "s", "=", "''", ".", "join", "(", "[", "c", ".", "encode", "(", "\"hex\"", ")", "if", "re", ".", "findall", "(", "'[^\\p{l}\\p{m}\\p{n}_]'", ",", "c", ")", "else", "c", "for", "c", "in", "s", "]", ")", "elif", "convert", "==", "'drop'", ":", "# Remove invalid characters", "s", "=", "re", ".", "sub", "(", "'[^\\p{l}\\p{m}\\p{n}_]'", ",", "''", ",", "s", ")", "# Remove leading characters until we find a letter or underscore. Only letters can be leading characters.", "s", "=", "re", ".", "sub", "(", "'^[^\\p{l}_]+'", ",", "''", ",", "s", ")", "# Check that the string is not a python identifier", "while", "(", "s", "in", "keyword", ".", "kwlist", "or", "s", "in", "namespace", ".", "values", "(", ")", "or", "s", "in", "reserved_words", ")", ":", "if", "handle", "==", "'throw'", ":", "raise", "NameError", "(", "s", "+", "' already exists in namespace or is a reserved word'", ")", "if", "handle", "==", "'force'", ":", "if", "re", ".", "match", "(", "\".*?_\\d+$\"", ",", "s", ")", ":", "i", "=", "re", ".", "match", "(", "\".*?_(\\d+)$\"", ",", "s", ")", ".", "groups", "(", ")", "[", "0", "]", "s", "=", "s", ".", "strip", "(", "'_'", "+", "i", ")", "+", "'_'", "+", "str", "(", "int", "(", "i", ")", "+", "1", ")", "else", ":", "s", "+=", "'_1'", "namespace", "[", "string", "]", "=", "s", "return", "s", ",", "namespace" ]
Takes an arbitrary string and creates a valid Python identifier. If the input string is in the namespace, return its value. If the python identifier created is already in the namespace, but the input string is not (ie, two similar strings resolve to the same python identifier) or if the identifier is a reserved word in the reserved_words list, or is a python default reserved word, adds _1, or if _1 is in the namespace, _2, etc. Parameters ---------- string : <basestring> The text to be converted into a valid python identifier namespace : <dictionary> Map of existing translations into python safe identifiers. This is to ensure that two strings are not translated into the same python identifier reserved_words : <list of strings> List of words that are reserved (because they have other meanings in this particular program, such as also being the names of libraries, etc. convert : <string> Tells the function what to do with characters that are not valid in python identifiers - 'hex' implies that they will be converted to their hexidecimal representation. This is handy if you have variables that have a lot of reserved characters, or you don't want the name to be dependent on when things were added to the namespace - 'drop' implies that they will just be dropped altogether handle : <string> Tells the function how to deal with namespace conflicts - 'force' will create a representation which is not in conflict by appending _n to the resulting variable where n is the lowest number necessary to avoid a conflict - 'throw' will raise an exception Returns ------- identifier : <string> A vaild python identifier based on the input string namespace : <dictionary> An updated map of the translations of words to python identifiers, including the passed in 'string'. Examples -------- >>> make_python_identifier('Capital') ('capital', {'Capital': 'capital'}) >>> make_python_identifier('multiple words') ('multiple_words', {'multiple words': 'multiple_words'}) >>> make_python_identifier('multiple spaces') ('multiple_spaces', {'multiple spaces': 'multiple_spaces'}) When the name is a python keyword, add '_1' to differentiate it >>> make_python_identifier('for') ('for_1', {'for': 'for_1'}) Remove leading and trailing whitespace >>> make_python_identifier(' whitespace ') ('whitespace', {' whitespace ': 'whitespace'}) Remove most special characters outright: >>> make_python_identifier('H@t tr!ck') ('ht_trck', {'H@t tr!ck': 'ht_trck'}) Replace special characters with their hex representations >>> make_python_identifier('H@t tr!ck', convert='hex') ('h40t_tr21ck', {'H@t tr!ck': 'h40t_tr21ck'}) remove leading digits >>> make_python_identifier('123abc') ('abc', {'123abc': 'abc'}) already in namespace >>> make_python_identifier('Variable$', namespace={'Variable$': 'variable'}) ('variable', {'Variable$': 'variable'}) namespace conflicts >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}) ('variable_1', {'Variable@': 'variable', 'Variable$': 'variable_1'}) >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable', >>> 'Variable%': 'variable_1'}) ('variable_2', {'Variable@': 'variable', 'Variable%': 'variable_1', 'Variable$': 'variable_2'}) throw exception instead >>> make_python_identifier('Variable$', namespace={'Variable@': 'variable'}, handle='throw') Traceback (most recent call last): ... 
NameError: variable already exists in namespace or is a reserved word References ---------- Identifiers must follow the convention outlined here: https://docs.python.org/2/reference/lexical_analysis.html#identifiers
[ "Takes", "an", "arbitrary", "string", "and", "creates", "a", "valid", "Python", "identifier", "." ]
language: python
partition: train
repo: yyuu/botornado
path: boto/pyami/installers/ubuntu/installer.py
url: https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/pyami/installers/ubuntu/installer.py#L82-L88
def create_user(self, user):
    """
    Create a user on the local system
    """
    self.run("useradd -m %s" % user)
    usr = getpwnam(user)
    return usr
[ "def", "create_user", "(", "self", ",", "user", ")", ":", "self", ".", "run", "(", "\"useradd -m %s\"", "%", "user", ")", "usr", "=", "getpwnam", "(", "user", ")", "return", "usr" ]
Create a user on the local system
[ "Create", "a", "user", "on", "the", "local", "system" ]
language: python
partition: train
repo: chaoss/grimoirelab-perceval
path: perceval/backends/core/askbot.py
url: https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/askbot.py#L362-L390
def parse_question_container(html_question):
    """Parse the question info container of a given HTML question.

    The method parses the information available in the question information
    container. The container can have up to 2 elements: the first one
    contains the information related with the user who generated the
    question and the date (if any). The second one contains the date of the
    update, and the user who updated it (if not the same who generated the
    question).

    :param html_question: raw HTML question element

    :returns: an object with the parsed information
    """
    container_info = {}
    bs_question = bs4.BeautifulSoup(html_question, "html.parser")
    question = AskbotParser._find_question_container(bs_question)
    container = question.select("div.post-update-info")
    created = container[0]
    container_info['author'] = AskbotParser.parse_user_info(created)
    try:
        container[1]
    except IndexError:
        pass
    else:
        updated = container[1]
        if AskbotParser.parse_user_info(updated):
            container_info['updated_by'] = AskbotParser.parse_user_info(updated)
    return container_info
[ "def", "parse_question_container", "(", "html_question", ")", ":", "container_info", "=", "{", "}", "bs_question", "=", "bs4", ".", "BeautifulSoup", "(", "html_question", ",", "\"html.parser\"", ")", "question", "=", "AskbotParser", ".", "_find_question_container", "(", "bs_question", ")", "container", "=", "question", ".", "select", "(", "\"div.post-update-info\"", ")", "created", "=", "container", "[", "0", "]", "container_info", "[", "'author'", "]", "=", "AskbotParser", ".", "parse_user_info", "(", "created", ")", "try", ":", "container", "[", "1", "]", "except", "IndexError", ":", "pass", "else", ":", "updated", "=", "container", "[", "1", "]", "if", "AskbotParser", ".", "parse_user_info", "(", "updated", ")", ":", "container_info", "[", "'updated_by'", "]", "=", "AskbotParser", ".", "parse_user_info", "(", "updated", ")", "return", "container_info" ]
Parse the question info container of a given HTML question. The method parses the information available in the question information container. The container can have up to 2 elements: the first one contains the information related with the user who generated the question and the date (if any). The second one contains the date of the updated, and the user who updated it (if not the same who generated the question). :param html_question: raw HTML question element :returns: an object with the parsed information
[ "Parse", "the", "question", "info", "container", "of", "a", "given", "HTML", "question", "." ]
language: python
partition: test
repo: ccubed/Shosetsu
path: Shosetsu/Parsing.py
url: https://github.com/ccubed/Shosetsu/blob/eba01c058100ec8806129b11a2859f3126a1b101/Shosetsu/Parsing.py#L16-L35
async def parse_release_results(soup):
    """
    Parse Releases search pages.

    :param soup: The BS4 class object
    :return: A list of dictionaries containing a release dictionary.
             This is the same as the one returned in get_novel.
             It contains a Date released, Platform, Ages group and Name.
    """
    soup = list(soup.find_all('table', class_='stripe')[0].children)[1:]
    releases = []
    for item in soup:
        child = list(item.children)
        temp_rel = {'date': None, 'ages': None, 'platform': None, 'name': None}
        temp_rel['date'] = child[0].string
        temp_rel['ages'] = child[1].string
        temp_rel['platform'] = child[2].abbr.get('title')
        temp_rel['name'] = child[3].a.string
        releases.append(temp_rel)
        del temp_rel
    return releases
[ "async", "def", "parse_release_results", "(", "soup", ")", ":", "soup", "=", "list", "(", "soup", ".", "find_all", "(", "'table'", ",", "class_", "=", "'stripe'", ")", "[", "0", "]", ".", "children", ")", "[", "1", ":", "]", "releases", "=", "[", "]", "for", "item", "in", "soup", ":", "child", "=", "list", "(", "item", ".", "children", ")", "temp_rel", "=", "{", "'date'", ":", "None", ",", "'ages'", ":", "None", ",", "'platform'", ":", "None", ",", "'name'", ":", "None", "}", "temp_rel", "[", "'date'", "]", "=", "child", "[", "0", "]", ".", "string", "temp_rel", "[", "'ages'", "]", "=", "child", "[", "1", "]", ".", "string", "temp_rel", "[", "'platform'", "]", "=", "child", "[", "2", "]", ".", "abbr", ".", "get", "(", "'title'", ")", "temp_rel", "[", "'name'", "]", "=", "child", "[", "3", "]", ".", "a", ".", "string", "releases", ".", "append", "(", "temp_rel", ")", "del", "temp_rel", "return", "releases" ]
Parse Releases search pages. :param soup: The BS4 class object :return: A list of dictionaries containing a release dictionary. This is the same as the one returned in get_novel. It contains a Date released, Platform, Ages group and Name.
[ "Parse", "Releases", "search", "pages", "." ]
language: python
partition: test
repo: ttinies/sc2ladderMgmt
path: sc2ladderMgmt/functions.py
url: https://github.com/ttinies/sc2ladderMgmt/blob/230292e18c54e43129c162116bbdf743b3e9dcf1/sc2ladderMgmt/functions.py#L18-L23
def addLadder(settings):
    """define a new Ladder setting and save to disk file"""
    ladder = Ladder(settings)
    ladder.save()
    getKnownLadders()[ladder.name] = ladder
    return ladder
[ "def", "addLadder", "(", "settings", ")", ":", "ladder", "=", "Ladder", "(", "settings", ")", "ladder", ".", "save", "(", ")", "getKnownLadders", "(", ")", "[", "ladder", ".", "name", "]", "=", "ladder", "return", "ladder" ]
define a new Ladder setting and save to disk file
[ "define", "a", "new", "Ladder", "setting", "and", "save", "to", "disk", "file" ]
language: python
partition: train
repo: PyThaiNLP/pythainlp
path: pythainlp/tools/__init__.py
url: https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/tools/__init__.py#L21-L28
def get_pythainlp_data_path() -> str:
    """
    Return full path where PyThaiNLP keeps its (downloaded) data
    """
    path = os.path.join(os.path.expanduser("~"), PYTHAINLP_DATA_DIR)
    if not os.path.exists(path):
        os.makedirs(path)
    return path
[ "def", "get_pythainlp_data_path", "(", ")", "->", "str", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "expanduser", "(", "\"~\"", ")", ",", "PYTHAINLP_DATA_DIR", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")", "return", "path" ]
Return full path where PyThaiNLP keeps its (downloaded) data
[ "Return", "full", "path", "where", "PyThaiNLP", "keeps", "its", "(", "downloaded", ")", "data" ]
language: python
partition: train
repo: runfalk/spans
path: spans/types.py
url: https://github.com/runfalk/spans/blob/59ed73407a569c3be86cfdb4b8f438cb8c794540/spans/types.py#L972-L1006
def offset(self, offset):
    """
    Shift the range to the left or right with the given offset

        >>> intrange(0, 5).offset(5)
        intrange([5,10))
        >>> intrange(5, 10).offset(-5)
        intrange([0,5))
        >>> intrange.empty().offset(5)
        intrange(empty)

    Note that range objects are immutable and are never modified in place.

    :param offset: Scalar to offset by.

    .. versionadded:: 0.1.3
    """
    # If range is empty it can't be offset
    if not self:
        return self

    offset_type = self.type if self.offset_type is None else self.offset_type

    if offset is not None and not isinstance(offset, offset_type):
        raise TypeError((
            "Invalid type for offset '{offset_type.__name__}'"
            " expected '{expected_type.__name__}'").format(
                expected_type=offset_type,
                offset_type=offset.__class__))

    lower = None if self.lower is None else self.lower + offset
    upper = None if self.upper is None else self.upper + offset

    return self.replace(lower=lower, upper=upper)
[ "def", "offset", "(", "self", ",", "offset", ")", ":", "# If range is empty it can't be offset", "if", "not", "self", ":", "return", "self", "offset_type", "=", "self", ".", "type", "if", "self", ".", "offset_type", "is", "None", "else", "self", ".", "offset_type", "if", "offset", "is", "not", "None", "and", "not", "isinstance", "(", "offset", ",", "offset_type", ")", ":", "raise", "TypeError", "(", "(", "\"Invalid type for offset '{offset_type.__name__}'\"", "\" expected '{expected_type.__name__}'\"", ")", ".", "format", "(", "expected_type", "=", "offset_type", ",", "offset_type", "=", "offset", ".", "__class__", ")", ")", "lower", "=", "None", "if", "self", ".", "lower", "is", "None", "else", "self", ".", "lower", "+", "offset", "upper", "=", "None", "if", "self", ".", "upper", "is", "None", "else", "self", ".", "upper", "+", "offset", "return", "self", ".", "replace", "(", "lower", "=", "lower", ",", "upper", "=", "upper", ")" ]
Shift the range to the left or right with the given offset >>> intrange(0, 5).offset(5) intrange([5,10)) >>> intrange(5, 10).offset(-5) intrange([0,5)) >>> intrange.empty().offset(5) intrange(empty) Note that range objects are immutable and are never modified in place. :param offset: Scalar to offset by. .. versionadded:: 0.1.3
[ "Shift", "the", "range", "to", "the", "left", "or", "right", "with", "the", "given", "offset" ]
language: python
partition: train
repo: SheffieldML/GPy
path: GPy/inference/latent_function_inference/posterior.py
url: https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/inference/latent_function_inference/posterior.py#L199-L209
def woodbury_vector(self):
    """
    Woodbury vector in the gaussian likelihood case only is defined as
    $$
    (K_{xx} + \Sigma)^{-1}Y
    \Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
    $$
    """
    if self._woodbury_vector is None:
        self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
    return self._woodbury_vector
[ "def", "woodbury_vector", "(", "self", ")", ":", "if", "self", ".", "_woodbury_vector", "is", "None", ":", "self", ".", "_woodbury_vector", ",", "_", "=", "dpotrs", "(", "self", ".", "K_chol", ",", "self", ".", "mean", "-", "self", ".", "_prior_mean", ")", "return", "self", ".", "_woodbury_vector" ]
Woodbury vector in the gaussian likelihood case only is defined as $$ (K_{xx} + \Sigma)^{-1}Y \Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance} $$
[ "Woodbury", "vector", "in", "the", "gaussian", "likelihood", "case", "only", "is", "defined", "as", "$$", "(", "K_", "{", "xx", "}", "+", "\\", "Sigma", ")", "^", "{", "-", "1", "}", "Y", "\\", "Sigma", ":", "=", "\\", "texttt", "{", "Likelihood", ".", "variance", "/", "Approximate", "likelihood", "covariance", "}", "$$" ]
language: python
partition: train
repo: rootpy/rootpy
path: rootpy/plotting/utils.py
url: https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/utils.py#L197-L378
def get_limits(plottables,
               xpadding=0,
               ypadding=0.1,
               xerror_in_padding=True,
               yerror_in_padding=True,
               snap=True,
               logx=False,
               logy=False,
               logx_crop_value=1E-5,
               logy_crop_value=1E-5,
               logx_base=10,
               logy_base=10):
    """
    Get the axes limits that should be used for a 1D histogram, graph, or
    stack of histograms.

    Parameters
    ----------

    plottables : Hist, Graph, HistStack, or list of such objects
        The object(s) for which visually pleasing plot boundaries are
        requested.

    xpadding : float or 2-tuple, optional (default=0)
        The horizontal padding as a fraction of the final plot width.

    ypadding : float or 2-tuple, optional (default=0.1)
        The vertical padding as a fraction of the final plot height.

    xerror_in_padding : bool, optional (default=True)
        If False then exclude the x error bars from the calculation of the
        plot width.

    yerror_in_padding : bool, optional (default=True)
        If False then exclude the y error bars from the calculation of the
        plot height.

    snap : bool, optional (default=True)
        Make the minimum or maximum of the vertical range the x-axis
        depending on if the plot maximum and minimum are above or below the
        x-axis. If the plot maximum is above the x-axis while the minimum is
        below the x-axis, then this option will have no effect.

    logx : bool, optional (default=False)
        If True, then the x-axis is log scale.

    logy : bool, optional (default=False)
        If True, then the y-axis is log scale.

    logx_crop_value : float, optional (default=1E-5)
        If an x-axis is using a logarithmic scale then crop all non-positive
        values with this value.

    logy_crop_value : float, optional (default=1E-5)
        If the y-axis is using a logarithmic scale then crop all non-positive
        values with this value.

    logx_base : float, optional (default=10)
        The base used for the logarithmic scale of the x-axis.

    logy_base : float, optional (default=10)
        The base used for the logarithmic scale of the y-axis.

    Returns
    -------

    xmin, xmax, ymin, ymax : tuple of plot boundaries
        The computed x and y-axis ranges.

    """
    try:
        import numpy as np
        use_numpy = True
    except ImportError:
        use_numpy = False

    if not isinstance(plottables, (list, tuple)):
        plottables = [plottables]

    xmin = float('+inf')
    xmax = float('-inf')
    ymin = float('+inf')
    ymax = float('-inf')

    for h in plottables:

        if isinstance(h, HistStack):
            h = h.sum

        if not isinstance(h, (_Hist, _Graph1DBase)):
            raise TypeError(
                "unable to determine plot axes ranges "
                "from object of type `{0}`".format(
                    type(h)))

        if use_numpy:
            y_array_min = y_array_max = np.array(list(h.y()))
            if yerror_in_padding:
                y_array_min = y_array_min - np.array(list(h.yerrl()))
                y_array_max = y_array_max + np.array(list(h.yerrh()))
            _ymin = y_array_min.min()
            _ymax = y_array_max.max()
        else:
            y_array_min = y_array_max = list(h.y())
            if yerror_in_padding:
                y_array_min = multisub(y_array_min, list(h.yerrl()))
                y_array_max = multiadd(y_array_max, list(h.yerrh()))
            _ymin = min(y_array_min)
            _ymax = max(y_array_max)

        if isinstance(h, _Graph1DBase):
            if use_numpy:
                x_array_min = x_array_max = np.array(list(h.x()))
                if xerror_in_padding:
                    x_array_min = x_array_min - np.array(list(h.xerrl()))
                    x_array_max = x_array_max + np.array(list(h.xerrh()))
                _xmin = x_array_min.min()
                _xmax = x_array_max.max()
            else:
                x_array_min = x_array_max = list(h.x())
                if xerror_in_padding:
                    x_array_min = multisub(x_array_min, list(h.xerrl()))
                    x_array_max = multiadd(x_array_max, list(h.xerrh()))
                _xmin = min(x_array_min)
                _xmax = max(x_array_max)
        else:
            _xmin = h.xedgesl(1)
            _xmax = h.xedgesh(h.nbins(0))

        if logy:
            _ymin = max(logy_crop_value, _ymin)
            _ymax = max(logy_crop_value, _ymax)

        if logx:
            _xmin = max(logx_crop_value, _xmin)
            _xmax = max(logx_crop_value, _xmax)

        if _xmin < xmin:
            xmin = _xmin
        if _xmax > xmax:
            xmax = _xmax
        if _ymin < ymin:
            ymin = _ymin
        if _ymax > ymax:
            ymax = _ymax

    if isinstance(xpadding, (list, tuple)):
        if len(xpadding) != 2:
            raise ValueError("xpadding must be of length 2")
        xpadding_left = xpadding[0]
        xpadding_right = xpadding[1]
    else:
        xpadding_left = xpadding_right = xpadding

    if isinstance(ypadding, (list, tuple)):
        if len(ypadding) != 2:
            raise ValueError("ypadding must be of length 2")
        ypadding_top = ypadding[0]
        ypadding_bottom = ypadding[1]
    else:
        ypadding_top = ypadding_bottom = ypadding

    if logx:
        x0, x3 = _limits_helper(
            log(xmin, logx_base), log(xmax, logx_base),
            xpadding_left, xpadding_right)
        xmin = logx_base ** x0
        xmax = logx_base ** x3
    else:
        xmin, xmax = _limits_helper(
            xmin, xmax, xpadding_left, xpadding_right)

    if logy:
        y0, y3 = _limits_helper(
            log(ymin, logy_base), log(ymax, logy_base),
            ypadding_bottom, ypadding_top, snap=False)
        ymin = logy_base ** y0
        ymax = logy_base ** y3
    else:
        ymin, ymax = _limits_helper(
            ymin, ymax, ypadding_bottom, ypadding_top, snap=snap)

    return xmin, xmax, ymin, ymax
[ "def", "get_limits", "(", "plottables", ",", "xpadding", "=", "0", ",", "ypadding", "=", "0.1", ",", "xerror_in_padding", "=", "True", ",", "yerror_in_padding", "=", "True", ",", "snap", "=", "True", ",", "logx", "=", "False", ",", "logy", "=", "False", ",", "logx_crop_value", "=", "1E-5", ",", "logy_crop_value", "=", "1E-5", ",", "logx_base", "=", "10", ",", "logy_base", "=", "10", ")", ":", "try", ":", "import", "numpy", "as", "np", "use_numpy", "=", "True", "except", "ImportError", ":", "use_numpy", "=", "False", "if", "not", "isinstance", "(", "plottables", ",", "(", "list", ",", "tuple", ")", ")", ":", "plottables", "=", "[", "plottables", "]", "xmin", "=", "float", "(", "'+inf'", ")", "xmax", "=", "float", "(", "'-inf'", ")", "ymin", "=", "float", "(", "'+inf'", ")", "ymax", "=", "float", "(", "'-inf'", ")", "for", "h", "in", "plottables", ":", "if", "isinstance", "(", "h", ",", "HistStack", ")", ":", "h", "=", "h", ".", "sum", "if", "not", "isinstance", "(", "h", ",", "(", "_Hist", ",", "_Graph1DBase", ")", ")", ":", "raise", "TypeError", "(", "\"unable to determine plot axes ranges \"", "\"from object of type `{0}`\"", ".", "format", "(", "type", "(", "h", ")", ")", ")", "if", "use_numpy", ":", "y_array_min", "=", "y_array_max", "=", "np", ".", "array", "(", "list", "(", "h", ".", "y", "(", ")", ")", ")", "if", "yerror_in_padding", ":", "y_array_min", "=", "y_array_min", "-", "np", ".", "array", "(", "list", "(", "h", ".", "yerrl", "(", ")", ")", ")", "y_array_max", "=", "y_array_max", "+", "np", ".", "array", "(", "list", "(", "h", ".", "yerrh", "(", ")", ")", ")", "_ymin", "=", "y_array_min", ".", "min", "(", ")", "_ymax", "=", "y_array_max", ".", "max", "(", ")", "else", ":", "y_array_min", "=", "y_array_max", "=", "list", "(", "h", ".", "y", "(", ")", ")", "if", "yerror_in_padding", ":", "y_array_min", "=", "multisub", "(", "y_array_min", ",", "list", "(", "h", ".", "yerrl", "(", ")", ")", ")", "y_array_max", "=", "multiadd", "(", "y_array_max", ",", "list", "(", "h", ".", "yerrh", "(", ")", ")", ")", "_ymin", "=", "min", "(", "y_array_min", ")", "_ymax", "=", "max", "(", "y_array_max", ")", "if", "isinstance", "(", "h", ",", "_Graph1DBase", ")", ":", "if", "use_numpy", ":", "x_array_min", "=", "x_array_max", "=", "np", ".", "array", "(", "list", "(", "h", ".", "x", "(", ")", ")", ")", "if", "xerror_in_padding", ":", "x_array_min", "=", "x_array_min", "-", "np", ".", "array", "(", "list", "(", "h", ".", "xerrl", "(", ")", ")", ")", "x_array_max", "=", "x_array_max", "+", "np", ".", "array", "(", "list", "(", "h", ".", "xerrh", "(", ")", ")", ")", "_xmin", "=", "x_array_min", ".", "min", "(", ")", "_xmax", "=", "x_array_max", ".", "max", "(", ")", "else", ":", "x_array_min", "=", "x_array_max", "=", "list", "(", "h", ".", "x", "(", ")", ")", "if", "xerror_in_padding", ":", "x_array_min", "=", "multisub", "(", "x_array_min", ",", "list", "(", "h", ".", "xerrl", "(", ")", ")", ")", "x_array_max", "=", "multiadd", "(", "x_array_max", ",", "list", "(", "h", ".", "xerrh", "(", ")", ")", ")", "_xmin", "=", "min", "(", "x_array_min", ")", "_xmax", "=", "max", "(", "x_array_max", ")", "else", ":", "_xmin", "=", "h", ".", "xedgesl", "(", "1", ")", "_xmax", "=", "h", ".", "xedgesh", "(", "h", ".", "nbins", "(", "0", ")", ")", "if", "logy", ":", "_ymin", "=", "max", "(", "logy_crop_value", ",", "_ymin", ")", "_ymax", "=", "max", "(", "logy_crop_value", ",", "_ymax", ")", "if", "logx", ":", "_xmin", "=", "max", "(", "logx_crop_value", ",", "_xmin", ")", "_xmax", "=", "max", "(", 
"logx_crop_value", ",", "_xmax", ")", "if", "_xmin", "<", "xmin", ":", "xmin", "=", "_xmin", "if", "_xmax", ">", "xmax", ":", "xmax", "=", "_xmax", "if", "_ymin", "<", "ymin", ":", "ymin", "=", "_ymin", "if", "_ymax", ">", "ymax", ":", "ymax", "=", "_ymax", "if", "isinstance", "(", "xpadding", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "xpadding", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"xpadding must be of length 2\"", ")", "xpadding_left", "=", "xpadding", "[", "0", "]", "xpadding_right", "=", "xpadding", "[", "1", "]", "else", ":", "xpadding_left", "=", "xpadding_right", "=", "xpadding", "if", "isinstance", "(", "ypadding", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "ypadding", ")", "!=", "2", ":", "raise", "ValueError", "(", "\"ypadding must be of length 2\"", ")", "ypadding_top", "=", "ypadding", "[", "0", "]", "ypadding_bottom", "=", "ypadding", "[", "1", "]", "else", ":", "ypadding_top", "=", "ypadding_bottom", "=", "ypadding", "if", "logx", ":", "x0", ",", "x3", "=", "_limits_helper", "(", "log", "(", "xmin", ",", "logx_base", ")", ",", "log", "(", "xmax", ",", "logx_base", ")", ",", "xpadding_left", ",", "xpadding_right", ")", "xmin", "=", "logx_base", "**", "x0", "xmax", "=", "logx_base", "**", "x3", "else", ":", "xmin", ",", "xmax", "=", "_limits_helper", "(", "xmin", ",", "xmax", ",", "xpadding_left", ",", "xpadding_right", ")", "if", "logy", ":", "y0", ",", "y3", "=", "_limits_helper", "(", "log", "(", "ymin", ",", "logy_base", ")", ",", "log", "(", "ymax", ",", "logy_base", ")", ",", "ypadding_bottom", ",", "ypadding_top", ",", "snap", "=", "False", ")", "ymin", "=", "logy_base", "**", "y0", "ymax", "=", "logy_base", "**", "y3", "else", ":", "ymin", ",", "ymax", "=", "_limits_helper", "(", "ymin", ",", "ymax", ",", "ypadding_bottom", ",", "ypadding_top", ",", "snap", "=", "snap", ")", "return", "xmin", ",", "xmax", ",", "ymin", ",", "ymax" ]
Get the axes limits that should be used for a 1D histogram, graph, or stack of histograms. Parameters ---------- plottables : Hist, Graph, HistStack, or list of such objects The object(s) for which visually pleasing plot boundaries are requested. xpadding : float or 2-tuple, optional (default=0) The horizontal padding as a fraction of the final plot width. ypadding : float or 2-tuple, optional (default=0.1) The vertical padding as a fraction of the final plot height. xerror_in_padding : bool, optional (default=True) If False then exclude the x error bars from the calculation of the plot width. yerror_in_padding : bool, optional (default=True) If False then exclude the y error bars from the calculation of the plot height. snap : bool, optional (default=True) Make the minimum or maximum of the vertical range the x-axis depending on if the plot maximum and minimum are above or below the x-axis. If the plot maximum is above the x-axis while the minimum is below the x-axis, then this option will have no effect. logx : bool, optional (default=False) If True, then the x-axis is log scale. logy : bool, optional (default=False) If True, then the y-axis is log scale. logx_crop_value : float, optional (default=1E-5) If an x-axis is using a logarithmic scale then crop all non-positive values with this value. logy_crop_value : float, optional (default=1E-5) If the y-axis is using a logarithmic scale then crop all non-positive values with this value. logx_base : float, optional (default=10) The base used for the logarithmic scale of the x-axis. logy_base : float, optional (default=10) The base used for the logarithmic scale of the y-axis. Returns ------- xmin, xmax, ymin, ymax : tuple of plot boundaries The computed x and y-axis ranges.
[ "Get", "the", "axes", "limits", "that", "should", "be", "used", "for", "a", "1D", "histogram", "graph", "or", "stack", "of", "histograms", "." ]
python
train
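A quick usage sketch for get_limits. The Hist import path and the FillRandom call below are assumptions in the style of rootpy/ROOT, not facts taken from this record; only the get_limits signature is.

from rootpy.plotting import Hist   # assumed import path

h = Hist(10, 0, 1)                 # 10 bins over [0, 1]
h.FillRandom('gaus', 500)          # ROOT-style random fill (assumption)
xmin, xmax, ymin, ymax = get_limits(h, ypadding=(0.2, 0.05), snap=True)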
saltstack/salt
salt/modules/extfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/extfs.py#L222-L285
def dump(device, args=None):
    '''
    Return all contents of dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.dump /dev/sda1
    '''
    cmd = 'dumpe2fs {0}'.format(device)
    if args:
        cmd = cmd + ' -' + args
    ret = {'attributes': {}, 'blocks': {}}
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    mode = 'opts'
    group = None
    for line in out:
        if not line:
            continue
        if line.startswith('dumpe2fs'):
            continue
        if mode == 'opts':
            line = line.replace('\t', ' ')
            comps = line.split(': ')
            if line.startswith('Filesystem features'):
                ret['attributes'][comps[0]] = comps[1].split()
            elif line.startswith('Group') and not line.startswith('Group descriptor size'):
                mode = 'blocks'
            else:
                if len(comps) < 2:
                    continue
                ret['attributes'][comps[0]] = comps[1].strip()

        if mode == 'blocks':
            if line.startswith('Group'):
                line = line.replace(':', '')
                line = line.replace('(', '')
                line = line.replace(')', '')
                line = line.replace('[', '')
                line = line.replace(']', '')
                comps = line.split()
                blkgrp = comps[1]
                group = 'Group {0}'.format(blkgrp)
                ret['blocks'][group] = {}
                ret['blocks'][group]['group'] = blkgrp
                ret['blocks'][group]['range'] = comps[3]
                # TODO: comps[4:], which may look like one of the following:
                #    ITABLE_ZEROED
                #    INODE_UNINIT, ITABLE_ZEROED
                # Does anyone know what to call these?
                ret['blocks'][group]['extra'] = []
            elif 'Free blocks:' in line:
                comps = line.split(': ')
                free_blocks = comps[1].split(', ')
                ret['blocks'][group]['free blocks'] = free_blocks
            elif 'Free inodes:' in line:
                comps = line.split(': ')
                inodes = comps[1].split(', ')
                ret['blocks'][group]['free inodes'] = inodes
            else:
                line = line.strip()
                ret['blocks'][group]['extra'].append(line)
    return ret
[ "def", "dump", "(", "device", ",", "args", "=", "None", ")", ":", "cmd", "=", "'dumpe2fs {0}'", ".", "format", "(", "device", ")", "if", "args", ":", "cmd", "=", "cmd", "+", "' -'", "+", "args", "ret", "=", "{", "'attributes'", ":", "{", "}", ",", "'blocks'", ":", "{", "}", "}", "out", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", ".", "splitlines", "(", ")", "mode", "=", "'opts'", "group", "=", "None", "for", "line", "in", "out", ":", "if", "not", "line", ":", "continue", "if", "line", ".", "startswith", "(", "'dumpe2fs'", ")", ":", "continue", "if", "mode", "==", "'opts'", ":", "line", "=", "line", ".", "replace", "(", "'\\t'", ",", "' '", ")", "comps", "=", "line", ".", "split", "(", "': '", ")", "if", "line", ".", "startswith", "(", "'Filesystem features'", ")", ":", "ret", "[", "'attributes'", "]", "[", "comps", "[", "0", "]", "]", "=", "comps", "[", "1", "]", ".", "split", "(", ")", "elif", "line", ".", "startswith", "(", "'Group'", ")", "and", "not", "line", ".", "startswith", "(", "'Group descriptor size'", ")", ":", "mode", "=", "'blocks'", "else", ":", "if", "len", "(", "comps", ")", "<", "2", ":", "continue", "ret", "[", "'attributes'", "]", "[", "comps", "[", "0", "]", "]", "=", "comps", "[", "1", "]", ".", "strip", "(", ")", "if", "mode", "==", "'blocks'", ":", "if", "line", ".", "startswith", "(", "'Group'", ")", ":", "line", "=", "line", ".", "replace", "(", "':'", ",", "''", ")", "line", "=", "line", ".", "replace", "(", "'('", ",", "''", ")", "line", "=", "line", ".", "replace", "(", "')'", ",", "''", ")", "line", "=", "line", ".", "replace", "(", "'['", ",", "''", ")", "line", "=", "line", ".", "replace", "(", "']'", ",", "''", ")", "comps", "=", "line", ".", "split", "(", ")", "blkgrp", "=", "comps", "[", "1", "]", "group", "=", "'Group {0}'", ".", "format", "(", "blkgrp", ")", "ret", "[", "'blocks'", "]", "[", "group", "]", "=", "{", "}", "ret", "[", "'blocks'", "]", "[", "group", "]", "[", "'group'", "]", "=", "blkgrp", "ret", "[", "'blocks'", "]", "[", "group", "]", "[", "'range'", "]", "=", "comps", "[", "3", "]", "# TODO: comps[4:], which may look one one of the following:", "# ITABLE_ZEROED", "# INODE_UNINIT, ITABLE_ZEROED", "# Does anyone know what to call these?", "ret", "[", "'blocks'", "]", "[", "group", "]", "[", "'extra'", "]", "=", "[", "]", "elif", "'Free blocks:'", "in", "line", ":", "comps", "=", "line", ".", "split", "(", "': '", ")", "free_blocks", "=", "comps", "[", "1", "]", ".", "split", "(", "', '", ")", "ret", "[", "'blocks'", "]", "[", "group", "]", "[", "'free blocks'", "]", "=", "free_blocks", "elif", "'Free inodes:'", "in", "line", ":", "comps", "=", "line", ".", "split", "(", "': '", ")", "inodes", "=", "comps", "[", "1", "]", ".", "split", "(", "', '", ")", "ret", "[", "'blocks'", "]", "[", "group", "]", "[", "'free inodes'", "]", "=", "inodes", "else", ":", "line", "=", "line", ".", "strip", "(", ")", "ret", "[", "'blocks'", "]", "[", "group", "]", "[", "'extra'", "]", ".", "append", "(", "line", ")", "return", "ret" ]
Return all contents of dumpe2fs for a specified device CLI Example: .. code-block:: bash salt '*' extfs.dump /dev/sda1
[ "Return", "all", "contents", "of", "dumpe2fs", "for", "a", "specified", "device" ]
python
train
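dump() shells out through Salt's __salt__['cmd.run'], so it only runs inside a Salt execution context. A hedged sketch of reading the returned structure; the dict keys below come straight from the function body:

result = dump('/dev/sda1')
print(result['attributes'].get('Filesystem features'))
for group, info in result['blocks'].items():
    print(group, info['range'], info.get('free blocks'))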
dereneaton/ipyrad
ipyrad/analysis/bucky.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/bucky.py#L466-L538
def run_bucky(self, ipyclient, force=False, quiet=False, subname=False):
        """
        Runs bucky for a given set of parameters and stores the result
        to the ipa.bucky object. The results will be stored by default
        with the name '{name}-{alpha}' unless an argument is passed for
        'subname' to customize the output name.

        Parameters:
        -----------
        subname (str):
            A custom name prefix for the output files produced by the bucky
            analysis and output into the {workdir}/{name} directory.

        force (bool):
            If True then existing result files with the same name prefix
            will be overwritten.

        quiet (bool):
            If True the progress bars will be suppressed.

        ipyclient (ipyparallel.Client)
            An active ipyparallel client to distribute jobs to.
        """
        ## check for existing results files
        minidir = os.path.realpath(os.path.join(self.workdir, self.name))
        infiles = glob.glob(os.path.join(minidir, "*.sumt"))
        outroot = os.path.realpath(os.path.join(self.workdir, self.name))

        ## build alpha list
        if isinstance(self.params.bucky_alpha, list):
            alphas = self.params.bucky_alpha
        else:
            alphas = [self.params.bucky_alpha]

        ## load balancer
        lbview = ipyclient.load_balanced_view()

        ## submit each to be processed
        asyncs = []
        for alpha in alphas:
            pathname = os.path.join(outroot, "CF-a"+str(alpha))
            if (os.path.exists(pathname)) and (force!=True):
                print("BUCKy results already exist for this object at alpha={}\n".format(alpha) +\
                      "use force=True to overwrite existing results")
            else:
                args = [
                    alpha,
                    self.params.bucky_nchains,
                    self.params.bucky_nreps,
                    self.params.bucky_niter,
                    pathname,
                    infiles]
                ## 'async' is a keyword in Python 3.7+, so the result
                ## handle is named 'rasync' instead
                rasync = lbview.apply(_call_bucky, *args)
                asyncs.append(rasync)

        ## track progress
        start = time.time()
        printstr = "[bucky] infer CF posteriors | {} | "
        while 1:
            ready = [i.ready() for i in asyncs]
            elapsed = datetime.timedelta(seconds=int(time.time()-start))
            if not quiet:
                progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer="")
            if len(ready) == sum(ready):
                if not quiet:
                    print("")
                break
            else:
                time.sleep(0.1)

        ## check success
        for rasync in asyncs:
            if not rasync.successful():
                raise IPyradWarningExit(rasync.result())
[ "def", "run_bucky", "(", "self", ",", "ipyclient", ",", "force", "=", "False", ",", "quiet", "=", "False", ",", "subname", "=", "False", ")", ":", "## check for existing results files", "minidir", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "self", ".", "name", ")", ")", "infiles", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "minidir", ",", "\"*.sumt\"", ")", ")", "outroot", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "self", ".", "name", ")", ")", "## build alpha list", "if", "isinstance", "(", "self", ".", "params", ".", "bucky_alpha", ",", "list", ")", ":", "alphas", "=", "self", ".", "params", ".", "bucky_alpha", "else", ":", "alphas", "=", "[", "self", ".", "params", ".", "bucky_alpha", "]", "## load balancer", "lbview", "=", "ipyclient", ".", "load_balanced_view", "(", ")", "## submit each to be processed", "asyncs", "=", "[", "]", "for", "alpha", "in", "alphas", ":", "pathname", "=", "os", ".", "path", ".", "join", "(", "outroot", ",", "\"CF-a\"", "+", "str", "(", "alpha", ")", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "pathname", ")", ")", "and", "(", "force", "!=", "True", ")", ":", "print", "(", "\"BUCKy results already exist for this object at alpha={}\\n\"", ".", "format", "(", "alpha", ")", "+", "\"use force=True to overwrite existing results\"", ")", "else", ":", "args", "=", "[", "alpha", ",", "self", ".", "params", ".", "bucky_nchains", ",", "self", ".", "params", ".", "bucky_nreps", ",", "self", ".", "params", ".", "bucky_niter", ",", "pathname", ",", "infiles", "]", "async", "=", "lbview", ".", "apply", "(", "_call_bucky", ",", "*", "args", ")", "asyncs", ".", "append", "(", "async", ")", "## track progress", "start", "=", "time", ".", "time", "(", ")", "printstr", "=", "\"[bucky] infer CF posteriors | {} | \"", "while", "1", ":", "ready", "=", "[", "i", ".", "ready", "(", ")", "for", "i", "in", "asyncs", "]", "elapsed", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "int", "(", "time", ".", "time", "(", ")", "-", "start", ")", ")", "if", "not", "quiet", ":", "progressbar", "(", "len", "(", "ready", ")", ",", "sum", "(", "ready", ")", ",", "printstr", ".", "format", "(", "elapsed", ")", ",", "spacer", "=", "\"\"", ")", "if", "len", "(", "ready", ")", "==", "sum", "(", "ready", ")", ":", "if", "not", "quiet", ":", "print", "(", "\"\"", ")", "break", "else", ":", "time", ".", "sleep", "(", "0.1", ")", "## check success", "for", "async", "in", "asyncs", ":", "if", "not", "async", ".", "successful", "(", ")", ":", "raise", "IPyradWarningExit", "(", "async", ".", "result", "(", ")", ")" ]
Runs bucky for a given set of parameters and stores the result
to the ipa.bucky object. The results will be stored by default
with the name '{name}-{alpha}' unless an argument is passed for
'subname' to customize the output name.

Parameters:
-----------
subname (str):
    A custom name prefix for the output files produced by the bucky
    analysis and output into the {workdir}/{name} directory.

force (bool):
    If True then existing result files with the same name prefix
    will be overwritten.

quiet (bool):
    If True the progress bars will be suppressed.

ipyclient (ipyparallel.Client)
    An active ipyparallel client to distribute jobs to.
[ "Runs", "bucky", "for", "a", "given", "set", "of", "parameters", "and", "stores", "the", "result", "to", "the", "ipa", ".", "bucky", "object", ".", "The", "results", "will", "be", "stored", "by", "default", "with", "the", "name", "{", "name", "}", "-", "{", "alpha", "}", "unless", "a", "argument", "is", "passed", "for", "subname", "to", "customize", "the", "output", "name", "." ]
python
valid
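A sketch of driving run_bucky with an ipyparallel client. The bucky constructor shown is hypothetical; only run_bucky's own signature and the params attribute names come from this record:

import ipyparallel as ipp
import ipyrad.analysis as ipa   # assumed import alias

client = ipp.Client()                      # assumes a running ipcluster
bb = ipa.bucky(name="buckytest",           # hypothetical constructor call
               workdir="analysis-bucky")
bb.params.bucky_alpha = [0.1, 1.0, 10.0]   # attribute names from the record
bb.run_bucky(client, force=True, quiet=False)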
mogproject/mog-commons-python
src/mog_commons/command.py
https://github.com/mogproject/mog-commons-python/blob/951cf0fa9a56248b4d45be720be25f1d4b7e1bff/src/mog_commons/command.py#L70-L85
def capture_command(args, shell=False, cwd=None, env=None, stdin=None, cmd_encoding='utf-8'): """ Execute external command and capture output :param args: command line arguments : [string] :param shell: True when using shell : boolean :param cwd: working directory : string :param env: environment variables : dict :param stdin: standard input :param cmd_encoding: command line encoding: string :return: tuple of return code, stdout data and stderr data """ p = subprocess.Popen( __convert_args(args, shell, cmd_encoding), shell=shell, cwd=cwd, env=__convert_env(env, cmd_encoding), stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout_data, stderr_data = p.communicate() return p.returncode, stdout_data, stderr_data
[ "def", "capture_command", "(", "args", ",", "shell", "=", "False", ",", "cwd", "=", "None", ",", "env", "=", "None", ",", "stdin", "=", "None", ",", "cmd_encoding", "=", "'utf-8'", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "__convert_args", "(", "args", ",", "shell", ",", "cmd_encoding", ")", ",", "shell", "=", "shell", ",", "cwd", "=", "cwd", ",", "env", "=", "__convert_env", "(", "env", ",", "cmd_encoding", ")", ",", "stdin", "=", "stdin", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "stdout_data", ",", "stderr_data", "=", "p", ".", "communicate", "(", ")", "return", "p", ".", "returncode", ",", "stdout_data", ",", "stderr_data" ]
Execute external command and capture output :param args: command line arguments : [string] :param shell: True when using shell : boolean :param cwd: working directory : string :param env: environment variables : dict :param stdin: standard input :param cmd_encoding: command line encoding: string :return: tuple of return code, stdout data and stderr data
[ "Execute", "external", "command", "and", "capture", "output", ":", "param", "args", ":", "command", "line", "arguments", ":", "[", "string", "]", ":", "param", "shell", ":", "True", "when", "using", "shell", ":", "boolean", ":", "param", "cwd", ":", "working", "directory", ":", "string", ":", "param", "env", ":", "environment", "variables", ":", "dict", ":", "param", "stdin", ":", "standard", "input", ":", "param", "cmd_encoding", ":", "command", "line", "encoding", ":", "string", ":", "return", ":", "tuple", "of", "return", "code", "stdout", "data", "and", "stderr", "data" ]
python
train
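capture_command wraps subprocess.Popen directly, so usage is self-contained; note that stdout and stderr come back as bytes:

rc, out, err = capture_command(['echo', 'hello'])
assert rc == 0
print(out.decode('utf-8').strip())        # -> hello

# shell form: the command string is passed through the system shell (POSIX here)
rc, out, err = capture_command('echo $HOME', shell=True)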
iamjarret/pystockfish
pystockfish.py
https://github.com/iamjarret/pystockfish/blob/ae34a4b4d29c577c888b72691fcf0cb5a89b1792/pystockfish.py#L235-L254
def _bestmove_get_info(text): """ Parse stockfish evaluation output as dictionary. Examples of input: "info depth 2 seldepth 3 multipv 1 score cp -656 nodes 43 nps 43000 tbhits 0 \ time 1 pv g7g6 h3g3 g6f7" "info depth 10 seldepth 12 multipv 1 score mate 5 nodes 2378 nps 1189000 tbhits 0 \ time 2 pv h3g3 g6f7 g3c7 b5d7 d1d7 f7g6 c7g3 g6h5 e6f4" """ result_dict = Engine._get_info_pv(text) result_dict.update(Engine._get_info_score(text)) single_value_fields = ['depth', 'seldepth', 'multipv', 'nodes', 'nps', 'tbhits', 'time'] for field in single_value_fields: result_dict.update(Engine._get_info_singlevalue_subfield(text, field)) return result_dict
[ "def", "_bestmove_get_info", "(", "text", ")", ":", "result_dict", "=", "Engine", ".", "_get_info_pv", "(", "text", ")", "result_dict", ".", "update", "(", "Engine", ".", "_get_info_score", "(", "text", ")", ")", "single_value_fields", "=", "[", "'depth'", ",", "'seldepth'", ",", "'multipv'", ",", "'nodes'", ",", "'nps'", ",", "'tbhits'", ",", "'time'", "]", "for", "field", "in", "single_value_fields", ":", "result_dict", ".", "update", "(", "Engine", ".", "_get_info_singlevalue_subfield", "(", "text", ",", "field", ")", ")", "return", "result_dict" ]
Parse stockfish evaluation output as dictionary. Examples of input: "info depth 2 seldepth 3 multipv 1 score cp -656 nodes 43 nps 43000 tbhits 0 \ time 1 pv g7g6 h3g3 g6f7" "info depth 10 seldepth 12 multipv 1 score mate 5 nodes 2378 nps 1189000 tbhits 0 \ time 2 pv h3g3 g6f7 g3c7 b5d7 d1d7 f7g6 c7g3 g6h5 e6f4"
[ "Parse", "stockfish", "evaluation", "output", "as", "dictionary", "." ]
python
train
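A sketch of feeding one UCI "info" line through the static helper. The exact value types in the result depend on the private _get_info_* parsers, so the printed fields are assumptions beyond what the docstring shows:

line = ('info depth 10 seldepth 12 multipv 1 score mate 5 nodes 2378 '
        'nps 1189000 tbhits 0 time 2 pv h3g3 g6f7 g3c7')
info = Engine._bestmove_get_info(line)
print(info['depth'], info['nodes'])       # single-value subfields
print(info.get('pv'), info.get('score'))  # parsed by the helper methods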
noahbenson/neuropythy
neuropythy/util/core.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/util/core.py#L1344-L1367
def to_curve_spline(obj):
    '''
    to_curve_spline(obj) obj if obj is a curve spline and otherwise attempts to coerce obj into a
      curve spline, raising an error if it cannot.
    '''
    if is_curve_spline(obj): return obj
    elif is_tuple(obj) and len(obj) == 2: (crds,opts) = obj
    else: (crds,opts) = (obj,{})
    if pimms.is_matrix(crds) or is_curve_spline(crds): crds = [crds]
    spls = [c for c in crds if is_curve_spline(c)]
    opts = dict(opts)
    if 'weights' not in opts and len(spls) == len(crds):
        if all(c.weights is not None for c in crds):
            opts['weights'] = np.concatenate([c.weights for c in crds])
    if 'order' not in opts and len(spls) > 0:
        opts['order'] = np.min([c.order for c in spls])
    if 'smoothing' not in opts and len(spls) > 0:
        sm = set([c.smoothing for c in spls])
        if len(sm) == 1: opts['smoothing'] = list(sm)[0]
        else: opts['smoothing'] = None
    # test each element individually; testing the whole list here was a bug
    crds = [x.crds if is_curve_spline(x) else np.asarray(x) for x in crds]
    crds = [x if x.shape[0] == 2 else x.T for x in crds]
    crds = np.hstack(crds)
    return curve_spline(crds, **opts)
[ "def", "to_curve_spline", "(", "obj", ")", ":", "if", "is_curve_spline", "(", "obj", ")", ":", "return", "obj", "elif", "is_tuple", "(", "obj", ")", "and", "len", "(", "obj", ")", "==", "2", ":", "(", "crds", ",", "opts", ")", "=", "obj", "else", ":", "(", "crds", ",", "opts", ")", "=", "(", "obj", ",", "{", "}", ")", "if", "pimms", ".", "is_matrix", "(", "crds", ")", "or", "is_curve_spline", "(", "crds", ")", ":", "crds", "=", "[", "crds", "]", "spls", "=", "[", "c", "for", "c", "in", "crds", "if", "is_curve_spline", "(", "c", ")", "]", "opts", "=", "dict", "(", "opts", ")", "if", "'weights'", "not", "in", "opts", "and", "len", "(", "spls", ")", "==", "len", "(", "crds", ")", ":", "if", "all", "(", "c", ".", "weights", "is", "not", "None", "for", "c", "in", "crds", ")", ":", "opts", "[", "'weights'", "]", "=", "np", ".", "concatenate", "(", "[", "c", ".", "weights", "for", "c", "in", "crds", "]", ")", "if", "'order'", "not", "in", "opts", "and", "len", "(", "spls", ")", ">", "0", ":", "opts", "[", "'order'", "]", "=", "np", ".", "min", "(", "[", "c", ".", "order", "for", "c", "in", "spls", "]", ")", "if", "'smoothing'", "not", "in", "opts", "and", "len", "(", "spls", ")", ">", "0", ":", "sm", "=", "set", "(", "[", "c", ".", "smoothing", "for", "c", "in", "spls", "]", ")", "if", "len", "(", "sm", ")", "==", "1", ":", "opts", "[", "'smoothing'", "]", "=", "list", "(", "sm", ")", "[", "0", "]", "else", ":", "opts", "[", "'smoothing'", "]", "=", "None", "crds", "=", "[", "x", ".", "crds", "if", "is_curve_spline", "(", "crds", ")", "else", "np", ".", "asarray", "(", "x", ")", "for", "x", "in", "crds", "]", "crds", "=", "[", "x", "if", "x", ".", "shape", "[", "0", "]", "==", "2", "else", "x", ".", "T", "for", "x", "in", "crds", "]", "crds", "=", "np", ".", "hstack", "(", "crds", ")", "return", "curve_spline", "(", "crds", ",", "*", "*", "opts", ")" ]
to_curve_spline(obj) obj if obj is a curve spline and otherwise attempts to coerce obj into a curve spline, raising an error if it cannot.
[ "to_curve_spline", "(", "obj", ")", "obj", "if", "obj", "is", "a", "curve", "spline", "and", "otherwise", "attempts", "to", "coerce", "obj", "into", "a", "curve", "spline", "raising", "an", "error", "if", "it", "cannot", "." ]
python
train
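A hedged sketch: a 2xN coordinate matrix plus an options dict is one input shape the function accepts, and 'order'/'smoothing' are the option keys it inspects itself (whether curve_spline takes further kwargs is not shown in this record):

import numpy as np

pts = np.array([[0.0, 1.0, 2.0, 3.0],     # x row
                [0.0, 1.0, 0.0, 1.0]])    # y row
crv = to_curve_spline((pts, {'order': 3, 'smoothing': None}))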
HttpRunner/HttpRunner
httprunner/utils.py
https://github.com/HttpRunner/HttpRunner/blob/f259551bf9c8ba905eae5c1afcf2efea20ae0871/httprunner/utils.py#L584-L595
def dump_logs(json_data, project_mapping, tag_name):
    """ Dump tests data to a JSON file.
        The dumped file is located in the PWD/logs folder.

    Args:
        json_data (list/dict): json data to dump
        project_mapping (dict): project info
        tag_name (str): tag name, loaded/parsed/summary

    """
    pwd_dir_path, dump_file_name = _prepare_dump_info(project_mapping, tag_name)
    dump_json_file(json_data, pwd_dir_path, dump_file_name)
[ "def", "dump_logs", "(", "json_data", ",", "project_mapping", ",", "tag_name", ")", ":", "pwd_dir_path", ",", "dump_file_name", "=", "_prepare_dump_info", "(", "project_mapping", ",", "tag_name", ")", "dump_json_file", "(", "json_data", ",", "pwd_dir_path", ",", "dump_file_name", ")" ]
Dump tests data to a JSON file.
The dumped file is located in the PWD/logs folder.

Args:
    json_data (list/dict): json data to dump
    project_mapping (dict): project info
    tag_name (str): tag name, loaded/parsed/summary
[ "dump", "tests", "data", "to", "json", "file", ".", "the", "dumped", "file", "is", "located", "in", "PWD", "/", "logs", "folder", "." ]
python
train
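A hedged call sketch; the keys _prepare_dump_info reads from project_mapping are not visible in this record, so the mapping below is illustrative only:

summary = {"success": True, "stat": {"testsRun": 3, "failures": 0}}
project_mapping = {"test_path": "tests/demo.yml"}   # hypothetical keys
dump_logs(summary, project_mapping, "summary")      # writes under PWD/logs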
CI-WATER/gsshapy
gsshapy/orm/spn.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L181-L199
def _createSjunc(self, sjuncs): """ Create GSSHAPY SuperJunction Objects Method """ for sjunc in sjuncs: # Create GSSHAPY SuperJunction object superJunction = SuperJunction(sjuncNumber=sjunc['sjuncNumber'], groundSurfaceElev=sjunc['groundSurfaceElev'], invertElev=sjunc['invertElev'], manholeSA=sjunc['manholeSA'], inletCode=sjunc['inletCode'], linkOrCellI=sjunc['linkOrCellI'], nodeOrCellJ=sjunc['nodeOrCellJ'], weirSideLength=sjunc['weirSideLength'], orificeDiameter=sjunc['orificeDiameter']) # Associate SuperJunction with StormPipeNetworkFile superJunction.stormPipeNetworkFile = self
[ "def", "_createSjunc", "(", "self", ",", "sjuncs", ")", ":", "for", "sjunc", "in", "sjuncs", ":", "# Create GSSHAPY SuperJunction object", "superJunction", "=", "SuperJunction", "(", "sjuncNumber", "=", "sjunc", "[", "'sjuncNumber'", "]", ",", "groundSurfaceElev", "=", "sjunc", "[", "'groundSurfaceElev'", "]", ",", "invertElev", "=", "sjunc", "[", "'invertElev'", "]", ",", "manholeSA", "=", "sjunc", "[", "'manholeSA'", "]", ",", "inletCode", "=", "sjunc", "[", "'inletCode'", "]", ",", "linkOrCellI", "=", "sjunc", "[", "'linkOrCellI'", "]", ",", "nodeOrCellJ", "=", "sjunc", "[", "'nodeOrCellJ'", "]", ",", "weirSideLength", "=", "sjunc", "[", "'weirSideLength'", "]", ",", "orificeDiameter", "=", "sjunc", "[", "'orificeDiameter'", "]", ")", "# Associate SuperJunction with StormPipeNetworkFile", "superJunction", ".", "stormPipeNetworkFile", "=", "self" ]
Create GSSHAPY SuperJunction Objects Method
[ "Create", "GSSHAPY", "SuperJunction", "Objects", "Method" ]
python
train
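_createSjunc is a private parse helper, so this only illustrates the dict shape it expects; every key below is read verbatim in the method, while the values and the spn_file instance are placeholders:

sjuncs = [{
    'sjuncNumber': 1,
    'groundSurfaceElev': 100.0,
    'invertElev': 95.5,
    'manholeSA': 1.2,
    'inletCode': 0,
    'linkOrCellI': 10,
    'nodeOrCellJ': 12,
    'weirSideLength': 0.5,
    'orificeDiameter': 0.3,
}]
spn_file._createSjunc(sjuncs)   # spn_file: an already-read StormPipeNetworkFile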
gccxml/pygccxml
pygccxml/parser/project_reader.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/project_reader.py#L373-L417
def read_xml(self, file_configuration): """parses C++ code, defined on the file_configurations and returns GCCXML generated file content""" xml_file_path = None delete_xml_file = True fc = file_configuration reader = source_reader.source_reader_t( self.__config, None, self.__decl_factory) try: if fc.content_type == fc.CONTENT_TYPE.STANDARD_SOURCE_FILE: self.logger.info('Parsing source file "%s" ... ', fc.data) xml_file_path = reader.create_xml_file(fc.data) elif fc.content_type == \ file_configuration_t.CONTENT_TYPE.GCCXML_GENERATED_FILE: self.logger.info('Parsing xml file "%s" ... ', fc.data) xml_file_path = fc.data delete_xml_file = False elif fc.content_type == fc.CONTENT_TYPE.CACHED_SOURCE_FILE: # TODO: raise error when header file does not exist if not os.path.exists(fc.cached_source_file): dir_ = os.path.split(fc.cached_source_file)[0] if dir_ and not os.path.exists(dir_): os.makedirs(dir_) self.logger.info( 'Creating xml file "%s" from source file "%s" ... ', fc.cached_source_file, fc.data) xml_file_path = reader.create_xml_file( fc.data, fc.cached_source_file) else: xml_file_path = fc.cached_source_file else: xml_file_path = reader.create_xml_file_from_string(fc.data) with open(xml_file_path, "r") as xml_file: xml = xml_file.read() utils.remove_file_no_raise(xml_file_path, self.__config) self.__xml_generator_from_xml_file = \ reader.xml_generator_from_xml_file return xml finally: if xml_file_path and delete_xml_file: utils.remove_file_no_raise(xml_file_path, self.__config)
[ "def", "read_xml", "(", "self", ",", "file_configuration", ")", ":", "xml_file_path", "=", "None", "delete_xml_file", "=", "True", "fc", "=", "file_configuration", "reader", "=", "source_reader", ".", "source_reader_t", "(", "self", ".", "__config", ",", "None", ",", "self", ".", "__decl_factory", ")", "try", ":", "if", "fc", ".", "content_type", "==", "fc", ".", "CONTENT_TYPE", ".", "STANDARD_SOURCE_FILE", ":", "self", ".", "logger", ".", "info", "(", "'Parsing source file \"%s\" ... '", ",", "fc", ".", "data", ")", "xml_file_path", "=", "reader", ".", "create_xml_file", "(", "fc", ".", "data", ")", "elif", "fc", ".", "content_type", "==", "file_configuration_t", ".", "CONTENT_TYPE", ".", "GCCXML_GENERATED_FILE", ":", "self", ".", "logger", ".", "info", "(", "'Parsing xml file \"%s\" ... '", ",", "fc", ".", "data", ")", "xml_file_path", "=", "fc", ".", "data", "delete_xml_file", "=", "False", "elif", "fc", ".", "content_type", "==", "fc", ".", "CONTENT_TYPE", ".", "CACHED_SOURCE_FILE", ":", "# TODO: raise error when header file does not exist", "if", "not", "os", ".", "path", ".", "exists", "(", "fc", ".", "cached_source_file", ")", ":", "dir_", "=", "os", ".", "path", ".", "split", "(", "fc", ".", "cached_source_file", ")", "[", "0", "]", "if", "dir_", "and", "not", "os", ".", "path", ".", "exists", "(", "dir_", ")", ":", "os", ".", "makedirs", "(", "dir_", ")", "self", ".", "logger", ".", "info", "(", "'Creating xml file \"%s\" from source file \"%s\" ... '", ",", "fc", ".", "cached_source_file", ",", "fc", ".", "data", ")", "xml_file_path", "=", "reader", ".", "create_xml_file", "(", "fc", ".", "data", ",", "fc", ".", "cached_source_file", ")", "else", ":", "xml_file_path", "=", "fc", ".", "cached_source_file", "else", ":", "xml_file_path", "=", "reader", ".", "create_xml_file_from_string", "(", "fc", ".", "data", ")", "with", "open", "(", "xml_file_path", ",", "\"r\"", ")", "as", "xml_file", ":", "xml", "=", "xml_file", ".", "read", "(", ")", "utils", ".", "remove_file_no_raise", "(", "xml_file_path", ",", "self", ".", "__config", ")", "self", ".", "__xml_generator_from_xml_file", "=", "reader", ".", "xml_generator_from_xml_file", "return", "xml", "finally", ":", "if", "xml_file_path", "and", "delete_xml_file", ":", "utils", ".", "remove_file_no_raise", "(", "xml_file_path", ",", "self", ".", "__config", ")" ]
parses C++ code, defined on the file_configurations and returns GCCXML generated file content
[ "parses", "C", "++", "code", "defined", "on", "the", "file_configurations", "and", "returns", "GCCXML", "generated", "file", "content" ]
python
train
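A sketch assuming CastXML is installed; create_text_fc matches the create_xml_file_from_string branch above, and the configuration argument names are assumptions about pygccxml's public API:

from pygccxml import parser   # assumes pygccxml + CastXML are available

config = parser.xml_generator_configuration_t(
    xml_generator_path='/usr/bin/castxml',   # adjust to your install
    xml_generator='castxml')
reader = parser.project_reader_t(config)
xml = reader.read_xml(parser.create_text_fc('struct point_t { int x; int y; };'))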
frictionlessdata/tableschema-pandas-py
tableschema_pandas/mapper.py
https://github.com/frictionlessdata/tableschema-pandas-py/blob/ef941dbc12f5d346e9612f8fec1b4b356b8493ca/tableschema_pandas/mapper.py#L89-L117
def convert_type(self, type): """Convert type to Pandas """ # Mapping mapping = { 'any': np.dtype('O'), 'array': np.dtype(list), 'boolean': np.dtype(bool), 'date': np.dtype('O'), 'datetime': np.dtype('datetime64[ns]'), 'duration': np.dtype('O'), 'geojson': np.dtype('O'), 'geopoint': np.dtype('O'), 'integer': np.dtype(int), 'number': np.dtype(float), 'object': np.dtype(dict), 'string': np.dtype('O'), 'time': np.dtype('O'), 'year': np.dtype(int), 'yearmonth': np.dtype('O'), } # Get type if type not in mapping: message = 'Type "%s" is not supported' % type raise tableschema.exceptions.StorageError(message) return mapping[type]
[ "def", "convert_type", "(", "self", ",", "type", ")", ":", "# Mapping", "mapping", "=", "{", "'any'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'array'", ":", "np", ".", "dtype", "(", "list", ")", ",", "'boolean'", ":", "np", ".", "dtype", "(", "bool", ")", ",", "'date'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'datetime'", ":", "np", ".", "dtype", "(", "'datetime64[ns]'", ")", ",", "'duration'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'geojson'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'geopoint'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'integer'", ":", "np", ".", "dtype", "(", "int", ")", ",", "'number'", ":", "np", ".", "dtype", "(", "float", ")", ",", "'object'", ":", "np", ".", "dtype", "(", "dict", ")", ",", "'string'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'time'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "'year'", ":", "np", ".", "dtype", "(", "int", ")", ",", "'yearmonth'", ":", "np", ".", "dtype", "(", "'O'", ")", ",", "}", "# Get type", "if", "type", "not", "in", "mapping", ":", "message", "=", "'Type \"%s\" is not supported'", "%", "type", "raise", "tableschema", ".", "exceptions", ".", "StorageError", "(", "message", ")", "return", "mapping", "[", "type", "]" ]
Convert type to Pandas
[ "Convert", "type", "to", "Pandas" ]
python
train
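The mapping is pure data, so exercising it is direct; the no-argument Mapper() construction is an assumption about this class:

import numpy as np
import tableschema

mapper = Mapper()   # assumed no-arg constructor
assert mapper.convert_type('integer') == np.dtype(int)
assert mapper.convert_type('datetime') == np.dtype('datetime64[ns]')
try:
    mapper.convert_type('uuid')   # not in the mapping above
except tableschema.exceptions.StorageError as err:
    print(err)   # 'Type "uuid" is not supported'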
glitchassassin/lackey
lackey/RegionMatching.py
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1293-L1297
def saveLastScreenImage(self): """ Saves the last image taken on this region's screen to a temporary file """ bitmap = self.getLastScreenImage() _, target_file = tempfile.mkstemp(".png") cv2.imwrite(target_file, bitmap)
[ "def", "saveLastScreenImage", "(", "self", ")", ":", "bitmap", "=", "self", ".", "getLastScreenImage", "(", ")", "_", ",", "target_file", "=", "tempfile", ".", "mkstemp", "(", "\".png\"", ")", "cv2", ".", "imwrite", "(", "target_file", ",", "bitmap", ")" ]
Saves the last image taken on this region's screen to a temporary file
[ "Saves", "the", "last", "image", "taken", "on", "this", "region", "s", "screen", "to", "a", "temporary", "file" ]
python
train
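saveLastScreenImage only works after something has populated the region's last capture; whether an exists() search does that is an assumption here. Note the record's implementation writes a temp file but never returns its path:

from lackey import Screen   # desktop session required

region = Screen(0)              # primary screen as a Region
region.exists("button.png")     # assumed to capture the screen first
region.saveLastScreenImage()    # writes the capture to a temporary .png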
softlayer/softlayer-python
SoftLayer/CLI/order/place_quote.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/order/place_quote.py#L30-L96
def cli(env, package_keyname, location, preset, name, send_email, complex_type, extras, order_items): """Place a quote. This CLI command is used for creating a quote of the specified package in the given location (denoted by a datacenter's long name). Orders made via the CLI can then be converted to be made programmatically by calling SoftLayer.OrderingManager.place_quote() with the same keynames. Packages for ordering can be retrieved from `slcli order package-list` Presets for ordering can be retrieved from `slcli order preset-list` (not all packages have presets) Items can be retrieved from `slcli order item-list`. In order to find required items for the order, use `slcli order category-list`, and then provide the --category option for each category code in `slcli order item-list`. Example:: # Place quote a VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk, # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13 slcli order place-quote --name "foobar" --send-email CLOUD_SERVER DALLAS13 \\ GUEST_CORES_4 \\ RAM_16_GB \\ REBOOT_REMOTE_CONSOLE \\ 1_GBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS \\ BANDWIDTH_0_GB_2 \\ 1_IP_ADDRESS \\ GUEST_DISK_100_GB_SAN \\ OS_UBUNTU_16_04_LTS_XENIAL_XERUS_MINIMAL_64_BIT_FOR_VSI \\ MONITORING_HOST_PING \\ NOTIFICATION_EMAIL_AND_TICKET \\ AUTOMATED_NOTIFICATION \\ UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT \\ NESSUS_VULNERABILITY_ASSESSMENT_REPORTING \\ --extras '{"virtualGuests": [{"hostname": "test", "domain": "softlayer.com"}]}' \\ --complex-type SoftLayer_Container_Product_Order_Virtual_Guest """ manager = ordering.OrderingManager(env.client) if extras: try: extras = json.loads(extras) except ValueError as err: raise exceptions.CLIAbort("There was an error when parsing the --extras value: {}".format(err)) args = (package_keyname, location, order_items) kwargs = {'preset_keyname': preset, 'extras': extras, 'quantity': 1, 'quote_name': name, 'send_email': send_email, 'complex_type': complex_type} order = manager.place_quote(*args, **kwargs) table = formatting.KeyValueTable(['name', 'value']) table.align['name'] = 'r' table.align['value'] = 'l' table.add_row(['id', order['quote']['id']]) table.add_row(['name', order['quote']['name']]) table.add_row(['created', order['orderDate']]) table.add_row(['expires', order['quote']['expirationDate']]) table.add_row(['status', order['quote']['status']]) env.fout(table)
[ "def", "cli", "(", "env", ",", "package_keyname", ",", "location", ",", "preset", ",", "name", ",", "send_email", ",", "complex_type", ",", "extras", ",", "order_items", ")", ":", "manager", "=", "ordering", ".", "OrderingManager", "(", "env", ".", "client", ")", "if", "extras", ":", "try", ":", "extras", "=", "json", ".", "loads", "(", "extras", ")", "except", "ValueError", "as", "err", ":", "raise", "exceptions", ".", "CLIAbort", "(", "\"There was an error when parsing the --extras value: {}\"", ".", "format", "(", "err", ")", ")", "args", "=", "(", "package_keyname", ",", "location", ",", "order_items", ")", "kwargs", "=", "{", "'preset_keyname'", ":", "preset", ",", "'extras'", ":", "extras", ",", "'quantity'", ":", "1", ",", "'quote_name'", ":", "name", ",", "'send_email'", ":", "send_email", ",", "'complex_type'", ":", "complex_type", "}", "order", "=", "manager", ".", "place_quote", "(", "*", "args", ",", "*", "*", "kwargs", ")", "table", "=", "formatting", ".", "KeyValueTable", "(", "[", "'name'", ",", "'value'", "]", ")", "table", ".", "align", "[", "'name'", "]", "=", "'r'", "table", ".", "align", "[", "'value'", "]", "=", "'l'", "table", ".", "add_row", "(", "[", "'id'", ",", "order", "[", "'quote'", "]", "[", "'id'", "]", "]", ")", "table", ".", "add_row", "(", "[", "'name'", ",", "order", "[", "'quote'", "]", "[", "'name'", "]", "]", ")", "table", ".", "add_row", "(", "[", "'created'", ",", "order", "[", "'orderDate'", "]", "]", ")", "table", ".", "add_row", "(", "[", "'expires'", ",", "order", "[", "'quote'", "]", "[", "'expirationDate'", "]", "]", ")", "table", ".", "add_row", "(", "[", "'status'", ",", "order", "[", "'quote'", "]", "[", "'status'", "]", "]", ")", "env", ".", "fout", "(", "table", ")" ]
Place a quote. This CLI command is used for creating a quote of the specified package in the given location (denoted by a datacenter's long name). Orders made via the CLI can then be converted to be made programmatically by calling SoftLayer.OrderingManager.place_quote() with the same keynames. Packages for ordering can be retrieved from `slcli order package-list` Presets for ordering can be retrieved from `slcli order preset-list` (not all packages have presets) Items can be retrieved from `slcli order item-list`. In order to find required items for the order, use `slcli order category-list`, and then provide the --category option for each category code in `slcli order item-list`. Example:: # Place quote a VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk, # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13 slcli order place-quote --name "foobar" --send-email CLOUD_SERVER DALLAS13 \\ GUEST_CORES_4 \\ RAM_16_GB \\ REBOOT_REMOTE_CONSOLE \\ 1_GBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS \\ BANDWIDTH_0_GB_2 \\ 1_IP_ADDRESS \\ GUEST_DISK_100_GB_SAN \\ OS_UBUNTU_16_04_LTS_XENIAL_XERUS_MINIMAL_64_BIT_FOR_VSI \\ MONITORING_HOST_PING \\ NOTIFICATION_EMAIL_AND_TICKET \\ AUTOMATED_NOTIFICATION \\ UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT \\ NESSUS_VULNERABILITY_ASSESSMENT_REPORTING \\ --extras '{"virtualGuests": [{"hostname": "test", "domain": "softlayer.com"}]}' \\ --complex-type SoftLayer_Container_Product_Order_Virtual_Guest
[ "Place", "a", "quote", "." ]
python
train
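Because the command maps straight onto OrderingManager.place_quote, a programmatic sketch follows; the keynames are placeholders to be swapped for real `slcli order item-list` output, and the kwargs mirror what the CLI builds above:

import SoftLayer
from SoftLayer import ordering

client = SoftLayer.create_client_from_env()   # reads ~/.softlayer or env vars
mgr = ordering.OrderingManager(client)
quote = mgr.place_quote('CLOUD_SERVER', 'DALLAS13',
                        ['GUEST_CORES_4', 'RAM_16_GB'],   # placeholder items
                        quote_name='foobar', send_email=True)
print(quote['quote']['id'], quote['quote']['status'])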
twisted/txacme
src/txacme/client.py
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L700-L748
def _check_response(cls, response, content_type=JSON_CONTENT_TYPE): """ Check response content and its type. .. note:: Unlike :mod:`acme.client`, checking is strict. :param bytes content_type: Expected Content-Type response header. If the response Content-Type does not match, :exc:`ClientError` is raised. :raises .ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises ~acme.errors.ClientError: In case of other networking errors. """ def _got_failure(f): f.trap(ValueError) return None def _got_json(jobj): if 400 <= response.code < 600: if response_ct == JSON_ERROR_CONTENT_TYPE and jobj is not None: raise ServerError( messages.Error.from_json(jobj), response) else: # response is not JSON object raise errors.ClientError(response) elif response_ct != content_type: raise errors.ClientError( 'Unexpected response Content-Type: {0!r}'.format( response_ct)) elif content_type == JSON_CONTENT_TYPE and jobj is None: raise errors.ClientError(response) return response response_ct = response.headers.getRawHeaders( b'Content-Type', [None])[0] action = LOG_JWS_CHECK_RESPONSE( expected_content_type=content_type, response_content_type=response_ct) with action.context(): # TODO: response.json() is called twice, once here, and # once in _get and _post clients return ( DeferredContext(response.json()) .addErrback(_got_failure) .addCallback(_got_json) .addActionFinish())
[ "def", "_check_response", "(", "cls", ",", "response", ",", "content_type", "=", "JSON_CONTENT_TYPE", ")", ":", "def", "_got_failure", "(", "f", ")", ":", "f", ".", "trap", "(", "ValueError", ")", "return", "None", "def", "_got_json", "(", "jobj", ")", ":", "if", "400", "<=", "response", ".", "code", "<", "600", ":", "if", "response_ct", "==", "JSON_ERROR_CONTENT_TYPE", "and", "jobj", "is", "not", "None", ":", "raise", "ServerError", "(", "messages", ".", "Error", ".", "from_json", "(", "jobj", ")", ",", "response", ")", "else", ":", "# response is not JSON object", "raise", "errors", ".", "ClientError", "(", "response", ")", "elif", "response_ct", "!=", "content_type", ":", "raise", "errors", ".", "ClientError", "(", "'Unexpected response Content-Type: {0!r}'", ".", "format", "(", "response_ct", ")", ")", "elif", "content_type", "==", "JSON_CONTENT_TYPE", "and", "jobj", "is", "None", ":", "raise", "errors", ".", "ClientError", "(", "response", ")", "return", "response", "response_ct", "=", "response", ".", "headers", ".", "getRawHeaders", "(", "b'Content-Type'", ",", "[", "None", "]", ")", "[", "0", "]", "action", "=", "LOG_JWS_CHECK_RESPONSE", "(", "expected_content_type", "=", "content_type", ",", "response_content_type", "=", "response_ct", ")", "with", "action", ".", "context", "(", ")", ":", "# TODO: response.json() is called twice, once here, and", "# once in _get and _post clients", "return", "(", "DeferredContext", "(", "response", ".", "json", "(", ")", ")", ".", "addErrback", "(", "_got_failure", ")", ".", "addCallback", "(", "_got_json", ")", ".", "addActionFinish", "(", ")", ")" ]
Check response content and its type. .. note:: Unlike :mod:`acme.client`, checking is strict. :param bytes content_type: Expected Content-Type response header. If the response Content-Type does not match, :exc:`ClientError` is raised. :raises .ServerError: If server response body carries HTTP Problem (draft-ietf-appsawg-http-problem-00). :raises ~acme.errors.ClientError: In case of other networking errors.
[ "Check", "response", "content", "and", "its", "type", "." ]
python
train
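A sketch of exercising the checker with a stubbed treq-style response; the stub class is hypothetical, and only the classmethod call plus the error outcomes come from this record:

from twisted.internet import defer
from twisted.web.http_headers import Headers

class StubResponse(object):   # hypothetical test double
    code = 200
    headers = Headers({b'Content-Type': [JSON_CONTENT_TYPE]})

    def json(self):
        return defer.succeed({'status': 'valid'})

d = Client._check_response(StubResponse())   # assumes Client is the enclosing class
d.addCallback(lambda resp: print('accepted', resp.code))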
chaoss/grimoirelab-sortinghat
sortinghat/parsing/sh.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/parsing/sh.py#L88-L124
def __parse_blacklist(self, json): """Parse blacklist entries using Sorting Hat format. The Sorting Hat blacklist format is a JSON stream that stores a list of blacklisted entries. Next, there is an example of a valid stream: { "blacklist": [ "John Doe", "John Smith", "[email protected]" ] } :param stream: stream to parse :raises InvalidFormatError: raised when the format of the stream is not valid. """ try: for entry in json['blacklist']: if not entry: msg = "invalid json format. Blacklist entries cannot be null or empty" raise InvalidFormatError(cause=msg) excluded = self.__encode(entry) bl = self._blacklist.get(excluded, None) if not bl: bl = MatchingBlacklist(excluded=excluded) self._blacklist[excluded] = bl except KeyError as e: msg = "invalid json format. Attribute %s not found" % e.args raise InvalidFormatError(cause=msg)
[ "def", "__parse_blacklist", "(", "self", ",", "json", ")", ":", "try", ":", "for", "entry", "in", "json", "[", "'blacklist'", "]", ":", "if", "not", "entry", ":", "msg", "=", "\"invalid json format. Blacklist entries cannot be null or empty\"", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")", "excluded", "=", "self", ".", "__encode", "(", "entry", ")", "bl", "=", "self", ".", "_blacklist", ".", "get", "(", "excluded", ",", "None", ")", "if", "not", "bl", ":", "bl", "=", "MatchingBlacklist", "(", "excluded", "=", "excluded", ")", "self", ".", "_blacklist", "[", "excluded", "]", "=", "bl", "except", "KeyError", "as", "e", ":", "msg", "=", "\"invalid json format. Attribute %s not found\"", "%", "e", ".", "args", "raise", "InvalidFormatError", "(", "cause", "=", "msg", ")" ]
Parse blacklist entries using Sorting Hat format. The Sorting Hat blacklist format is a JSON stream that stores a list of blacklisted entries. Next, there is an example of a valid stream: { "blacklist": [ "John Doe", "John Smith", "[email protected]" ] } :param stream: stream to parse :raises InvalidFormatError: raised when the format of the stream is not valid.
[ "Parse", "blacklist", "entries", "using", "Sorting", "Hat", "format", "." ]
python
train
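The method is name-mangled and normally runs inside the parser's own loading flow; the SortingHatParser instance below is assumed to exist already, and the mangled attribute access is shown only for illustration:

import json

data = json.loads('{"blacklist": ["John Doe", "[email protected]"]}')
parser._SortingHatParser__parse_blacklist(data)   # name-mangled private call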
saltstack/salt
salt/modules/bigip.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bigip.py#L622-L773
def create_pool(hostname, username, password, name, members=None, allow_nat=None, allow_snat=None, description=None, gateway_failsafe_device=None, ignore_persisted_weight=None, ip_tos_to_client=None, ip_tos_to_server=None, link_qos_to_client=None, link_qos_to_server=None, load_balancing_mode=None, min_active_members=None, min_up_members=None, min_up_members_action=None, min_up_members_checking=None, monitor=None, profiles=None, queue_depth_limit=None, queue_on_connection_limit=None, queue_time_limit=None, reselect_tries=None, service_down_action=None, slow_ramp_time=None): ''' A function to connect to a bigip device and create a pool. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the pool to create. members List of comma delimited pool members to add to the pool. i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 allow_nat [yes | no] allow_snat [yes | no] description [string] gateway_failsafe_device [string] ignore_persisted_weight [enabled | disabled] ip_tos_to_client [pass-through | [integer]] ip_tos_to_server [pass-through | [integer]] link_qos_to_client [pass-through | [integer]] link_qos_to_server [pass-through | [integer]] load_balancing_mode [dynamic-ratio-member | dynamic-ratio-node | fastest-app-response | fastest-node | least-connections-members | least-connections-node | least-sessions | observed-member | observed-node | predictive-member | predictive-node | ratio-least-connections-member | ratio-least-connections-node | ratio-member | ratio-node | ratio-session | round-robin | weighted-least-connections-member | weighted-least-connections-node] min_active_members [integer] min_up_members [integer] min_up_members_action [failover | reboot | restart-all] min_up_members_checking [enabled | disabled] monitor [name] profiles [none | profile_name] queue_depth_limit [integer] queue_on_connection_limit [enabled | disabled] queue_time_limit [integer] reselect_tries [integer] service_down_action [drop | none | reselect | reset] slow_ramp_time [integer] CLI Example:: salt '*' bigip.create_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 monitor=http ''' params = { 'description': description, 'gateway-failsafe-device': gateway_failsafe_device, 'ignore-persisted-weight': ignore_persisted_weight, 'ip-tos-to-client': ip_tos_to_client, 'ip-tos-to-server': ip_tos_to_server, 'link-qos-to-client': link_qos_to_client, 'link-qos-to-server': link_qos_to_server, 'load-balancing-mode': load_balancing_mode, 'min-active-members': min_active_members, 'min-up-members': min_up_members, 'min-up-members-action': min_up_members_action, 'min-up-members-checking': min_up_members_checking, 'monitor': monitor, 'profiles': profiles, 'queue-on-connection-limit': queue_on_connection_limit, 'queue-depth-limit': queue_depth_limit, 'queue-time-limit': queue_time_limit, 'reselect-tries': reselect_tries, 'service-down-action': service_down_action, 'slow-ramp-time': slow_ramp_time } # some options take yes no others take true false. 
# Figure out when to use which without confusing the end user
    toggles = {
        'allow-nat': {'type': 'yes_no', 'value': allow_nat},
        'allow-snat': {'type': 'yes_no', 'value': allow_snat}
    }

    #build payload
    payload = _loop_payload(params)
    payload['name'] = name

    #determine toggles
    payload = _determine_toggles(payload, toggles)

    #specify members if provided
    if members is not None:
        payload['members'] = _build_list(members, 'ltm:pool:members')

    #build session
    bigip_session = _build_session(username, password)

    #post to REST
    try:
        response = bigip_session.post(
            BIG_IP_URL_BASE.format(host=hostname) + '/ltm/pool',
            data=salt.utils.json.dumps(payload)
        )
    except requests.exceptions.ConnectionError as e:
        return _load_connection_error(hostname, e)

    return _load_response(response)
[ "def", "create_pool", "(", "hostname", ",", "username", ",", "password", ",", "name", ",", "members", "=", "None", ",", "allow_nat", "=", "None", ",", "allow_snat", "=", "None", ",", "description", "=", "None", ",", "gateway_failsafe_device", "=", "None", ",", "ignore_persisted_weight", "=", "None", ",", "ip_tos_to_client", "=", "None", ",", "ip_tos_to_server", "=", "None", ",", "link_qos_to_client", "=", "None", ",", "link_qos_to_server", "=", "None", ",", "load_balancing_mode", "=", "None", ",", "min_active_members", "=", "None", ",", "min_up_members", "=", "None", ",", "min_up_members_action", "=", "None", ",", "min_up_members_checking", "=", "None", ",", "monitor", "=", "None", ",", "profiles", "=", "None", ",", "queue_depth_limit", "=", "None", ",", "queue_on_connection_limit", "=", "None", ",", "queue_time_limit", "=", "None", ",", "reselect_tries", "=", "None", ",", "service_down_action", "=", "None", ",", "slow_ramp_time", "=", "None", ")", ":", "params", "=", "{", "'description'", ":", "description", ",", "'gateway-failsafe-device'", ":", "gateway_failsafe_device", ",", "'ignore-persisted-weight'", ":", "ignore_persisted_weight", ",", "'ip-tos-to-client'", ":", "ip_tos_to_client", ",", "'ip-tos-to-server'", ":", "ip_tos_to_server", ",", "'link-qos-to-client'", ":", "link_qos_to_client", ",", "'link-qos-to-server'", ":", "link_qos_to_server", ",", "'load-balancing-mode'", ":", "load_balancing_mode", ",", "'min-active-members'", ":", "min_active_members", ",", "'min-up-members'", ":", "min_up_members", ",", "'min-up-members-action'", ":", "min_up_members_action", ",", "'min-up-members-checking'", ":", "min_up_members_checking", ",", "'monitor'", ":", "monitor", ",", "'profiles'", ":", "profiles", ",", "'queue-on-connection-limit'", ":", "queue_on_connection_limit", ",", "'queue-depth-limit'", ":", "queue_depth_limit", ",", "'queue-time-limit'", ":", "queue_time_limit", ",", "'reselect-tries'", ":", "reselect_tries", ",", "'service-down-action'", ":", "service_down_action", ",", "'slow-ramp-time'", ":", "slow_ramp_time", "}", "# some options take yes no others take true false. Figure out when to use which without", "# confusing the end user", "toggles", "=", "{", "'allow-nat'", ":", "{", "'type'", ":", "'yes_no'", ",", "'value'", ":", "allow_nat", "}", ",", "'allow-snat'", ":", "{", "'type'", ":", "'yes_no'", ",", "'value'", ":", "allow_snat", "}", "}", "#build payload", "payload", "=", "_loop_payload", "(", "params", ")", "payload", "[", "'name'", "]", "=", "name", "#determine toggles", "payload", "=", "_determine_toggles", "(", "payload", ",", "toggles", ")", "#specify members if provided", "if", "members", "is", "not", "None", ":", "payload", "[", "'members'", "]", "=", "_build_list", "(", "members", ",", "'ltm:pool:members'", ")", "#build session", "bigip_session", "=", "_build_session", "(", "username", ",", "password", ")", "#post to REST", "try", ":", "response", "=", "bigip_session", ".", "post", "(", "BIG_IP_URL_BASE", ".", "format", "(", "host", "=", "hostname", ")", "+", "'/ltm/pool'", ",", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "payload", ")", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "e", ":", "return", "_load_connection_error", "(", "hostname", ",", "e", ")", "return", "_load_response", "(", "response", ")" ]
A function to connect to a bigip device and create a pool. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the pool to create. members List of comma delimited pool members to add to the pool. i.e. 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 allow_nat [yes | no] allow_snat [yes | no] description [string] gateway_failsafe_device [string] ignore_persisted_weight [enabled | disabled] ip_tos_to_client [pass-through | [integer]] ip_tos_to_server [pass-through | [integer]] link_qos_to_client [pass-through | [integer]] link_qos_to_server [pass-through | [integer]] load_balancing_mode [dynamic-ratio-member | dynamic-ratio-node | fastest-app-response | fastest-node | least-connections-members | least-connections-node | least-sessions | observed-member | observed-node | predictive-member | predictive-node | ratio-least-connections-member | ratio-least-connections-node | ratio-member | ratio-node | ratio-session | round-robin | weighted-least-connections-member | weighted-least-connections-node] min_active_members [integer] min_up_members [integer] min_up_members_action [failover | reboot | restart-all] min_up_members_checking [enabled | disabled] monitor [name] profiles [none | profile_name] queue_depth_limit [integer] queue_on_connection_limit [enabled | disabled] queue_time_limit [integer] reselect_tries [integer] service_down_action [drop | none | reselect | reset] slow_ramp_time [integer] CLI Example:: salt '*' bigip.create_pool bigip admin admin my-pool 10.1.1.1:80,10.1.1.2:80,10.1.1.3:80 monitor=http
[ "A", "function", "to", "connect", "to", "a", "bigip", "device", "and", "create", "a", "pool", "." ]
python
train
pandas-dev/pandas
pandas/core/panel.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1308-L1330
def shift(self, periods=1, freq=None, axis='major'): """ Shift index by desired number of periods with an optional time freq. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. This is different from the behavior of DataFrame.shift() Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, optional axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- shifted : Panel """ if freq: return self.tshift(periods, freq, axis=axis) return super().slice_shift(periods, axis=axis)
[ "def", "shift", "(", "self", ",", "periods", "=", "1", ",", "freq", "=", "None", ",", "axis", "=", "'major'", ")", ":", "if", "freq", ":", "return", "self", ".", "tshift", "(", "periods", ",", "freq", ",", "axis", "=", "axis", ")", "return", "super", "(", ")", ".", "slice_shift", "(", "periods", ",", "axis", "=", "axis", ")" ]
Shift index by desired number of periods with an optional time freq. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. This is different from the behavior of DataFrame.shift() Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, optional axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- shifted : Panel
[ "Shift", "index", "by", "desired", "number", "of", "periods", "with", "an", "optional", "time", "freq", "." ]
python
train
rene-aguirre/pywinusb
pywinusb/hid/core.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L266-L279
def get_parent_device(self):
    """Retrieve parent device string id"""
    if not self.parent_instance_id:
        return ""
    dev_buffer_type = winapi.c_tchar * MAX_DEVICE_ID_LEN
    dev_buffer = dev_buffer_type()
    try:
        if winapi.CM_Get_Device_ID(self.parent_instance_id, byref(dev_buffer),
                MAX_DEVICE_ID_LEN, 0) == 0: #success
            return dev_buffer.value
        return ""
    finally:
        del dev_buffer
        del dev_buffer_type
[ "def", "get_parent_device", "(", "self", ")", ":", "if", "not", "self", ".", "parent_instance_id", ":", "return", "\"\"", "dev_buffer_type", "=", "winapi", ".", "c_tchar", "*", "MAX_DEVICE_ID_LEN", "dev_buffer", "=", "dev_buffer_type", "(", ")", "try", ":", "if", "winapi", ".", "CM_Get_Device_ID", "(", "self", ".", "parent_instance_id", ",", "byref", "(", "dev_buffer", ")", ",", "MAX_DEVICE_ID_LEN", ",", "0", ")", "==", "0", ":", "#success\r", "return", "dev_buffer", ".", "value", "return", "\"\"", "finally", ":", "del", "dev_buffer", "del", "dev_buffer_type" ]
Retrieve parent device string id
[ "Retreive", "parent", "device", "string", "id" ]
python
train
jlmadurga/permabots
permabots/views/api/bot.py
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/bot.py#L172-L180
def delete(self, request, bot_id, id, format=None): """ Delete existing Telegram Bot --- responseMessages: - code: 401 message: Not authenticated """ return super(TelegramBotDetail, self).delete(request, bot_id, id, format)
[ "def", "delete", "(", "self", ",", "request", ",", "bot_id", ",", "id", ",", "format", "=", "None", ")", ":", "return", "super", "(", "TelegramBotDetail", ",", "self", ")", ".", "delete", "(", "request", ",", "bot_id", ",", "id", ",", "format", ")" ]
Delete existing Telegram Bot --- responseMessages: - code: 401 message: Not authenticated
[ "Delete", "existing", "Telegram", "Bot", "---", "responseMessages", ":", "-", "code", ":", "401", "message", ":", "Not", "authenticated" ]
python
train
thusoy/pwm
pwm/core.py
https://github.com/thusoy/pwm/blob/fff7d755c34f3a7235a8bf217ffa2ff5aed4926f/pwm/core.py#L166-L181
def get_domain(self, domain_name): """ Get the :class:`Domain <pwm.Domain>` object from a name. :param domain_name: The domain name to fetch the object for. :returns: The :class:`Domain <pwm.core.Domain>` class with this domain_name if found, else None. """ protocol = self.database_uri.split(':', 1)[0] if protocol in ('https', 'http'): return self._get_domain_from_rest_api(domain_name) else: domain = self._get_domain_from_db(domain_name) if domain: return domain else: raise NoSuchDomainException
[ "def", "get_domain", "(", "self", ",", "domain_name", ")", ":", "protocol", "=", "self", ".", "database_uri", ".", "split", "(", "':'", ",", "1", ")", "[", "0", "]", "if", "protocol", "in", "(", "'https'", ",", "'http'", ")", ":", "return", "self", ".", "_get_domain_from_rest_api", "(", "domain_name", ")", "else", ":", "domain", "=", "self", ".", "_get_domain_from_db", "(", "domain_name", ")", "if", "domain", ":", "return", "domain", "else", ":", "raise", "NoSuchDomainException" ]
Get the :class:`Domain <pwm.Domain>` object from a name. :param domain_name: The domain name to fetch the object for. :returns: The :class:`Domain <pwm.core.Domain>` class with this domain_name if found, else None.
[ "Get", "the", ":", "class", ":", "Domain", "<pwm", ".", "Domain", ">", "object", "from", "a", "name", "." ]
python
test
google/tangent
tangent/grads.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/grads.py#L335-L341
def adet(z, x): """d|A|/dA = adj(A).T See Jacobi's formula: https://en.wikipedia.org/wiki/Jacobi%27s_formula """ adjugate = numpy.linalg.det(x) * numpy.linalg.pinv(x) d[x] = d[z] * numpy.transpose(adjugate)
[ "def", "adet", "(", "z", ",", "x", ")", ":", "adjugate", "=", "numpy", ".", "linalg", ".", "det", "(", "x", ")", "*", "numpy", ".", "linalg", ".", "pinv", "(", "x", ")", "d", "[", "x", "]", "=", "d", "[", "z", "]", "*", "numpy", ".", "transpose", "(", "adjugate", ")" ]
d|A|/dA = adj(A).T See Jacobi's formula: https://en.wikipedia.org/wiki/Jacobi%27s_formula
[ "d|A|", "/", "dA", "=", "adj", "(", "A", ")", ".", "T" ]
python
train
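The adet record above relies on Jacobi's formula, d|A|/dA = |A| * inv(A).T. A quick standalone numerical check of that identity (illustrative only, not part of the dataset; assumes an invertible matrix so pinv coincides with inv):

import numpy as np

def det_grad(x):
    # Jacobi's formula: the gradient of det(x) is det(x) * inv(x).T,
    # i.e. the transpose of the adjugate matrix
    return np.linalg.det(x) * np.linalg.inv(x).T

rng = np.random.default_rng(0)
x = rng.normal(size=(3, 3))
eps = 1e-6
fd = np.zeros_like(x)
for i in range(3):
    for j in range(3):
        dx = np.zeros_like(x)
        dx[i, j] = eps
        # central finite difference of det with respect to entry (i, j)
        fd[i, j] = (np.linalg.det(x + dx) - np.linalg.det(x - dx)) / (2 * eps)
assert np.allclose(fd, det_grad(x), atol=1e-5)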
google/grr
grr/server/grr_response_server/check_lib/checks.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/check_lib/checks.py#L138-L155
def Parse(self, rdf_data):
    """Process rdf data through the filter.

    Filters sift data according to filter rules. Data that passes the filter
    rule is kept, other data is dropped.

    If no filter method is provided, the data is returned unchanged.
    Otherwise, items that meet filter conditions are returned in a list.

    Args:
      rdf_data: Host data that has already been processed by a Parser into
        RDF.

    Returns:
      A list containing data items that matched the filter rules.
    """
    if self._filter:
      return list(self._filter.Parse(rdf_data, self.expression))
    return rdf_data
[ "def", "Parse", "(", "self", ",", "rdf_data", ")", ":", "if", "self", ".", "_filter", ":", "return", "list", "(", "self", ".", "_filter", ".", "Parse", "(", "rdf_data", ",", "self", ".", "expression", ")", ")", "return", "rdf_data" ]
Process rdf data through the filter.

Filters sift data according to filter rules. Data that passes the filter
rule is kept, other data is dropped.

If no filter method is provided, the data is returned unchanged.
Otherwise, items that meet filter conditions are returned in a list.

Args:
  rdf_data: Host data that has already been processed by a Parser into
    RDF.

Returns:
  A list containing data items that matched the filter rules.
[ "Process", "rdf", "data", "through", "the", "filter", "." ]
python
train
mwhooker/jones
jones/jones.py
https://github.com/mwhooker/jones/blob/121e89572ca063f456b8e94cbb8cbee26c307a8f/jones/jones.py#L223-L237
def get_associations(self, env):
    """
    Get all the associations for this env.

    Root cannot have associations, so return None for root.

    Returns a list of the hostnames associated with this environment.
    """

    if env.is_root:
        return None

    associations = self.associations.get_all()
    return [assoc for assoc in associations
            if associations[assoc] == self._get_view_path(env)]
[ "def", "get_associations", "(", "self", ",", "env", ")", ":", "if", "env", ".", "is_root", ":", "return", "None", "associations", "=", "self", ".", "associations", ".", "get_all", "(", ")", "return", "[", "assoc", "for", "assoc", "in", "associations", "if", "associations", "[", "assoc", "]", "==", "self", ".", "_get_view_path", "(", "env", ")", "]" ]
Get all the associations for this env.

Root cannot have associations, so return None for root.

Returns a list of the hostnames associated with this environment.
[ "Get", "all", "the", "associations", "for", "this", "env", "." ]
python
train
dossier/dossier.web
dossier/web/interface.py
https://github.com/dossier/dossier.web/blob/1cad1cce3c37d3a4e956abc710a2bc1afe16a092/dossier/web/interface.py#L129-L141
def set_query_params(self, query_params): '''Set the query parameters. The query parameters should be a dictionary mapping keys to strings or lists of strings. :param query_params: query parameters :type query_params: ``name |--> (str | [str])`` :rtype: :class:`Queryable` ''' self.query_params = as_multi_dict(query_params) self.apply_param_schema() return self
[ "def", "set_query_params", "(", "self", ",", "query_params", ")", ":", "self", ".", "query_params", "=", "as_multi_dict", "(", "query_params", ")", "self", ".", "apply_param_schema", "(", ")", "return", "self" ]
Set the query parameters. The query parameters should be a dictionary mapping keys to strings or lists of strings. :param query_params: query parameters :type query_params: ``name |--> (str | [str])`` :rtype: :class:`Queryable`
[ "Set", "the", "query", "parameters", "." ]
python
train
spyder-ide/spyder
spyder/utils/workers.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/workers.py#L203-L210
def terminate(self): """Terminate running processes.""" if self._process.state() == QProcess.Running: try: self._process.terminate() except Exception: pass self._fired = True
[ "def", "terminate", "(", "self", ")", ":", "if", "self", ".", "_process", ".", "state", "(", ")", "==", "QProcess", ".", "Running", ":", "try", ":", "self", ".", "_process", ".", "terminate", "(", ")", "except", "Exception", ":", "pass", "self", ".", "_fired", "=", "True" ]
Terminate running processes.
[ "Terminate", "running", "processes", "." ]
python
train
slarse/pdfebc-core
pdfebc_core/config_utils.py
https://github.com/slarse/pdfebc-core/blob/fc40857bc42365b7434714333e37d7a3487603a0/pdfebc_core/config_utils.py#L150-L169
def get_attribute_from_config(config, section, attribute): """Try to parse an attribute of the config file. Args: config (defaultdict): A defaultdict. section (str): The section of the config file to get information from. attribute (str): The attribute of the section to fetch. Returns: str: The string corresponding to the section and attribute. Raises: ConfigurationError """ section = config.get(section) if section: option = section.get(attribute) if option: return option raise ConfigurationError("Config file badly formed!\n" "Failed to get attribute '{}' from section '{}'!" .format(attribute, section))
[ "def", "get_attribute_from_config", "(", "config", ",", "section", ",", "attribute", ")", ":", "section", "=", "config", ".", "get", "(", "section", ")", "if", "section", ":", "option", "=", "section", ".", "get", "(", "attribute", ")", "if", "option", ":", "return", "option", "raise", "ConfigurationError", "(", "\"Config file badly formed!\\n\"", "\"Failed to get attribute '{}' from section '{}'!\"", ".", "format", "(", "attribute", ",", "section", ")", ")" ]
Try to parse an attribute of the config file. Args: config (defaultdict): A defaultdict. section (str): The section of the config file to get information from. attribute (str): The attribute of the section to fetch. Returns: str: The string corresponding to the section and attribute. Raises: ConfigurationError
[ "Try", "to", "parse", "an", "attribute", "of", "the", "config", "file", "." ]
python
train
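One thing worth noting about the get_attribute_from_config record above: the function reassigns `section` to the looked-up value before formatting the error message, so the message reports a dict or None rather than the section name. A minimal standalone sketch of the same two-level lookup that avoids the shadowing (names and config values are hypothetical, illustrative only):

class ConfigurationError(Exception):
    pass

def get_attribute(config, section, attribute):
    # Two-level lookup that fails loudly; the looked-up mapping is bound
    # to its own name so the error message can still report the section
    options = config.get(section)
    if options:
        value = options.get(attribute)
        if value:
            return value
    raise ConfigurationError("Config file badly formed!\n"
                             "Failed to get attribute '{}' from section '{}'!"
                             .format(attribute, section))

config = {"email": {"user": "me@example.com"}}
print(get_attribute(config, "email", "user"))  # me@example.com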
opendatateam/udata
udata/core/spatial/models.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/spatial/models.py#L60-L81
def resolve(self, geoid, id_only=False):
    '''
    Resolve a GeoZone given a GeoID.

    The start date is resolved from the given GeoID, i.e. it finds
    whether there is a zone valid at the GeoID's validity, resolves
    the `latest` alias, or uses `latest` when no validity is given.

    If `id_only` is True,
    the result will be the resolved GeoID
    instead of the resolved zone.
    '''
    level, code, validity = geoids.parse(geoid)
    qs = self(level=level, code=code)
    if id_only:
        qs = qs.only('id')
    if validity == 'latest':
        result = qs.latest()
    else:
        result = qs.valid_at(validity).first()
    return result.id if id_only and result else result
[ "def", "resolve", "(", "self", ",", "geoid", ",", "id_only", "=", "False", ")", ":", "level", ",", "code", ",", "validity", "=", "geoids", ".", "parse", "(", "geoid", ")", "qs", "=", "self", "(", "level", "=", "level", ",", "code", "=", "code", ")", "if", "id_only", ":", "qs", "=", "qs", ".", "only", "(", "'id'", ")", "if", "validity", "==", "'latest'", ":", "result", "=", "qs", ".", "latest", "(", ")", "else", ":", "result", "=", "qs", ".", "valid_at", "(", "validity", ")", ".", "first", "(", ")", "return", "result", ".", "id", "if", "id_only", "and", "result", "else", "result" ]
Resolve a GeoZone given a GeoID.

The start date is resolved from the given GeoID, i.e. it finds
whether there is a zone valid at the GeoID's validity, resolves
the `latest` alias, or uses `latest` when no validity is given.

If `id_only` is True,
the result will be the resolved GeoID
instead of the resolved zone.
[ "Resolve", "a", "GeoZone", "given", "a", "GeoID", "." ]
python
train
PyconUK/ConferenceScheduler
src/conference_scheduler/lp_problem/utils.py
https://github.com/PyconUK/ConferenceScheduler/blob/fb139f0ef2eab5ac8f4919aa4994d94d4e040030/src/conference_scheduler/lp_problem/utils.py#L33-L45
def tag_array(events):
    """
    Return a numpy array mapping events to tags

    - Rows correspond to events
    - Columns correspond to tags
    """
    all_tags = sorted(set(tag for event in events for tag in event.tags))
    array = np.zeros((len(events), len(all_tags)))
    for row, event in enumerate(events):
        for tag in event.tags:
            array[row, all_tags.index(tag)] = 1
    return array
[ "def", "tag_array", "(", "events", ")", ":", "all_tags", "=", "sorted", "(", "set", "(", "tag", "for", "event", "in", "events", "for", "tag", "in", "event", ".", "tags", ")", ")", "array", "=", "np", ".", "zeros", "(", "(", "len", "(", "events", ")", ",", "len", "(", "all_tags", ")", ")", ")", "for", "row", ",", "event", "in", "enumerate", "(", "events", ")", ":", "for", "tag", "in", "event", ".", "tags", ":", "array", "[", "row", ",", "all_tags", ".", "index", "(", "tag", ")", "]", "=", "1", "return", "array" ]
Return a numpy array mapping events to tags

- Rows correspond to events
- Columns correspond to tags
[ "Return", "a", "numpy", "array", "mapping", "events", "to", "tags" ]
python
train
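A worked example of the incidence matrix built by tag_array above (the Event type here is a stand-in for the scheduler's event class; values are illustrative):

import numpy as np
from collections import namedtuple

Event = namedtuple("Event", ["name", "tags"])  # stand-in event type

events = [Event("talk-1", ["python"]),
          Event("talk-2", ["python", "testing"])]
all_tags = sorted(set(tag for event in events for tag in event.tags))
# all_tags == ['python', 'testing']
array = np.zeros((len(events), len(all_tags)))
for row, event in enumerate(events):
    for tag in event.tags:
        array[row, all_tags.index(tag)] = 1
# array == [[1., 0.],
#           [1., 1.]]  (rows = events, columns = tags)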
twilio/twilio-python
twilio/rest/api/v2010/account/recording/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/recording/__init__.py#L165-L174
def get(self, sid): """ Constructs a RecordingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.recording.RecordingContext :rtype: twilio.rest.api.v2010.account.recording.RecordingContext """ return RecordingContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "RecordingContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
Constructs a RecordingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.api.v2010.account.recording.RecordingContext :rtype: twilio.rest.api.v2010.account.recording.RecordingContext
[ "Constructs", "a", "RecordingContext" ]
python
train
pantsbuild/pants
src/python/pants/engine/scheduler.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/engine/scheduler.py#L342-L346
def new_session(self, zipkin_trace_v2, v2_ui=False): """Creates a new SchedulerSession for this Scheduler.""" return SchedulerSession(self, self._native.new_session( self._scheduler, zipkin_trace_v2, v2_ui, multiprocessing.cpu_count()) )
[ "def", "new_session", "(", "self", ",", "zipkin_trace_v2", ",", "v2_ui", "=", "False", ")", ":", "return", "SchedulerSession", "(", "self", ",", "self", ".", "_native", ".", "new_session", "(", "self", ".", "_scheduler", ",", "zipkin_trace_v2", ",", "v2_ui", ",", "multiprocessing", ".", "cpu_count", "(", ")", ")", ")" ]
Creates a new SchedulerSession for this Scheduler.
[ "Creates", "a", "new", "SchedulerSession", "for", "this", "Scheduler", "." ]
python
train
sebp/scikit-survival
sksurv/nonparametric.py
https://github.com/sebp/scikit-survival/blob/cfc99fd20454cdd6f4f20fe331b39f2191ccaabc/sksurv/nonparametric.py#L170-L228
def kaplan_meier_estimator(event, time_exit, time_enter=None, time_min=None): """Kaplan-Meier estimator of survival function. Parameters ---------- event : array-like, shape = (n_samples,) Contains binary event indicators. time_exit : array-like, shape = (n_samples,) Contains event/censoring times. time_enter : array-like, shape = (n_samples,), optional Contains time when each individual entered the study for left truncated survival data. time_min : float, optional Compute estimator conditional on survival at least up to the specified time. Returns ------- time : array, shape = (n_times,) Unique times. prob_survival : array, shape = (n_times,) Survival probability at each unique time point. If `time_enter` is provided, estimates are conditional probabilities. Examples -------- Creating a Kaplan-Meier curve: >>> x, y = kaplan_meier_estimator(event, time) >>> plt.step(x, y, where="post") >>> plt.ylim(0, 1) >>> plt.show() References ---------- .. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations", Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958. """ event, time_enter, time_exit = check_y_survival(event, time_enter, time_exit, allow_all_censored=True) check_consistent_length(event, time_enter, time_exit) if time_enter is None: uniq_times, n_events, n_at_risk = _compute_counts(event, time_exit) else: uniq_times, n_events, n_at_risk = _compute_counts_truncated(event, time_enter, time_exit) values = 1 - n_events / n_at_risk if time_min is not None: mask = uniq_times >= time_min uniq_times = numpy.compress(mask, uniq_times) values = numpy.compress(mask, values) y = numpy.cumprod(values) return uniq_times, y
[ "def", "kaplan_meier_estimator", "(", "event", ",", "time_exit", ",", "time_enter", "=", "None", ",", "time_min", "=", "None", ")", ":", "event", ",", "time_enter", ",", "time_exit", "=", "check_y_survival", "(", "event", ",", "time_enter", ",", "time_exit", ",", "allow_all_censored", "=", "True", ")", "check_consistent_length", "(", "event", ",", "time_enter", ",", "time_exit", ")", "if", "time_enter", "is", "None", ":", "uniq_times", ",", "n_events", ",", "n_at_risk", "=", "_compute_counts", "(", "event", ",", "time_exit", ")", "else", ":", "uniq_times", ",", "n_events", ",", "n_at_risk", "=", "_compute_counts_truncated", "(", "event", ",", "time_enter", ",", "time_exit", ")", "values", "=", "1", "-", "n_events", "/", "n_at_risk", "if", "time_min", "is", "not", "None", ":", "mask", "=", "uniq_times", ">=", "time_min", "uniq_times", "=", "numpy", ".", "compress", "(", "mask", ",", "uniq_times", ")", "values", "=", "numpy", ".", "compress", "(", "mask", ",", "values", ")", "y", "=", "numpy", ".", "cumprod", "(", "values", ")", "return", "uniq_times", ",", "y" ]
Kaplan-Meier estimator of survival function. Parameters ---------- event : array-like, shape = (n_samples,) Contains binary event indicators. time_exit : array-like, shape = (n_samples,) Contains event/censoring times. time_enter : array-like, shape = (n_samples,), optional Contains time when each individual entered the study for left truncated survival data. time_min : float, optional Compute estimator conditional on survival at least up to the specified time. Returns ------- time : array, shape = (n_times,) Unique times. prob_survival : array, shape = (n_times,) Survival probability at each unique time point. If `time_enter` is provided, estimates are conditional probabilities. Examples -------- Creating a Kaplan-Meier curve: >>> x, y = kaplan_meier_estimator(event, time) >>> plt.step(x, y, where="post") >>> plt.ylim(0, 1) >>> plt.show() References ---------- .. [1] Kaplan, E. L. and Meier, P., "Nonparametric estimation from incomplete observations", Journal of The American Statistical Association, vol. 53, pp. 457-481, 1958.
[ "Kaplan", "-", "Meier", "estimator", "of", "survival", "function", "." ]
python
train
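The product-limit estimate computed by kaplan_meier_estimator above can be reproduced on toy data in a few lines of plain numpy (a sketch of the uncensored-entry case only, without the input validation or left-truncation handling of the record):

import numpy as np

time = np.array([1., 2., 2., 4., 5.])
event = np.array([True, True, False, True, False])  # False = censored

uniq_times = np.unique(time)
n_at_risk = np.array([(time >= t).sum() for t in uniq_times])
n_events = np.array([((time == t) & event).sum() for t in uniq_times])
prob_survival = np.cumprod(1.0 - n_events / n_at_risk)
# uniq_times    -> [1., 2., 4., 5.]
# prob_survival -> [0.8, 0.6, 0.3, 0.3]
# plot with plt.step(uniq_times, prob_survival, where="post")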
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1064-L1069
def bk_blue(cls): "Make the text background color blue." wAttributes = cls._get_text_attributes() wAttributes &= ~win32.BACKGROUND_MASK wAttributes |= win32.BACKGROUND_BLUE cls._set_text_attributes(wAttributes)
[ "def", "bk_blue", "(", "cls", ")", ":", "wAttributes", "=", "cls", ".", "_get_text_attributes", "(", ")", "wAttributes", "&=", "~", "win32", ".", "BACKGROUND_MASK", "wAttributes", "|=", "win32", ".", "BACKGROUND_BLUE", "cls", ".", "_set_text_attributes", "(", "wAttributes", ")" ]
Make the text background color blue.
[ "Make", "the", "text", "background", "color", "blue", "." ]
python
train
Deathnerd/pyterp
pyterp/__init__.py
https://github.com/Deathnerd/pyterp/blob/baf2957263685f03873f368226f5752da4e51f08/pyterp/__init__.py#L188-L197
def _read_byte(self):
    """
    Read a single byte from the user without waiting for the \n character
    """
    from .getch import _Getch
    try:
        g = _Getch()
        self.tape[self.pointer] = ord(g())
    except TypeError as e:
        print("Here's what _Getch() is giving me: {}".format(e))
[ "def", "_read_byte", "(", "self", ")", ":", "from", ".", "getch", "import", "_Getch", "try", ":", "g", "=", "_Getch", "(", ")", "self", ".", "tape", "[", "self", ".", "pointer", "]", "=", "ord", "(", "g", "(", ")", ")", "except", "TypeError", "as", "e", ":", "print", "\"Here's what _Getch() is giving me {}\"", ".", "format", "(", "g", "(", ")", ")" ]
Read a single byte from the user without waiting for the \n character
[ "Read", "a", "single", "byte", "from", "the", "user", "without", "waiting", "for", "the", "\\", "n", "character" ]
python
train
malja/zroya
setup.py
https://github.com/malja/zroya/blob/41830133a54528e9cd9ef43d9637a576ac849c11/setup.py#L53-L67
def find_pyd_file(): """ Return path to .pyd after successful build command. :return: Path to .pyd file or None. """ if not os.path.isdir("./build"): raise NotADirectoryError for path, dirs, files in os.walk("./build"): for file_name in files: file_name_parts = os.path.splitext(file_name) if file_name_parts[1] == ".pyd": return path return None
[ "def", "find_pyd_file", "(", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "\"./build\"", ")", ":", "raise", "NotADirectoryError", "for", "path", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "\"./build\"", ")", ":", "for", "file_name", "in", "files", ":", "file_name_parts", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "if", "file_name_parts", "[", "1", "]", "==", "\".pyd\"", ":", "return", "path", "return", "None" ]
Return path to .pyd after successful build command. :return: Path to .pyd file or None.
[ "Return", "path", "to", ".", "pyd", "after", "successful", "build", "command", ".", ":", "return", ":", "Path", "to", ".", "pyd", "file", "or", "None", "." ]
python
train
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3489-L3505
def schedule_forced_svc_check(self, service, check_time): """Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None """ service.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True, force_time=check_time) self.send_an_element(service.get_update_status_brok())
[ "def", "schedule_forced_svc_check", "(", "self", ",", "service", ",", "check_time", ")", ":", "service", ".", "schedule", "(", "self", ".", "daemon", ".", "hosts", ",", "self", ".", "daemon", ".", "services", ",", "self", ".", "daemon", ".", "timeperiods", ",", "self", ".", "daemon", ".", "macromodulations", ",", "self", ".", "daemon", ".", "checkmodulations", ",", "self", ".", "daemon", ".", "checks", ",", "force", "=", "True", ",", "force_time", "=", "check_time", ")", "self", ".", "send_an_element", "(", "service", ".", "get_update_status_brok", "(", ")", ")" ]
Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None
[ "Schedule", "a", "forced", "check", "on", "a", "service", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
ArchiveTeam/wpull
wpull/application/hook.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/application/hook.py#L64-L69
def connect(self, name, callback): '''Add callback to hook.''' if not self._callbacks[name]: self._callbacks[name] = callback else: raise HookAlreadyConnectedError('Callback hook already connected.')
[ "def", "connect", "(", "self", ",", "name", ",", "callback", ")", ":", "if", "not", "self", ".", "_callbacks", "[", "name", "]", ":", "self", ".", "_callbacks", "[", "name", "]", "=", "callback", "else", ":", "raise", "HookAlreadyConnectedError", "(", "'Callback hook already connected.'", ")" ]
Add callback to hook.
[ "Add", "callback", "to", "hook", "." ]
python
train
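The connect method above enforces a single callback per hook name. A self-contained sketch of that registry pattern (class and hook names are illustrative, not wpull's actual API):

class HookAlreadyConnectedError(Exception):
    pass

class HookDispatcher:
    def __init__(self, names):
        # One empty callback slot per known hook name
        self._callbacks = {name: None for name in names}

    def connect(self, name, callback):
        if not self._callbacks[name]:
            self._callbacks[name] = callback
        else:
            raise HookAlreadyConnectedError('Callback hook already connected.')

hooks = HookDispatcher(['resolve_dns'])
hooks.connect('resolve_dns', lambda host: host)  # ok
# hooks.connect('resolve_dns', print)            # would raise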
pszafer/epson_projector
epson_projector/main.py
https://github.com/pszafer/epson_projector/blob/b8a10ace56e0a5cf858546041819c0e7ebca208f/epson_projector/main.py#L119-L141
async def send_request(self, params, timeout, type='json_query', command=False): """Send request to Epson.""" try: with async_timeout.timeout(timeout): url = '{url}{type}'.format( url=self._http_url, type=type) async with self.websession.get( url=url, params=params, headers=self._headers) as response: if response.status != HTTP_OK: _LOGGER.warning( "Error message %d from Epson.", response.status) return False if command == TURN_ON and self._powering_on: self._powering_on = False if type == 'json_query': return await response.json() return response except (aiohttp.ClientError, aiohttp.ClientConnectionError): _LOGGER.error("Error request") return False
[ "async", "def", "send_request", "(", "self", ",", "params", ",", "timeout", ",", "type", "=", "'json_query'", ",", "command", "=", "False", ")", ":", "try", ":", "with", "async_timeout", ".", "timeout", "(", "timeout", ")", ":", "url", "=", "'{url}{type}'", ".", "format", "(", "url", "=", "self", ".", "_http_url", ",", "type", "=", "type", ")", "async", "with", "self", ".", "websession", ".", "get", "(", "url", "=", "url", ",", "params", "=", "params", ",", "headers", "=", "self", ".", "_headers", ")", "as", "response", ":", "if", "response", ".", "status", "!=", "HTTP_OK", ":", "_LOGGER", ".", "warning", "(", "\"Error message %d from Epson.\"", ",", "response", ".", "status", ")", "return", "False", "if", "command", "==", "TURN_ON", "and", "self", ".", "_powering_on", ":", "self", ".", "_powering_on", "=", "False", "if", "type", "==", "'json_query'", ":", "return", "await", "response", ".", "json", "(", ")", "return", "response", "except", "(", "aiohttp", ".", "ClientError", ",", "aiohttp", ".", "ClientConnectionError", ")", ":", "_LOGGER", ".", "error", "(", "\"Error request\"", ")", "return", "False" ]
Send request to Epson.
[ "Send", "request", "to", "Epson", "." ]
python
train
waqasbhatti/astrobase
astrobase/lcfit/nonphysical.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcfit/nonphysical.py#L271-L484
def savgol_fit_magseries(times, mags, errs, period, windowlength=None, polydeg=2, sigclip=30.0, plotfit=False, magsarefluxes=False, verbose=True): '''Fit a Savitzky-Golay filter to the magnitude/flux time series. SG fits successive sub-sets (windows) of adjacent data points with a low-order polynomial via least squares. At each point (magnitude), it returns the value of the polynomial at that magnitude's time. This is made significantly cheaper than *actually* performing least squares for each window through linear algebra tricks that are possible when specifying the window size and polynomial order beforehand. Numerical Recipes Ch 14.8 gives an overview, Eq. 14.8.6 is what Scipy has implemented. The idea behind Savitzky-Golay is to preserve higher moments (>=2) of the input data series than would be done by a simple moving window average. Note that the filter assumes evenly spaced data, which magnitude time series are not. By *pretending* the data points are evenly spaced, we introduce an additional noise source in the function values. This is a relatively small noise source provided that the changes in the magnitude values across the full width of the N=windowlength point window is < sqrt(N/2) times the measurement noise on a single point. TODO: - Find correct dof for reduced chi squared in savgol_fit_magseries Parameters ---------- times,mags,errs : np.array The input mag/flux time-series to fit the Savitsky-Golay model to. period : float The period to use for the model fit. windowlength : None or int The length of the filter window (the number of coefficients). Must be either positive and odd, or None. (The window is the number of points to the left, and to the right, of whatever point is having a polynomial fit to it locally). Bigger windows at fixed polynomial order risk lowering the amplitude of sharp features. If None, this routine (arbitrarily) sets the `windowlength` for phased LCs to be either the number of finite data points divided by 300, or polydeg+3, whichever is bigger. polydeg : int This is the order of the polynomial used to fit the samples. Must be less than `windowlength`. "Higher-order filters do better at preserving feature heights and widths, but do less smoothing on broader features." (Numerical Recipes). sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. magsarefluxes : bool If True, will treat the input values of `mags` as fluxes for purposes of plotting the fit and sig-clipping. plotfit : str or False If this is a string, this function will make a plot for the fit to the mag/flux time-series and writes the plot to the path specified here. 
ignoreinitfail : bool If this is True, ignores the initial failure to find a set of optimized Fourier parameters using the global optimization function and proceeds to do a least-squares fit anyway. verbose : bool If True, will indicate progress and warn of any problems. Returns ------- dict This function returns a dict containing the model fit parameters, the minimized chi-sq value and the reduced chi-sq value. The form of this dict is mostly standardized across all functions in this module:: { 'fittype':'savgol', 'fitinfo':{ 'windowlength': the window length used for the fit, 'polydeg':the polynomial degree used for the fit, 'fitmags': the model fit mags, 'fitepoch': the epoch of minimum light for the fit, }, 'fitchisq': the minimized value of the fit's chi-sq, 'fitredchisq':the reduced chi-sq value, 'fitplotfile': the output fit plot if fitplot is not None, 'magseries':{ 'times':input times in phase order of the model, 'phase':the phases of the model mags, 'mags':input mags/fluxes in the phase order of the model, 'errs':errs in the phase order of the model, 'magsarefluxes':input value of magsarefluxes kwarg } } ''' stimes, smags, serrs = sigclip_magseries(times, mags, errs, sigclip=sigclip, magsarefluxes=magsarefluxes) # get rid of zero errs nzind = npnonzero(serrs) stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind] phase, pmags, perrs, ptimes, mintime = ( get_phased_quantities(stimes, smags, serrs, period) ) if not isinstance(windowlength, int): windowlength = max( polydeg + 3, int(len(phase)/300) ) if windowlength % 2 == 0: windowlength += 1 if verbose: LOGINFO('applying Savitzky-Golay filter with ' 'window length %s and polynomial degree %s to ' 'mag series with %s observations, ' 'using period %.6f, folded at %.6f' % (windowlength, polydeg, len(pmags), period, mintime)) # generate the function values obtained by applying the SG filter. The # "wrap" option is best for phase-folded LCs. sgf = savgol_filter(pmags, windowlength, polydeg, mode='wrap') # here the "fit" to the phases is the function produced by the # Savitzky-Golay filter. then compute the chisq and red-chisq. fitmags = sgf fitchisq = npsum( ((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs) ) # TODO: quantify dof for SG filter. nparams = int(len(pmags)/windowlength) * polydeg fitredchisq = fitchisq/(len(pmags) - nparams - 1) fitredchisq = -99. if verbose: LOGINFO( 'SG filter applied. chisq = %.5f, reduced chisq = %.5f' % (fitchisq, fitredchisq) ) # figure out the time of light curve minimum (i.e. the fit epoch) # this is when the fit mag is maximum (i.e. the faintest) # or if magsarefluxes = True, then this is when fit flux is minimum if not magsarefluxes: fitmagminind = npwhere(fitmags == npmax(fitmags)) else: fitmagminind = npwhere(fitmags == npmin(fitmags)) if len(fitmagminind[0]) > 1: fitmagminind = (fitmagminind[0][0],) magseriesepoch = ptimes[fitmagminind] # assemble the returndict returndict = { 'fittype':'savgol', 'fitinfo':{ 'windowlength':windowlength, 'polydeg':polydeg, 'fitmags':fitmags, 'fitepoch':magseriesepoch }, 'fitchisq':fitchisq, 'fitredchisq':fitredchisq, 'fitplotfile':None, 'magseries':{ 'times':ptimes, 'phase':phase, 'mags':pmags, 'errs':perrs, 'magsarefluxes':magsarefluxes } } # make the fit plot if required if plotfit and isinstance(plotfit, str): make_fit_plot(phase, pmags, perrs, fitmags, period, mintime, magseriesepoch, plotfit, magsarefluxes=magsarefluxes) returndict['fitplotfile'] = plotfit return returndict
[ "def", "savgol_fit_magseries", "(", "times", ",", "mags", ",", "errs", ",", "period", ",", "windowlength", "=", "None", ",", "polydeg", "=", "2", ",", "sigclip", "=", "30.0", ",", "plotfit", "=", "False", ",", "magsarefluxes", "=", "False", ",", "verbose", "=", "True", ")", ":", "stimes", ",", "smags", ",", "serrs", "=", "sigclip_magseries", "(", "times", ",", "mags", ",", "errs", ",", "sigclip", "=", "sigclip", ",", "magsarefluxes", "=", "magsarefluxes", ")", "# get rid of zero errs", "nzind", "=", "npnonzero", "(", "serrs", ")", "stimes", ",", "smags", ",", "serrs", "=", "stimes", "[", "nzind", "]", ",", "smags", "[", "nzind", "]", ",", "serrs", "[", "nzind", "]", "phase", ",", "pmags", ",", "perrs", ",", "ptimes", ",", "mintime", "=", "(", "get_phased_quantities", "(", "stimes", ",", "smags", ",", "serrs", ",", "period", ")", ")", "if", "not", "isinstance", "(", "windowlength", ",", "int", ")", ":", "windowlength", "=", "max", "(", "polydeg", "+", "3", ",", "int", "(", "len", "(", "phase", ")", "/", "300", ")", ")", "if", "windowlength", "%", "2", "==", "0", ":", "windowlength", "+=", "1", "if", "verbose", ":", "LOGINFO", "(", "'applying Savitzky-Golay filter with '", "'window length %s and polynomial degree %s to '", "'mag series with %s observations, '", "'using period %.6f, folded at %.6f'", "%", "(", "windowlength", ",", "polydeg", ",", "len", "(", "pmags", ")", ",", "period", ",", "mintime", ")", ")", "# generate the function values obtained by applying the SG filter. The", "# \"wrap\" option is best for phase-folded LCs.", "sgf", "=", "savgol_filter", "(", "pmags", ",", "windowlength", ",", "polydeg", ",", "mode", "=", "'wrap'", ")", "# here the \"fit\" to the phases is the function produced by the", "# Savitzky-Golay filter. then compute the chisq and red-chisq.", "fitmags", "=", "sgf", "fitchisq", "=", "npsum", "(", "(", "(", "fitmags", "-", "pmags", ")", "*", "(", "fitmags", "-", "pmags", ")", ")", "/", "(", "perrs", "*", "perrs", ")", ")", "# TODO: quantify dof for SG filter.", "nparams", "=", "int", "(", "len", "(", "pmags", ")", "/", "windowlength", ")", "*", "polydeg", "fitredchisq", "=", "fitchisq", "/", "(", "len", "(", "pmags", ")", "-", "nparams", "-", "1", ")", "fitredchisq", "=", "-", "99.", "if", "verbose", ":", "LOGINFO", "(", "'SG filter applied. chisq = %.5f, reduced chisq = %.5f'", "%", "(", "fitchisq", ",", "fitredchisq", ")", ")", "# figure out the time of light curve minimum (i.e. the fit epoch)", "# this is when the fit mag is maximum (i.e. 
the faintest)", "# or if magsarefluxes = True, then this is when fit flux is minimum", "if", "not", "magsarefluxes", ":", "fitmagminind", "=", "npwhere", "(", "fitmags", "==", "npmax", "(", "fitmags", ")", ")", "else", ":", "fitmagminind", "=", "npwhere", "(", "fitmags", "==", "npmin", "(", "fitmags", ")", ")", "if", "len", "(", "fitmagminind", "[", "0", "]", ")", ">", "1", ":", "fitmagminind", "=", "(", "fitmagminind", "[", "0", "]", "[", "0", "]", ",", ")", "magseriesepoch", "=", "ptimes", "[", "fitmagminind", "]", "# assemble the returndict", "returndict", "=", "{", "'fittype'", ":", "'savgol'", ",", "'fitinfo'", ":", "{", "'windowlength'", ":", "windowlength", ",", "'polydeg'", ":", "polydeg", ",", "'fitmags'", ":", "fitmags", ",", "'fitepoch'", ":", "magseriesepoch", "}", ",", "'fitchisq'", ":", "fitchisq", ",", "'fitredchisq'", ":", "fitredchisq", ",", "'fitplotfile'", ":", "None", ",", "'magseries'", ":", "{", "'times'", ":", "ptimes", ",", "'phase'", ":", "phase", ",", "'mags'", ":", "pmags", ",", "'errs'", ":", "perrs", ",", "'magsarefluxes'", ":", "magsarefluxes", "}", "}", "# make the fit plot if required", "if", "plotfit", "and", "isinstance", "(", "plotfit", ",", "str", ")", ":", "make_fit_plot", "(", "phase", ",", "pmags", ",", "perrs", ",", "fitmags", ",", "period", ",", "mintime", ",", "magseriesepoch", ",", "plotfit", ",", "magsarefluxes", "=", "magsarefluxes", ")", "returndict", "[", "'fitplotfile'", "]", "=", "plotfit", "return", "returndict" ]
Fit a Savitzky-Golay filter to the magnitude/flux time series. SG fits successive sub-sets (windows) of adjacent data points with a low-order polynomial via least squares. At each point (magnitude), it returns the value of the polynomial at that magnitude's time. This is made significantly cheaper than *actually* performing least squares for each window through linear algebra tricks that are possible when specifying the window size and polynomial order beforehand. Numerical Recipes Ch 14.8 gives an overview, Eq. 14.8.6 is what Scipy has implemented. The idea behind Savitzky-Golay is to preserve higher moments (>=2) of the input data series than would be done by a simple moving window average. Note that the filter assumes evenly spaced data, which magnitude time series are not. By *pretending* the data points are evenly spaced, we introduce an additional noise source in the function values. This is a relatively small noise source provided that the changes in the magnitude values across the full width of the N=windowlength point window is < sqrt(N/2) times the measurement noise on a single point. TODO: - Find correct dof for reduced chi squared in savgol_fit_magseries Parameters ---------- times,mags,errs : np.array The input mag/flux time-series to fit the Savitsky-Golay model to. period : float The period to use for the model fit. windowlength : None or int The length of the filter window (the number of coefficients). Must be either positive and odd, or None. (The window is the number of points to the left, and to the right, of whatever point is having a polynomial fit to it locally). Bigger windows at fixed polynomial order risk lowering the amplitude of sharp features. If None, this routine (arbitrarily) sets the `windowlength` for phased LCs to be either the number of finite data points divided by 300, or polydeg+3, whichever is bigger. polydeg : int This is the order of the polynomial used to fit the samples. Must be less than `windowlength`. "Higher-order filters do better at preserving feature heights and widths, but do less smoothing on broader features." (Numerical Recipes). sigclip : float or int or sequence of two floats/ints or None If a single float or int, a symmetric sigma-clip will be performed using the number provided as the sigma-multiplier to cut out from the input time-series. If a list of two ints/floats is provided, the function will perform an 'asymmetric' sigma-clip. The first element in this list is the sigma value to use for fainter flux/mag values; the second element in this list is the sigma value to use for brighter flux/mag values. For example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma dimmings and greater than 3-sigma brightenings. Here the meaning of "dimming" and "brightening" is set by *physics* (not the magnitude system), which is why the `magsarefluxes` kwarg must be correctly set. If `sigclip` is None, no sigma-clipping will be performed, and the time-series (with non-finite elems removed) will be passed through to the output. magsarefluxes : bool If True, will treat the input values of `mags` as fluxes for purposes of plotting the fit and sig-clipping. plotfit : str or False If this is a string, this function will make a plot for the fit to the mag/flux time-series and writes the plot to the path specified here. ignoreinitfail : bool If this is True, ignores the initial failure to find a set of optimized Fourier parameters using the global optimization function and proceeds to do a least-squares fit anyway. 
verbose : bool If True, will indicate progress and warn of any problems. Returns ------- dict This function returns a dict containing the model fit parameters, the minimized chi-sq value and the reduced chi-sq value. The form of this dict is mostly standardized across all functions in this module:: { 'fittype':'savgol', 'fitinfo':{ 'windowlength': the window length used for the fit, 'polydeg':the polynomial degree used for the fit, 'fitmags': the model fit mags, 'fitepoch': the epoch of minimum light for the fit, }, 'fitchisq': the minimized value of the fit's chi-sq, 'fitredchisq':the reduced chi-sq value, 'fitplotfile': the output fit plot if fitplot is not None, 'magseries':{ 'times':input times in phase order of the model, 'phase':the phases of the model mags, 'mags':input mags/fluxes in the phase order of the model, 'errs':errs in the phase order of the model, 'magsarefluxes':input value of magsarefluxes kwarg } }
[ "Fit", "a", "Savitzky", "-", "Golay", "filter", "to", "the", "magnitude", "/", "flux", "time", "series", "." ]
python
valid
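The core of the savgol_fit_magseries record above is a single scipy call plus the default window rule. A minimal sketch on synthetic phase-folded data (all values illustrative):

import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(1)
phase = np.linspace(0.0, 1.0, 400, endpoint=False)
pmags = 0.1 * np.cos(2 * np.pi * phase) + rng.normal(0.0, 0.02, phase.size)

polydeg = 2
windowlength = max(polydeg + 3, phase.size // 300)  # same default rule as the record
if windowlength % 2 == 0:
    windowlength += 1  # Savitzky-Golay windows must be odd
# mode='wrap' treats the series as periodic, which suits phase-folded light curves
fitmags = savgol_filter(pmags, windowlength, polydeg, mode='wrap')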
pyviz/holoviews
holoviews/core/operation.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/operation.py#L113-L124
def _apply(self, element, key=None): """ Applies the operation to the element, executing any pre- and post-processor hooks if defined. """ kwargs = {} for hook in self._preprocess_hooks: kwargs.update(hook(self, element)) ret = self._process(element, key) for hook in self._postprocess_hooks: ret = hook(self, ret, **kwargs) return ret
[ "def", "_apply", "(", "self", ",", "element", ",", "key", "=", "None", ")", ":", "kwargs", "=", "{", "}", "for", "hook", "in", "self", ".", "_preprocess_hooks", ":", "kwargs", ".", "update", "(", "hook", "(", "self", ",", "element", ")", ")", "ret", "=", "self", ".", "_process", "(", "element", ",", "key", ")", "for", "hook", "in", "self", ".", "_postprocess_hooks", ":", "ret", "=", "hook", "(", "self", ",", "ret", ",", "*", "*", "kwargs", ")", "return", "ret" ]
Applies the operation to the element, executing any pre- and post-processor hooks if defined.
[ "Applies", "the", "operation", "to", "the", "element", "executing", "any", "pre", "-", "and", "post", "-", "processor", "hooks", "if", "defined", "." ]
python
train
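The _apply record above shows a common hook sandwich: pre-hooks contribute keyword arguments, post-hooks transform the result. A tiny generic version of the same control flow (function and hook names are illustrative):

def apply_with_hooks(process, element, pre_hooks=(), post_hooks=()):
    # Pre-hooks build a shared kwargs dict; post-hooks see the result plus kwargs
    kwargs = {}
    for hook in pre_hooks:
        kwargs.update(hook(element))
    result = process(element)
    for hook in post_hooks:
        result = hook(result, **kwargs)
    return result

out = apply_with_hooks(lambda x: x * 2, 21,
                       pre_hooks=[lambda e: {'label': 'demo'}],
                       post_hooks=[lambda r, label: (label, r)])
# out == ('demo', 42)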
mila/pyoo
pyoo.py
https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L1636-L1646
def copy(self, old_name, new_name, index=None): """ Copies an old sheet with the old_name to a new sheet with new_name. If an optional index argument is not provided then the created sheet is appended at the end. Returns the new sheet. """ if index is None: index = len(self) self._copy(old_name, new_name, index) return self[new_name]
[ "def", "copy", "(", "self", ",", "old_name", ",", "new_name", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "index", "=", "len", "(", "self", ")", "self", ".", "_copy", "(", "old_name", ",", "new_name", ",", "index", ")", "return", "self", "[", "new_name", "]" ]
Copies an old sheet with the old_name to a new sheet with new_name. If an optional index argument is not provided then the created sheet is appended at the end. Returns the new sheet.
[ "Copies", "an", "old", "sheet", "with", "the", "old_name", "to", "a", "new", "sheet", "with", "new_name", "." ]
python
train
empymod/empymod
empymod/scripts/fdesign.py
https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L1057-L1066
def sin_3(a=1): r"""Fourier sine transform pair sin_3 ([Ande75]_).""" def lhs(x): return x/(a**2 + x**2) def rhs(b): return np.pi*np.exp(-a*b)/2 return Ghosh('sin', lhs, rhs)
[ "def", "sin_3", "(", "a", "=", "1", ")", ":", "def", "lhs", "(", "x", ")", ":", "return", "x", "/", "(", "a", "**", "2", "+", "x", "**", "2", ")", "def", "rhs", "(", "b", ")", ":", "return", "np", ".", "pi", "*", "np", ".", "exp", "(", "-", "a", "*", "b", ")", "/", "2", "return", "Ghosh", "(", "'sin'", ",", "lhs", ",", "rhs", ")" ]
r"""Fourier sine transform pair sin_3 ([Ande75]_).
[ "r", "Fourier", "sine", "transform", "pair", "sin_3", "(", "[", "Ande75", "]", "_", ")", "." ]
python
train
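The sin_3 record above encodes the transform pair: the integral of x/(a^2 + x^2) * sin(b*x) over x from 0 to infinity equals (pi/2) * exp(-a*b). It can be checked numerically with QUADPACK's oscillatory-weight routine (a sketch; assumes scipy's quad with weight='sin' over a semi-infinite range, which uses the QAWF algorithm):

import numpy as np
from scipy.integrate import quad

a, b = 1.0, 2.0
# quad integrates lhs(x) * sin(b*x) from 0 to infinity
lhs, _ = quad(lambda x: x / (a**2 + x**2), 0, np.inf, weight='sin', wvar=b)
rhs = np.pi * np.exp(-a * b) / 2
assert np.isclose(lhs, rhs, atol=1e-6)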
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/magics.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/magics.py#L200-L218
def project(self): """str: Default project to use for queries performed through IPython magics Note: The project does not need to be explicitly defined if you have an environment default project set. If you do not have a default project set in your environment, manually assign the project as demonstrated in the example below. Example: Manually setting the context project: >>> from google.cloud.bigquery import magics >>> magics.context.project = 'my-project' """ if self._project is None: _, self._project = google.auth.default() return self._project
[ "def", "project", "(", "self", ")", ":", "if", "self", ".", "_project", "is", "None", ":", "_", ",", "self", ".", "_project", "=", "google", ".", "auth", ".", "default", "(", ")", "return", "self", ".", "_project" ]
str: Default project to use for queries performed through IPython magics Note: The project does not need to be explicitly defined if you have an environment default project set. If you do not have a default project set in your environment, manually assign the project as demonstrated in the example below. Example: Manually setting the context project: >>> from google.cloud.bigquery import magics >>> magics.context.project = 'my-project'
[ "str", ":", "Default", "project", "to", "use", "for", "queries", "performed", "through", "IPython", "magics" ]
python
train
Parsely/probably
probably/cdbf.py
https://github.com/Parsely/probably/blob/5d80855c1645fb2813678d5bcfe6108e33d80b9e/probably/cdbf.py#L75-L80
def compute_refresh_time(self): """ Compute the refresh period for the given expiration delay """ if self.z == 0: self.z = 1E-10 s = float(self.expiration) * (1.0/(self.nbr_bits)) * (1.0/(self.counter_init - 1 + (1.0/(self.z * (self.nbr_slices + 1))))) return s
[ "def", "compute_refresh_time", "(", "self", ")", ":", "if", "self", ".", "z", "==", "0", ":", "self", ".", "z", "=", "1E-10", "s", "=", "float", "(", "self", ".", "expiration", ")", "*", "(", "1.0", "/", "(", "self", ".", "nbr_bits", ")", ")", "*", "(", "1.0", "/", "(", "self", ".", "counter_init", "-", "1", "+", "(", "1.0", "/", "(", "self", ".", "z", "*", "(", "self", ".", "nbr_slices", "+", "1", ")", ")", ")", ")", ")", "return", "s" ]
Compute the refresh period for the given expiration delay
[ "Compute", "the", "refresh", "period", "for", "the", "given", "expiration", "delay" ]
python
train
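Plugging representative numbers into the refresh-period formula of compute_refresh_time above (all values chosen purely for illustration):

expiration = 60.0   # seconds until an entry should expire
nbr_bits = 1000
counter_init = 5
nbr_slices = 4
z = 2.0             # would be clamped to 1e-10 if it were zero

s = float(expiration) * (1.0 / nbr_bits) * (
    1.0 / (counter_init - 1 + 1.0 / (z * (nbr_slices + 1))))
# 1/(z*(nbr_slices+1)) = 0.1, so s = 60/1000/4.1, roughly 0.01463 s per refresh step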
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L1778-L1815
def _issubclass_2(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check): """Helper for _issubclass, a.k.a pytypes.issubtype. """ if is_Tuple(superclass): return _issubclass_Tuple(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if is_Union(superclass): return _issubclass_Union(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if is_Union(subclass): return all(_issubclass(t, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) \ for t in get_Union_params(subclass)) if is_Generic(superclass): cls = superclass.__origin__ if not superclass.__origin__ is None else superclass # We would rather use issubclass(superclass.__origin__, Mapping), but that's somehow erroneous if pytypes.covariant_Mapping and (_has_base(cls, Mapping) or # Python 3.7 maps everything to collections.abc: (cls in _extra_dict and issubclass(cls, collections.abc.Mapping))): return _issubclass_Mapping_covariant(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) else: return _issubclass_Generic(subclass, superclass, bound_Generic, bound_typevars, bound_typevars_readonly, follow_fwd_refs, _recursion_check) if subclass in _extra_dict: subclass = _extra_dict[subclass] try: return issubclass(subclass, superclass) except TypeError: if not is_Type(subclass): # For Python 3.7, types from typing are not types. # So issubclass emits TypeError: issubclass() arg 1 must be a class raise TypeError("Invalid type declaration: %s, %s" % (type_str(subclass), type_str(superclass))) return False
[ "def", "_issubclass_2", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", ":", "if", "is_Tuple", "(", "superclass", ")", ":", "return", "_issubclass_Tuple", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", "if", "is_Union", "(", "superclass", ")", ":", "return", "_issubclass_Union", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", "if", "is_Union", "(", "subclass", ")", ":", "return", "all", "(", "_issubclass", "(", "t", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", "for", "t", "in", "get_Union_params", "(", "subclass", ")", ")", "if", "is_Generic", "(", "superclass", ")", ":", "cls", "=", "superclass", ".", "__origin__", "if", "not", "superclass", ".", "__origin__", "is", "None", "else", "superclass", "# We would rather use issubclass(superclass.__origin__, Mapping), but that's somehow erroneous", "if", "pytypes", ".", "covariant_Mapping", "and", "(", "_has_base", "(", "cls", ",", "Mapping", ")", "or", "# Python 3.7 maps everything to collections.abc:", "(", "cls", "in", "_extra_dict", "and", "issubclass", "(", "cls", ",", "collections", ".", "abc", ".", "Mapping", ")", ")", ")", ":", "return", "_issubclass_Mapping_covariant", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", "else", ":", "return", "_issubclass_Generic", "(", "subclass", ",", "superclass", ",", "bound_Generic", ",", "bound_typevars", ",", "bound_typevars_readonly", ",", "follow_fwd_refs", ",", "_recursion_check", ")", "if", "subclass", "in", "_extra_dict", ":", "subclass", "=", "_extra_dict", "[", "subclass", "]", "try", ":", "return", "issubclass", "(", "subclass", ",", "superclass", ")", "except", "TypeError", ":", "if", "not", "is_Type", "(", "subclass", ")", ":", "# For Python 3.7, types from typing are not types.", "# So issubclass emits TypeError: issubclass() arg 1 must be a class", "raise", "TypeError", "(", "\"Invalid type declaration: %s, %s\"", "%", "(", "type_str", "(", "subclass", ")", ",", "type_str", "(", "superclass", ")", ")", ")", "return", "False" ]
Helper for _issubclass, a.k.a pytypes.issubtype.
[ "Helper", "for", "_issubclass", "a", ".", "k", ".", "a", "pytypes", ".", "issubtype", "." ]
python
train
nickpandolfi/Cyther
cyther/instructions.py
https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/instructions.py#L53-L82
def processAndSetDefaults(self):
    """
    The heart of the 'Instruction' object. This method will make sure that
    all fields not entered will be defaulted to a correct value. Also
    checks for incongruities in the data entered, if it was by the user.
    """
    # INPUT, OUTPUT, GIVEN + BUILDABLE DEPS
    if not self.input:
        raise ValueError(NO_INPUT_FILE)
    if not self.output:
        # Build directory must exist, right?
        if not self.build_directory:
            File()
        pass  # Can it be built? / reference self.output_format for this
    else:
        pass  # if it is not congruent with other info provided
    if not self.build_directory:
        pass  # Initialize it
    for dependency in self.given_dependencies:
        pass  # Check if the dependency exists
    if self.output_format != self.output.getType():
        raise ValueError("")
    # Given dependencies must actually exist!
    # output_name must be at a lower extension level than input_name
    # The build directory
    return
[ "def", "processAndSetDefaults", "(", "self", ")", ":", "# INPUT, OUTPUT, GIVEN + BUILDABLE DEPS", "if", "not", "self", ".", "input", ":", "raise", "ValueError", "(", "NO_INPUT_FILE", ")", "if", "not", "self", ".", "output", ":", "# Build directory must exist, right?", "if", "not", "self", ".", "build_directory", ":", "File", "(", ")", "pass", "# Can it be built? / reference self.output_format for this", "else", ":", "pass", "# if it is not congruent with other info provided", "if", "not", "self", ".", "build_directory", ":", "pass", "# Initialize it", "for", "dependency", "in", "self", ".", "given_dependencies", ":", "pass", "# Check if the dependcy exists", "if", "self", ".", "output_format", "!=", "self", ".", "output", ".", "getType", "(", ")", ":", "raise", "ValueError", "(", "\"\"", ")", "# Given dependencies must actually exist!", "# output_name must be at a lower extenion level than input_name", "# The build directory", "return" ]
The heart of the 'Instruction' object. This method will make sure that all fields not entered will be defaulted to a correct value. Also checks for incongruities in the data entered, if it was by the user.
[ "The", "heart", "of", "the", "Instruction", "object", ".", "This", "method", "will", "make", "sure", "that", "all", "fields", "not", "entered", "will", "be", "defaulted", "to", "a", "correct", "value", ".", "Also", "checks", "for", "incongruities", "in", "the", "data", "entered", "if", "it", "was", "by", "the", "user", "." ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L1806-L1817
def vlm_add_broadcast(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
    '''Add a broadcast, with one input.
    @param psz_name: the name of the new broadcast.
    @param psz_input: the input MRL.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new broadcast.
    @param b_loop: Should this broadcast be played in a loop?
    @return: 0 on success, -1 on error.
    '''
    return libvlc_vlm_add_broadcast(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
[ "def", "vlm_add_broadcast", "(", "self", ",", "psz_name", ",", "psz_input", ",", "psz_output", ",", "i_options", ",", "ppsz_options", ",", "b_enabled", ",", "b_loop", ")", ":", "return", "libvlc_vlm_add_broadcast", "(", "self", ",", "str_to_bytes", "(", "psz_name", ")", ",", "str_to_bytes", "(", "psz_input", ")", ",", "str_to_bytes", "(", "psz_output", ")", ",", "i_options", ",", "ppsz_options", ",", "b_enabled", ",", "b_loop", ")" ]
Add a broadcast, with one input.
@param psz_name: the name of the new broadcast.
@param psz_input: the input MRL.
@param psz_output: the output MRL (the parameter to the "sout" variable).
@param i_options: number of additional options.
@param ppsz_options: additional options.
@param b_enabled: boolean for enabling the new broadcast.
@param b_loop: Should this broadcast be played in a loop?
@return: 0 on success, -1 on error.
[ "Add", "a", "broadcast", "with", "one", "input", "." ]
python
train
richardkiss/pycoin
pycoin/merkle.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/merkle.py#L12-L20
def merkle_pair(hashes, hash_f): """Take a list of hashes, and return the parent row in the tree of merkle hashes.""" if len(hashes) % 2 == 1: hashes = list(hashes) hashes.append(hashes[-1]) items = [] for i in range(0, len(hashes), 2): items.append(hash_f(hashes[i] + hashes[i+1])) return items
[ "def", "merkle_pair", "(", "hashes", ",", "hash_f", ")", ":", "if", "len", "(", "hashes", ")", "%", "2", "==", "1", ":", "hashes", "=", "list", "(", "hashes", ")", "hashes", ".", "append", "(", "hashes", "[", "-", "1", "]", ")", "items", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "hashes", ")", ",", "2", ")", ":", "items", ".", "append", "(", "hash_f", "(", "hashes", "[", "i", "]", "+", "hashes", "[", "i", "+", "1", "]", ")", ")", "return", "items" ]
Take a list of hashes, and return the parent row in the tree of merkle hashes.
[ "Take", "a", "list", "of", "hashes", "and", "return", "the", "parent", "row", "in", "the", "tree", "of", "merkle", "hashes", "." ]
python
train
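A usage sketch for merkle_pair: fold rows until a single root remains. The double-SHA256 hash_f is an assumption here (a common bitcoin-style choice); any pairwise hash function of the same shape works.

import hashlib

def hash_f(blob):
    return hashlib.sha256(hashlib.sha256(blob).digest()).digest()

row = [hash_f(bytes([i])) for i in range(5)]  # odd count: last hash gets doubled
while len(row) > 1:
    row = merkle_pair(row, hash_f)
merkle_root = row[0]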
CalebBell/fluids
fluids/flow_meter.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/flow_meter.py#L196-L257
def orifice_expansibility_1989(D, Do, P1, P2, k): r'''Calculates the expansibility factor for orifice plate calculations based on the geometry of the plate, measured pressures of the orifice, and the isentropic exponent of the fluid. .. math:: \epsilon = 1- (0.41 + 0.35\beta^4)\Delta P/\kappa/P_1 Parameters ---------- D : float Upstream internal pipe diameter, [m] Do : float Diameter of orifice at flow conditions, [m] P1 : float Static pressure of fluid upstream of orifice at the cross-section of the pressure tap, [Pa] P2 : float Static pressure of fluid downstream of orifice at the cross-section of the pressure tap, [Pa] k : float Isentropic exponent of fluid, [-] Returns ------- expansibility : float Expansibility factor (1 for incompressible fluids, less than 1 for real fluids), [-] Notes ----- This formula was determined for the range of P2/P1 >= 0.75, and for fluids of air, steam, and natural gas. However, there is no objection to using it for other fluids. This is an older formula used to calculate expansibility factors for orifice plates. In this standard, an expansibility factor formula transformation in terms of the pressure after the orifice is presented as well. This is the more standard formulation in terms of the upstream conditions. The other formula is below for reference only: .. math:: \epsilon_2 = \sqrt{1 + \frac{\Delta P}{P_2}} - (0.41 + 0.35\beta^4) \frac{\Delta P}{\kappa P_2 \sqrt{1 + \frac{\Delta P}{P_2}}} [2]_ recommends this formulation for wedge meters as well. Examples -------- >>> orifice_expansibility_1989(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4) 0.9970510687411718 References ---------- .. [1] American Society of Mechanical Engineers. MFC-3M-1989 Measurement Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2005. .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd edition. New York: McGraw-Hill Education, 1996. ''' return 1.0 - (0.41 + 0.35*(Do/D)**4)*(P1 - P2)/(k*P1)
[ "def", "orifice_expansibility_1989", "(", "D", ",", "Do", ",", "P1", ",", "P2", ",", "k", ")", ":", "return", "1.0", "-", "(", "0.41", "+", "0.35", "*", "(", "Do", "/", "D", ")", "**", "4", ")", "*", "(", "P1", "-", "P2", ")", "/", "(", "k", "*", "P1", ")" ]
r'''Calculates the expansibility factor for orifice plate calculations based on the geometry of the plate, measured pressures of the orifice, and the isentropic exponent of the fluid. .. math:: \epsilon = 1- (0.41 + 0.35\beta^4)\Delta P/\kappa/P_1 Parameters ---------- D : float Upstream internal pipe diameter, [m] Do : float Diameter of orifice at flow conditions, [m] P1 : float Static pressure of fluid upstream of orifice at the cross-section of the pressure tap, [Pa] P2 : float Static pressure of fluid downstream of orifice at the cross-section of the pressure tap, [Pa] k : float Isentropic exponent of fluid, [-] Returns ------- expansibility : float Expansibility factor (1 for incompressible fluids, less than 1 for real fluids), [-] Notes ----- This formula was determined for the range of P2/P1 >= 0.75, and for fluids of air, steam, and natural gas. However, there is no objection to using it for other fluids. This is an older formula used to calculate expansibility factors for orifice plates. In this standard, an expansibility factor formula transformation in terms of the pressure after the orifice is presented as well. This is the more standard formulation in terms of the upstream conditions. The other formula is below for reference only: .. math:: \epsilon_2 = \sqrt{1 + \frac{\Delta P}{P_2}} - (0.41 + 0.35\beta^4) \frac{\Delta P}{\kappa P_2 \sqrt{1 + \frac{\Delta P}{P_2}}} [2]_ recommends this formulation for wedge meters as well. Examples -------- >>> orifice_expansibility_1989(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4) 0.9970510687411718 References ---------- .. [1] American Society of Mechanical Engineers. MFC-3M-1989 Measurement Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2005. .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd edition. New York: McGraw-Hill Education, 1996.
[ "r", "Calculates", "the", "expansibility", "factor", "for", "orifice", "plate", "calculations", "based", "on", "the", "geometry", "of", "the", "plate", "measured", "pressures", "of", "the", "orifice", "and", "the", "isentropic", "exponent", "of", "the", "fluid", ".", "..", "math", "::", "\\", "epsilon", "=", "1", "-", "(", "0", ".", "41", "+", "0", ".", "35", "\\", "beta^4", ")", "\\", "Delta", "P", "/", "\\", "kappa", "/", "P_1", "Parameters", "----------", "D", ":", "float", "Upstream", "internal", "pipe", "diameter", "[", "m", "]", "Do", ":", "float", "Diameter", "of", "orifice", "at", "flow", "conditions", "[", "m", "]", "P1", ":", "float", "Static", "pressure", "of", "fluid", "upstream", "of", "orifice", "at", "the", "cross", "-", "section", "of", "the", "pressure", "tap", "[", "Pa", "]", "P2", ":", "float", "Static", "pressure", "of", "fluid", "downstream", "of", "orifice", "at", "the", "cross", "-", "section", "of", "the", "pressure", "tap", "[", "Pa", "]", "k", ":", "float", "Isentropic", "exponent", "of", "fluid", "[", "-", "]" ]
python
train
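A quick numeric check of the formula, restated outside the function with the docstring's own example values:

D, Do, P1, P2, k = 0.0739, 0.0222, 1e5, 9.9e4, 1.4
beta = Do / D
eps = 1.0 - (0.41 + 0.35*beta**4) * (P1 - P2) / (k * P1)
print(eps)  # ~0.9970510687411718, matching the documented result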
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L398-L411
def get_ip_interface_input_request_type_get_request_interface_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_ip_interface = ET.Element("get_ip_interface") config = get_ip_interface input = ET.SubElement(get_ip_interface, "input") request_type = ET.SubElement(input, "request-type") get_request = ET.SubElement(request_type, "get-request") interface_type = ET.SubElement(get_request, "interface-type") interface_type.text = kwargs.pop('interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_ip_interface_input_request_type_get_request_interface_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_ip_interface", "=", "ET", ".", "Element", "(", "\"get_ip_interface\"", ")", "config", "=", "get_ip_interface", "input", "=", "ET", ".", "SubElement", "(", "get_ip_interface", ",", "\"input\"", ")", "request_type", "=", "ET", ".", "SubElement", "(", "input", ",", "\"request-type\"", ")", "get_request", "=", "ET", ".", "SubElement", "(", "request_type", ",", "\"get-request\"", ")", "interface_type", "=", "ET", ".", "SubElement", "(", "get_request", ",", "\"interface-type\"", ")", "interface_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
SheffieldML/GPy
GPy/kern/src/stationary.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/stationary.py#L151-L168
def _scaled_dist(self, X, X2=None):
    """
    Efficiently compute the scaled distance, r.

    .. math::
        r = \sqrt( \sum_{q=1}^Q (x_q - x'_q)^2/l_q^2 )

    Note that if there is only one lengthscale, l comes outside the sum. In
    this case we compute the unscaled distance first (in a separate
    function for caching) and divide by lengthscale afterwards.

    """
    if self.ARD:
        if X2 is not None:
            X2 = X2 / self.lengthscale
        return self._unscaled_dist(X/self.lengthscale, X2)
    else:
        return self._unscaled_dist(X, X2)/self.lengthscale
[ "def", "_scaled_dist", "(", "self", ",", "X", ",", "X2", "=", "None", ")", ":", "if", "self", ".", "ARD", ":", "if", "X2", "is", "not", "None", ":", "X2", "=", "X2", "/", "self", ".", "lengthscale", "return", "self", ".", "_unscaled_dist", "(", "X", "/", "self", ".", "lengthscale", ",", "X2", ")", "else", ":", "return", "self", ".", "_unscaled_dist", "(", "X", ",", "X2", ")", "/", "self", ".", "lengthscale" ]
Efficiently compute the scaled distance, r.

.. math::
    r = \sqrt( \sum_{q=1}^Q (x_q - x'_q)^2/l_q^2 )

Note that if there is only one lengthscale, l comes outside the sum. In
this case we compute the unscaled distance first (in a separate function
for caching) and divide by lengthscale afterwards.
[ "Efficiently", "compute", "the", "scaled", "distance", "r", "." ]
python
train
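The same scaling idea as a self-contained numpy sketch (not GPy's cached implementation): under ARD each input column is divided by its own lengthscale before computing pairwise distances; with a single lengthscale the unscaled distance is divided afterwards.

import numpy as np

def scaled_dist(X, lengthscale, ARD=False):
    Xs = X / lengthscale if ARD else X
    sq = np.sum(Xs**2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2.0 * Xs @ Xs.T
    r = np.sqrt(np.clip(d2, 0.0, None))  # clip guards tiny negative round-off
    return r if ARD else r / lengthscale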
MillionIntegrals/vel
vel/rl/commands/record_movie_command.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/commands/record_movie_command.py#L47-L91
def record_take(self, model, env_instance, device, take_number): """ Record a single movie and store it on hard drive """ frames = [] observation = env_instance.reset() if model.is_recurrent: hidden_state = model.zero_state(1).to(device) frames.append(env_instance.render('rgb_array')) print("Evaluating environment...") while True: observation_array = np.expand_dims(np.array(observation), axis=0) observation_tensor = torch.from_numpy(observation_array).to(device) if model.is_recurrent: output = model.step(observation_tensor, hidden_state, **self.sample_args) hidden_state = output['state'] actions = output['actions'] else: actions = model.step(observation_tensor, **self.sample_args)['actions'] actions = actions.detach().cpu().numpy() observation, reward, done, epinfo = env_instance.step(actions[0]) frames.append(env_instance.render('rgb_array')) if 'episode' in epinfo: # End of an episode break takename = self.model_config.output_dir('videos', self.model_config.run_name, self.videoname.format(take_number)) pathlib.Path(os.path.dirname(takename)).mkdir(parents=True, exist_ok=True) fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G') video = cv2.VideoWriter(takename, fourcc, self.fps, (frames[0].shape[1], frames[0].shape[0])) for i in tqdm.trange(len(frames), file=sys.stdout): video.write(cv2.cvtColor(frames[i], cv2.COLOR_RGB2BGR)) video.release() print("Written {}".format(takename))
[ "def", "record_take", "(", "self", ",", "model", ",", "env_instance", ",", "device", ",", "take_number", ")", ":", "frames", "=", "[", "]", "observation", "=", "env_instance", ".", "reset", "(", ")", "if", "model", ".", "is_recurrent", ":", "hidden_state", "=", "model", ".", "zero_state", "(", "1", ")", ".", "to", "(", "device", ")", "frames", ".", "append", "(", "env_instance", ".", "render", "(", "'rgb_array'", ")", ")", "print", "(", "\"Evaluating environment...\"", ")", "while", "True", ":", "observation_array", "=", "np", ".", "expand_dims", "(", "np", ".", "array", "(", "observation", ")", ",", "axis", "=", "0", ")", "observation_tensor", "=", "torch", ".", "from_numpy", "(", "observation_array", ")", ".", "to", "(", "device", ")", "if", "model", ".", "is_recurrent", ":", "output", "=", "model", ".", "step", "(", "observation_tensor", ",", "hidden_state", ",", "*", "*", "self", ".", "sample_args", ")", "hidden_state", "=", "output", "[", "'state'", "]", "actions", "=", "output", "[", "'actions'", "]", "else", ":", "actions", "=", "model", ".", "step", "(", "observation_tensor", ",", "*", "*", "self", ".", "sample_args", ")", "[", "'actions'", "]", "actions", "=", "actions", ".", "detach", "(", ")", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "observation", ",", "reward", ",", "done", ",", "epinfo", "=", "env_instance", ".", "step", "(", "actions", "[", "0", "]", ")", "frames", ".", "append", "(", "env_instance", ".", "render", "(", "'rgb_array'", ")", ")", "if", "'episode'", "in", "epinfo", ":", "# End of an episode", "break", "takename", "=", "self", ".", "model_config", ".", "output_dir", "(", "'videos'", ",", "self", ".", "model_config", ".", "run_name", ",", "self", ".", "videoname", ".", "format", "(", "take_number", ")", ")", "pathlib", ".", "Path", "(", "os", ".", "path", ".", "dirname", "(", "takename", ")", ")", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "fourcc", "=", "cv2", ".", "VideoWriter_fourcc", "(", "'M'", ",", "'J'", ",", "'P'", ",", "'G'", ")", "video", "=", "cv2", ".", "VideoWriter", "(", "takename", ",", "fourcc", ",", "self", ".", "fps", ",", "(", "frames", "[", "0", "]", ".", "shape", "[", "1", "]", ",", "frames", "[", "0", "]", ".", "shape", "[", "0", "]", ")", ")", "for", "i", "in", "tqdm", ".", "trange", "(", "len", "(", "frames", ")", ",", "file", "=", "sys", ".", "stdout", ")", ":", "video", ".", "write", "(", "cv2", ".", "cvtColor", "(", "frames", "[", "i", "]", ",", "cv2", ".", "COLOR_RGB2BGR", ")", ")", "video", ".", "release", "(", ")", "print", "(", "\"Written {}\"", ".", "format", "(", "takename", ")", ")" ]
Record a single movie and store it on hard drive
[ "Record", "a", "single", "movie", "and", "store", "it", "on", "hard", "drive" ]
python
train
boriel/zxbasic
symbols/number.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/symbols/number.py#L21-L31
def _get_val(other): """ Given a Number, a Numeric Constant or a python number return its value """ assert isinstance(other, (numbers.Number, SymbolNUMBER, SymbolCONST)) if isinstance(other, SymbolNUMBER): return other.value if isinstance(other, SymbolCONST): return other.expr.value return other
[ "def", "_get_val", "(", "other", ")", ":", "assert", "isinstance", "(", "other", ",", "(", "numbers", ".", "Number", ",", "SymbolNUMBER", ",", "SymbolCONST", ")", ")", "if", "isinstance", "(", "other", ",", "SymbolNUMBER", ")", ":", "return", "other", ".", "value", "if", "isinstance", "(", "other", ",", "SymbolCONST", ")", ":", "return", "other", ".", "expr", ".", "value", "return", "other" ]
Given a Number, a Numeric Constant or a python number return its value
[ "Given", "a", "Number", "a", "Numeric", "Constant", "or", "a", "python", "number", "return", "its", "value" ]
python
train
inspirehep/inspire-json-merger
inspire_json_merger/pre_filters.py
https://github.com/inspirehep/inspire-json-merger/blob/6af3140fcf7c3f851141c0928eedfe99fddeeda0/inspire_json_merger/pre_filters.py#L33-L37
def remove_elements_with_source(source, field): """Remove all elements matching ``source`` in ``field``.""" return freeze( [element for element in field if element.get('source', '').lower() != source] )
[ "def", "remove_elements_with_source", "(", "source", ",", "field", ")", ":", "return", "freeze", "(", "[", "element", "for", "element", "in", "field", "if", "element", ".", "get", "(", "'source'", ",", "''", ")", ".", "lower", "(", ")", "!=", "source", "]", ")" ]
Remove all elements matching ``source`` in ``field``.
[ "Remove", "all", "elements", "matching", "source", "in", "field", "." ]
python
train
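A hypothetical usage sketch, assuming freeze comes from pyrsistent as this module's helpers suggest; note the filter compares lower-cased source values, so the match is case-insensitive on the element side:

records = [
    {'source': 'arXiv', 'value': 'a'},
    {'source': 'publisher', 'value': 'b'},
]
kept = remove_elements_with_source('arxiv', records)
# kept is a frozen vector holding only the 'publisher' element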
not-na/peng3d
peng3d/gui/layered.py
https://github.com/not-na/peng3d/blob/1151be665b26cc8a479f6307086ba919e4d32d85/peng3d/gui/layered.py#L491-L506
def redraw_label(self): """ Re-draws the text by calculating its position. Currently, the text will always be centered on the position of the layer. """ # Convenience variables x,y,_,_ = self.getPos() sx,sy = self.getSize() self._label.x = x+sx/2. self._label.y = y+sy/2. self._label.width = sx # Height is not set, would look weird otherwise #self._label.height = sx self._label._update()
[ "def", "redraw_label", "(", "self", ")", ":", "# Convenience variables", "x", ",", "y", ",", "_", ",", "_", "=", "self", ".", "getPos", "(", ")", "sx", ",", "sy", "=", "self", ".", "getSize", "(", ")", "self", ".", "_label", ".", "x", "=", "x", "+", "sx", "/", "2.", "self", ".", "_label", ".", "y", "=", "y", "+", "sy", "/", "2.", "self", ".", "_label", ".", "width", "=", "sx", "# Height is not set, would look weird otherwise", "#self._label.height = sx", "self", ".", "_label", ".", "_update", "(", ")" ]
Re-draws the text by calculating its position. Currently, the text will always be centered on the position of the layer.
[ "Re", "-", "draws", "the", "text", "by", "calculating", "its", "position", ".", "Currently", "the", "text", "will", "always", "be", "centered", "on", "the", "position", "of", "the", "layer", "." ]
python
test
wummel/dosage
scripts/creators.py
https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/scripts/creators.py#L66-L79
def print_results(args): """Print comics.""" min_comics, filename = args with codecs.open(filename, 'a', 'utf-8') as fp: for name, url in sorted(load_result(json_file).items()): if name in exclude_comics: continue if has_gocomics_comic(name): prefix = u'# duplicate of gocomics ' else: prefix = u'' fp.write(u"%sadd(%r, %r)\n" % ( prefix, str(truncate_name(name)), str(url)) )
[ "def", "print_results", "(", "args", ")", ":", "min_comics", ",", "filename", "=", "args", "with", "codecs", ".", "open", "(", "filename", ",", "'a'", ",", "'utf-8'", ")", "as", "fp", ":", "for", "name", ",", "url", "in", "sorted", "(", "load_result", "(", "json_file", ")", ".", "items", "(", ")", ")", ":", "if", "name", "in", "exclude_comics", ":", "continue", "if", "has_gocomics_comic", "(", "name", ")", ":", "prefix", "=", "u'# duplicate of gocomics '", "else", ":", "prefix", "=", "u''", "fp", ".", "write", "(", "u\"%sadd(%r, %r)\\n\"", "%", "(", "prefix", ",", "str", "(", "truncate_name", "(", "name", ")", ")", ",", "str", "(", "url", ")", ")", ")" ]
Print comics.
[ "Print", "comics", "." ]
python
train
geomet/geomet
geomet/wkt.py
https://github.com/geomet/geomet/blob/b82d7118113ab723751eba3de5df98c368423c2b/geomet/wkt.py#L203-L219
def _dump_point(obj, decimals): """ Dump a GeoJSON-like Point object to WKT. :param dict obj: A GeoJSON-like `dict` representing a Point. :param int decimals: int which indicates the number of digits to display after the decimal point when formatting coordinates. :returns: WKT representation of the input GeoJSON Point ``obj``. """ coords = obj['coordinates'] pt = 'POINT (%s)' % ' '.join(_round_and_pad(c, decimals) for c in coords) return pt
[ "def", "_dump_point", "(", "obj", ",", "decimals", ")", ":", "coords", "=", "obj", "[", "'coordinates'", "]", "pt", "=", "'POINT (%s)'", "%", "' '", ".", "join", "(", "_round_and_pad", "(", "c", ",", "decimals", ")", "for", "c", "in", "coords", ")", "return", "pt" ]
Dump a GeoJSON-like Point object to WKT. :param dict obj: A GeoJSON-like `dict` representing a Point. :param int decimals: int which indicates the number of digits to display after the decimal point when formatting coordinates. :returns: WKT representation of the input GeoJSON Point ``obj``.
[ "Dump", "a", "GeoJSON", "-", "like", "Point", "object", "to", "WKT", "." ]
python
train
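A usage sketch, assuming _round_and_pad rounds and zero-pads each coordinate to the requested number of decimals as its name suggests:

pt = {'type': 'Point', 'coordinates': [0.123456, 10.987654]}
print(_dump_point(pt, 2))  # POINT (0.12 10.99)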
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L4764-L4782
def get_assessments_by_bank(self, bank_id): """Gets the list of ``Assessments`` associated with a ``Bank``. arg: bank_id (osid.id.Id): ``Id`` of the ``Bank`` return: (osid.assessment.AssessmentList) - list of related assessments raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resources_by_bin mgr = self._get_provider_manager('ASSESSMENT', local=True) lookup_session = mgr.get_assessment_lookup_session_for_bank(bank_id, proxy=self._proxy) lookup_session.use_isolated_bank_view() return lookup_session.get_assessments()
[ "def", "get_assessments_by_bank", "(", "self", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_resources_by_bin", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_assessment_lookup_session_for_bank", "(", "bank_id", ",", "proxy", "=", "self", ".", "_proxy", ")", "lookup_session", ".", "use_isolated_bank_view", "(", ")", "return", "lookup_session", ".", "get_assessments", "(", ")" ]
Gets the list of ``Assessments`` associated with a ``Bank``. arg: bank_id (osid.id.Id): ``Id`` of the ``Bank`` return: (osid.assessment.AssessmentList) - list of related assessments raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Assessments", "associated", "with", "a", "Bank", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/execution_control/processing_controller/scheduler/scheduler.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/processing_controller/scheduler/scheduler.py#L44-L60
def _init_queue(): """Initialise the Processing Block queue from the database. This method should populate the queue from the current state of the Configuration Database. This needs to be based on the current set of Processing Blocks in the database and consider events on these processing blocks. """ LOG.info('Initialising Processing Block queue.') queue = ProcessingBlockQueue() active_pb_ids = ProcessingBlockList().active LOG.info('Initialising PC PB queue: %s', active_pb_ids) for pb_id in active_pb_ids: pb = ProcessingBlock(pb_id) queue.put(pb.id, pb.priority, pb.type) return queue
[ "def", "_init_queue", "(", ")", ":", "LOG", ".", "info", "(", "'Initialising Processing Block queue.'", ")", "queue", "=", "ProcessingBlockQueue", "(", ")", "active_pb_ids", "=", "ProcessingBlockList", "(", ")", ".", "active", "LOG", ".", "info", "(", "'Initialising PC PB queue: %s'", ",", "active_pb_ids", ")", "for", "pb_id", "in", "active_pb_ids", ":", "pb", "=", "ProcessingBlock", "(", "pb_id", ")", "queue", ".", "put", "(", "pb", ".", "id", ",", "pb", ".", "priority", ",", "pb", ".", "type", ")", "return", "queue" ]
Initialise the Processing Block queue from the database. This method should populate the queue from the current state of the Configuration Database. This needs to be based on the current set of Processing Blocks in the database and consider events on these processing blocks.
[ "Initialise", "the", "Processing", "Block", "queue", "from", "the", "database", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L545-L562
def ip_rtm_config_route_static_route_nh_route_attributes_metric(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def") rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm") route = ET.SubElement(rtm_config, "route") static_route_nh = ET.SubElement(route, "static-route-nh") static_route_dest_key = ET.SubElement(static_route_nh, "static-route-dest") static_route_dest_key.text = kwargs.pop('static_route_dest') static_route_next_hop_key = ET.SubElement(static_route_nh, "static-route-next-hop") static_route_next_hop_key.text = kwargs.pop('static_route_next_hop') route_attributes = ET.SubElement(static_route_nh, "route-attributes") metric = ET.SubElement(route_attributes, "metric") metric.text = kwargs.pop('metric') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "ip_rtm_config_route_static_route_nh_route_attributes_metric", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "rtm_config", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"rtm-config\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-rtm\"", ")", "route", "=", "ET", ".", "SubElement", "(", "rtm_config", ",", "\"route\"", ")", "static_route_nh", "=", "ET", ".", "SubElement", "(", "route", ",", "\"static-route-nh\"", ")", "static_route_dest_key", "=", "ET", ".", "SubElement", "(", "static_route_nh", ",", "\"static-route-dest\"", ")", "static_route_dest_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_dest'", ")", "static_route_next_hop_key", "=", "ET", ".", "SubElement", "(", "static_route_nh", ",", "\"static-route-next-hop\"", ")", "static_route_next_hop_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'static_route_next_hop'", ")", "route_attributes", "=", "ET", ".", "SubElement", "(", "static_route_nh", ",", "\"route-attributes\"", ")", "metric", "=", "ET", ".", "SubElement", "(", "route_attributes", ",", "\"metric\"", ")", "metric", ".", "text", "=", "kwargs", ".", "pop", "(", "'metric'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_notification_stream.py#L388-L397
def OSPFNeighborState_OSPFNeighborIpAddress(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") OSPFNeighborState = ET.SubElement(config, "OSPFNeighborState", xmlns="http://brocade.com/ns/brocade-notification-stream") OSPFNeighborIpAddress = ET.SubElement(OSPFNeighborState, "OSPFNeighborIpAddress") OSPFNeighborIpAddress.text = kwargs.pop('OSPFNeighborIpAddress') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "OSPFNeighborState_OSPFNeighborIpAddress", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "OSPFNeighborState", "=", "ET", ".", "SubElement", "(", "config", ",", "\"OSPFNeighborState\"", ",", "xmlns", "=", "\"http://brocade.com/ns/brocade-notification-stream\"", ")", "OSPFNeighborIpAddress", "=", "ET", ".", "SubElement", "(", "OSPFNeighborState", ",", "\"OSPFNeighborIpAddress\"", ")", "OSPFNeighborIpAddress", ".", "text", "=", "kwargs", ".", "pop", "(", "'OSPFNeighborIpAddress'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
fabioz/PyDev.Debugger
third_party/pep8/lib2to3/lib2to3/pytree.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/lib2to3/lib2to3/pytree.py#L429-L445
def convert(gr, raw_node):
    """
    Convert raw node information to a Node or Leaf instance.

    This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is built
    strictly bottom-up.
    """
    type, value, context, children = raw_node
    if children or type in gr.number2symbol:
        # If there's exactly one child, return that child instead of
        # creating a new node.
        if len(children) == 1:
            return children[0]
        return Node(type, children, context=context)
    else:
        return Leaf(type, value, context=context)
[ "def", "convert", "(", "gr", ",", "raw_node", ")", ":", "type", ",", "value", ",", "context", ",", "children", "=", "raw_node", "if", "children", "or", "type", "in", "gr", ".", "number2symbol", ":", "# If there's exactly one child, return that child instead of", "# creating a new node.", "if", "len", "(", "children", ")", "==", "1", ":", "return", "children", "[", "0", "]", "return", "Node", "(", "type", ",", "children", ",", "context", "=", "context", ")", "else", ":", "return", "Leaf", "(", "type", ",", "value", ",", "context", "=", "context", ")" ]
Convert raw node information to a Node or Leaf instance.

This is passed to the parser driver which calls it whenever a reduction of a
grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
[ "Convert", "raw", "node", "information", "to", "a", "Node", "or", "Leaf", "instance", "." ]
python
train
resonai/ybt
yabt/buildcontext.py
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/buildcontext.py#L167-L181
def remove_target(self, target_name: str): """Remove (unregister) a `target` from this build context. Removes the target instance with the given name, if it exists, from both the `targets` map and the `targets_by_module` map. Doesn't do anything if no target with that name is found. Doesn't touch the target graph, if it exists. """ if target_name in self.targets: del self.targets[target_name] build_module = split_build_module(target_name) if build_module in self.targets_by_module: self.targets_by_module[build_module].remove(target_name)
[ "def", "remove_target", "(", "self", ",", "target_name", ":", "str", ")", ":", "if", "target_name", "in", "self", ".", "targets", ":", "del", "self", ".", "targets", "[", "target_name", "]", "build_module", "=", "split_build_module", "(", "target_name", ")", "if", "build_module", "in", "self", ".", "targets_by_module", ":", "self", ".", "targets_by_module", "[", "build_module", "]", ".", "remove", "(", "target_name", ")" ]
Remove (unregister) a `target` from this build context. Removes the target instance with the given name, if it exists, from both the `targets` map and the `targets_by_module` map. Doesn't do anything if no target with that name is found. Doesn't touch the target graph, if it exists.
[ "Remove", "(", "unregister", ")", "a", "target", "from", "this", "build", "context", "." ]
python
train
pltrdy/rouge
rouge/rouge_score.py
https://github.com/pltrdy/rouge/blob/7bf8a83af5ca5c1677b93620b4e1f85ffd63b377/rouge/rouge_score.py#L270-L324
def rouge_l_summary_level(evaluated_sentences, reference_sentences):
    """
    Computes ROUGE-L (summary level) of two text collections of sentences.
    http://research.microsoft.com/en-us/um/people/cyl/download/papers/
    rouge-working-note-v1.3.1.pdf

    Calculated according to:
    R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
    P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
    F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)

    where:
    SUM(i,u) = SUM from i through u
    u = number of sentences in reference summary
    C = Candidate summary made up of v sentences
    m = number of words in reference summary
    n = number of words in candidate summary

    Args:
      evaluated_sentences: The sentences that have been picked by the
                           summarizer
      reference_sentences: The sentences in the reference summaries

    Returns:
      A dict: {'f': F_lcs, 'p': P_lcs, 'r': R_lcs}

    Raises:
      ValueError: raises exception if a param has len <= 0
    """
    if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:
        raise ValueError("Collections must contain at least 1 sentence.")

    # total number of words in reference sentences
    m = len(set(_split_into_words(reference_sentences)))

    # total number of words in evaluated sentences
    n = len(set(_split_into_words(evaluated_sentences)))

    # print("m,n %d %d" % (m, n))
    union_lcs_sum_across_all_references = 0
    union = set()
    for ref_s in reference_sentences:
        lcs_count, union = _union_lcs(evaluated_sentences,
                                      ref_s,
                                      prev_union=union)
        union_lcs_sum_across_all_references += lcs_count

    llcs = union_lcs_sum_across_all_references
    r_lcs = llcs / m
    p_lcs = llcs / n

    beta = p_lcs / (r_lcs + 1e-12)
    num = (1 + (beta**2)) * r_lcs * p_lcs
    denom = r_lcs + ((beta**2) * p_lcs)
    f_lcs = num / (denom + 1e-12)
    return {"f": f_lcs, "p": p_lcs, "r": r_lcs}
[ "def", "rouge_l_summary_level", "(", "evaluated_sentences", ",", "reference_sentences", ")", ":", "if", "len", "(", "evaluated_sentences", ")", "<=", "0", "or", "len", "(", "reference_sentences", ")", "<=", "0", ":", "raise", "ValueError", "(", "\"Collections must contain at least 1 sentence.\"", ")", "# total number of words in reference sentences", "m", "=", "len", "(", "set", "(", "_split_into_words", "(", "reference_sentences", ")", ")", ")", "# total number of words in evaluated sentences", "n", "=", "len", "(", "set", "(", "_split_into_words", "(", "evaluated_sentences", ")", ")", ")", "# print(\"m,n %d %d\" % (m, n))", "union_lcs_sum_across_all_references", "=", "0", "union", "=", "set", "(", ")", "for", "ref_s", "in", "reference_sentences", ":", "lcs_count", ",", "union", "=", "_union_lcs", "(", "evaluated_sentences", ",", "ref_s", ",", "prev_union", "=", "union", ")", "union_lcs_sum_across_all_references", "+=", "lcs_count", "llcs", "=", "union_lcs_sum_across_all_references", "r_lcs", "=", "llcs", "/", "m", "p_lcs", "=", "llcs", "/", "n", "beta", "=", "p_lcs", "/", "(", "r_lcs", "+", "1e-12", ")", "num", "=", "(", "1", "+", "(", "beta", "**", "2", ")", ")", "*", "r_lcs", "*", "p_lcs", "denom", "=", "r_lcs", "+", "(", "(", "beta", "**", "2", ")", "*", "p_lcs", ")", "f_lcs", "=", "num", "/", "(", "denom", "+", "1e-12", ")", "return", "{", "\"f\"", ":", "f_lcs", ",", "\"p\"", ":", "p_lcs", ",", "\"r\"", ":", "r_lcs", "}" ]
Computes ROUGE-L (summary level) of two text collections of sentences.
http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf

Calculated according to:
R_lcs = SUM(1, u)[LCS<union>(r_i,C)]/m
P_lcs = SUM(1, u)[LCS<union>(r_i,C)]/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)

where:
SUM(i,u) = SUM from i through u
u = number of sentences in reference summary
C = Candidate summary made up of v sentences
m = number of words in reference summary
n = number of words in candidate summary

Args:
  evaluated_sentences: The sentences that have been picked by the
                       summarizer
  reference_sentences: The sentences in the reference summaries

Returns:
  A dict: {'f': F_lcs, 'p': P_lcs, 'r': R_lcs}

Raises:
  ValueError: raises exception if a param has len <= 0
[ "Computes", "ROUGE", "-", "L", "(", "summary", "level", ")", "of", "two", "text", "collections", "of", "sentences", ".", "http", ":", "//", "research", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "um", "/", "people", "/", "cyl", "/", "download", "/", "papers", "/", "rouge", "-", "working", "-", "note", "-", "v1", ".", "3", ".", "1", ".", "pdf" ]
python
test
lsst-sqre/ltd-conveyor
ltdconveyor/s3/upload.py
https://github.com/lsst-sqre/ltd-conveyor/blob/c492937c4c1e050ccc4a0b9dcc38f9980d57e305/ltdconveyor/s3/upload.py#L434-L464
def delete_directory(self, dirname): """Delete a directory (and contents) from the bucket. Parameters ---------- dirname : `str` Name of the directory, relative to ``bucket_root/``. Raises ------ RuntimeError Raised when there are no objects to delete (directory does not exist). """ key = os.path.join(self._bucket_root, dirname) if not key.endswith('/'): key += '/' key_objects = [{'Key': obj.key} for obj in self._bucket.objects.filter(Prefix=key)] if len(key_objects) == 0: msg = 'No objects in bucket directory {}'.format(dirname) raise RuntimeError(msg) delete_keys = {'Objects': key_objects} # based on http://stackoverflow.com/a/34888103 s3 = self._session.resource('s3') r = s3.meta.client.delete_objects(Bucket=self._bucket.name, Delete=delete_keys) self._logger.debug(r) if 'Errors' in r: raise S3Error('S3 could not delete {0}'.format(key))
[ "def", "delete_directory", "(", "self", ",", "dirname", ")", ":", "key", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_bucket_root", ",", "dirname", ")", "if", "not", "key", ".", "endswith", "(", "'/'", ")", ":", "key", "+=", "'/'", "key_objects", "=", "[", "{", "'Key'", ":", "obj", ".", "key", "}", "for", "obj", "in", "self", ".", "_bucket", ".", "objects", ".", "filter", "(", "Prefix", "=", "key", ")", "]", "if", "len", "(", "key_objects", ")", "==", "0", ":", "msg", "=", "'No objects in bucket directory {}'", ".", "format", "(", "dirname", ")", "raise", "RuntimeError", "(", "msg", ")", "delete_keys", "=", "{", "'Objects'", ":", "key_objects", "}", "# based on http://stackoverflow.com/a/34888103", "s3", "=", "self", ".", "_session", ".", "resource", "(", "'s3'", ")", "r", "=", "s3", ".", "meta", ".", "client", ".", "delete_objects", "(", "Bucket", "=", "self", ".", "_bucket", ".", "name", ",", "Delete", "=", "delete_keys", ")", "self", ".", "_logger", ".", "debug", "(", "r", ")", "if", "'Errors'", "in", "r", ":", "raise", "S3Error", "(", "'S3 could not delete {0}'", ".", "format", "(", "key", ")", ")" ]
Delete a directory (and contents) from the bucket. Parameters ---------- dirname : `str` Name of the directory, relative to ``bucket_root/``. Raises ------ RuntimeError Raised when there are no objects to delete (directory does not exist).
[ "Delete", "a", "directory", "(", "and", "contents", ")", "from", "the", "bucket", "." ]
python
test
awslabs/serverless-application-model
samtranslator/policy_template_processor/template.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/policy_template_processor/template.py#L102-L115
def from_dict(template_name, template_values_dict): """ Parses the input and returns an instance of this class. :param string template_name: Name of the template :param dict template_values_dict: Dictionary containing the value of the template. This dict must have passed the JSON Schema validation. :return Template: Instance of this class containing the values provided in this dictionary """ parameters = template_values_dict.get("Parameters", {}) definition = template_values_dict.get("Definition", {}) return Template(template_name, parameters, definition)
[ "def", "from_dict", "(", "template_name", ",", "template_values_dict", ")", ":", "parameters", "=", "template_values_dict", ".", "get", "(", "\"Parameters\"", ",", "{", "}", ")", "definition", "=", "template_values_dict", ".", "get", "(", "\"Definition\"", ",", "{", "}", ")", "return", "Template", "(", "template_name", ",", "parameters", ",", "definition", ")" ]
Parses the input and returns an instance of this class. :param string template_name: Name of the template :param dict template_values_dict: Dictionary containing the value of the template. This dict must have passed the JSON Schema validation. :return Template: Instance of this class containing the values provided in this dictionary
[ "Parses", "the", "input", "and", "returns", "an", "instance", "of", "this", "class", "." ]
python
train
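A usage sketch with a minimal template dict. The shape follows the Parameters/Definition split the method reads; the names themselves are illustrative, not from the source:

values = {
    'Parameters': {'QueueName': {'Description': 'Name of the queue'}},
    'Definition': {'Statement': []},
}
template = Template.from_dict('SQSPollerPolicy', values)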
jobovy/galpy
galpy/potential/Potential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L1466-L1536
def ttensor(self,R,z,phi=0.,t=0.,eigenval=False): """ NAME: ttensor PURPOSE: Calculate the tidal tensor Tij=-d(Psi)(dxidxj) INPUT: R - Galactocentric radius (can be Quantity) z - height (can be Quantity) phi - azimuth (optional; can be Quantity) t - time (optional; can be Quantity) eigenval - return eigenvalues if true (optional; boolean) OUTPUT: Tidal Tensor HISTORY: 2018-03-21 - Written - Webb (UofT) """ if self.isNonAxi: raise PotentialError("Tidal tensor calculation is currently only implemented for axisymmetric potentials") #Evaluate forces, angles and derivatives Rderiv= -self.Rforce(R,z,phi=phi,t=t,use_physical=False) phideriv= -self.phiforce(R,z,phi=phi,t=t,use_physical=False) R2deriv= self.R2deriv(R,z,phi=phi,t=t,use_physical=False) z2deriv= self.z2deriv(R,z,phi=phi,t=t,use_physical=False) phi2deriv= self.phi2deriv(R,z,phi=phi,t=t,use_physical=False) Rzderiv= self.Rzderiv(R,z,phi=phi,t=t,use_physical=False) Rphideriv= self.Rphideriv(R,z,phi=phi,t=t,use_physical=False) #Temporarily set zphideriv to zero until zphideriv is added to Class zphideriv=0.0 cosphi=nu.cos(phi) sinphi=nu.sin(phi) cos2phi=cosphi**2.0 sin2phi=sinphi**2.0 R2=R**2.0 R3=R**3.0 # Tidal tensor txx= R2deriv*cos2phi-Rphideriv*2.*cosphi*sinphi/R+Rderiv*sin2phi/R\ +phi2deriv*sin2phi/R2+phideriv*2.*cosphi*sinphi/R2 tyx= R2deriv*sinphi*cosphi+Rphideriv*(cos2phi-sin2phi)/R\ -Rderiv*sinphi*cosphi/R-phi2deriv*sinphi*cosphi/R2\ +phideriv*(sin2phi-cos2phi)/R2 tzx=Rzderiv*cosphi-Rderiv*cosphi*z/R2-zphideriv*sinphi/R\ +phideriv*2.*sinphi*z/R3 tyy=R2deriv*sin2phi+Rphideriv*2.*cosphi*sinphi/R+Rderiv*cos2phi/R\ +phi2deriv*cos2phi/R2-phideriv*2.*sinphi*cosphi/R2 txy=tyx tzy=Rzderiv*sinphi-Rderiv*sinphi*z/R2+zphideriv*cosphi/R\ -phideriv*2.*cosphi*z/R3 txz=Rzderiv*cosphi-zphideriv*sinphi/R tyz=Rzderiv*sinphi+zphideriv*cosphi/R tzz=z2deriv tij=-nu.array([[txx,txy,txz],[tyx,tyy,tyz],[tzx,tzy,tzz]]) if eigenval: return nu.linalg.eigvals(tij) else: return tij
[ "def", "ttensor", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0.", ",", "t", "=", "0.", ",", "eigenval", "=", "False", ")", ":", "if", "self", ".", "isNonAxi", ":", "raise", "PotentialError", "(", "\"Tidal tensor calculation is currently only implemented for axisymmetric potentials\"", ")", "#Evaluate forces, angles and derivatives", "Rderiv", "=", "-", "self", ".", "Rforce", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "phideriv", "=", "-", "self", ".", "phiforce", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "R2deriv", "=", "self", ".", "R2deriv", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "z2deriv", "=", "self", ".", "z2deriv", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "phi2deriv", "=", "self", ".", "phi2deriv", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "Rzderiv", "=", "self", ".", "Rzderiv", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "Rphideriv", "=", "self", ".", "Rphideriv", "(", "R", ",", "z", ",", "phi", "=", "phi", ",", "t", "=", "t", ",", "use_physical", "=", "False", ")", "#Temporarily set zphideriv to zero until zphideriv is added to Class", "zphideriv", "=", "0.0", "cosphi", "=", "nu", ".", "cos", "(", "phi", ")", "sinphi", "=", "nu", ".", "sin", "(", "phi", ")", "cos2phi", "=", "cosphi", "**", "2.0", "sin2phi", "=", "sinphi", "**", "2.0", "R2", "=", "R", "**", "2.0", "R3", "=", "R", "**", "3.0", "# Tidal tensor", "txx", "=", "R2deriv", "*", "cos2phi", "-", "Rphideriv", "*", "2.", "*", "cosphi", "*", "sinphi", "/", "R", "+", "Rderiv", "*", "sin2phi", "/", "R", "+", "phi2deriv", "*", "sin2phi", "/", "R2", "+", "phideriv", "*", "2.", "*", "cosphi", "*", "sinphi", "/", "R2", "tyx", "=", "R2deriv", "*", "sinphi", "*", "cosphi", "+", "Rphideriv", "*", "(", "cos2phi", "-", "sin2phi", ")", "/", "R", "-", "Rderiv", "*", "sinphi", "*", "cosphi", "/", "R", "-", "phi2deriv", "*", "sinphi", "*", "cosphi", "/", "R2", "+", "phideriv", "*", "(", "sin2phi", "-", "cos2phi", ")", "/", "R2", "tzx", "=", "Rzderiv", "*", "cosphi", "-", "Rderiv", "*", "cosphi", "*", "z", "/", "R2", "-", "zphideriv", "*", "sinphi", "/", "R", "+", "phideriv", "*", "2.", "*", "sinphi", "*", "z", "/", "R3", "tyy", "=", "R2deriv", "*", "sin2phi", "+", "Rphideriv", "*", "2.", "*", "cosphi", "*", "sinphi", "/", "R", "+", "Rderiv", "*", "cos2phi", "/", "R", "+", "phi2deriv", "*", "cos2phi", "/", "R2", "-", "phideriv", "*", "2.", "*", "sinphi", "*", "cosphi", "/", "R2", "txy", "=", "tyx", "tzy", "=", "Rzderiv", "*", "sinphi", "-", "Rderiv", "*", "sinphi", "*", "z", "/", "R2", "+", "zphideriv", "*", "cosphi", "/", "R", "-", "phideriv", "*", "2.", "*", "cosphi", "*", "z", "/", "R3", "txz", "=", "Rzderiv", "*", "cosphi", "-", "zphideriv", "*", "sinphi", "/", "R", "tyz", "=", "Rzderiv", "*", "sinphi", "+", "zphideriv", "*", "cosphi", "/", "R", "tzz", "=", "z2deriv", "tij", "=", "-", "nu", ".", "array", "(", "[", "[", "txx", ",", "txy", ",", "txz", "]", ",", "[", "tyx", ",", "tyy", ",", "tyz", "]", ",", "[", "tzx", ",", "tzy", ",", "tzz", "]", "]", ")", "if", "eigenval", ":", "return", "nu", ".", "linalg", ".", "eigvals", "(", "tij", ")", "else", ":", "return", "tij" ]
NAME: ttensor PURPOSE: Calculate the tidal tensor Tij=-d(Psi)(dxidxj) INPUT: R - Galactocentric radius (can be Quantity) z - height (can be Quantity) phi - azimuth (optional; can be Quantity) t - time (optional; can be Quantity) eigenval - return eigenvalues if true (optional; boolean) OUTPUT: Tidal Tensor HISTORY: 2018-03-21 - Written - Webb (UofT)
[ "NAME", ":", "ttensor", "PURPOSE", ":", "Calculate", "the", "tidal", "tensor", "Tij", "=", "-", "d", "(", "Psi", ")", "(", "dxidxj", ")", "INPUT", ":", "R", "-", "Galactocentric", "radius", "(", "can", "be", "Quantity", ")", "z", "-", "height", "(", "can", "be", "Quantity", ")", "phi", "-", "azimuth", "(", "optional", ";", "can", "be", "Quantity", ")", "t", "-", "time", "(", "optional", ";", "can", "be", "Quantity", ")", "eigenval", "-", "return", "eigenvalues", "if", "true", "(", "optional", ";", "boolean", ")", "OUTPUT", ":", "Tidal", "Tensor", "HISTORY", ":", "2018", "-", "03", "-", "21", "-", "Written", "-", "Webb", "(", "UofT", ")" ]
python
train
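A hypothetical usage sketch with one of galpy's axisymmetric potentials (the constructor arguments here are illustrative values, not from the source):

from galpy.potential import MiyamotoNagaiPotential

pot = MiyamotoNagaiPotential(a=0.5, b=0.05, normalize=1.)
tij = pot.ttensor(1.0, 0.1)                  # 3x3 tidal tensor at (R, z)
lams = pot.ttensor(1.0, 0.1, eigenval=True)  # its three eigenvalues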
google/apitools
apitools/base/protorpclite/messages.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/protorpclite/messages.py#L1697-L1714
def value_to_message(self, value): """Convert a value instance to a message. Used by serializers to convert Python user types to underlying messages for transmission. Args: value: A value of type self.type. Returns: An instance of type self.message_type. """ if not isinstance(value, self.type): raise EncodeError('Expected type %s, got %s: %r' % (self.type.__name__, type(value).__name__, value)) return value
[ "def", "value_to_message", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "self", ".", "type", ")", ":", "raise", "EncodeError", "(", "'Expected type %s, got %s: %r'", "%", "(", "self", ".", "type", ".", "__name__", ",", "type", "(", "value", ")", ".", "__name__", ",", "value", ")", ")", "return", "value" ]
Convert a value instance to a message. Used by serializers to convert Python user types to underlying messages for transmission. Args: value: A value of type self.type. Returns: An instance of type self.message_type.
[ "Convert", "a", "value", "instance", "to", "a", "message", "." ]
python
train
PyCQA/astroid
astroid/bases.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/bases.py#L120-L151
def _infer_stmts(stmts, context, frame=None): """Return an iterator on statements inferred by each statement in *stmts*.""" inferred = False if context is not None: name = context.lookupname context = context.clone() else: name = None context = contextmod.InferenceContext() for stmt in stmts: if stmt is util.Uninferable: yield stmt inferred = True continue context.lookupname = stmt._infer_name(frame, name) try: for inferred in stmt.infer(context=context): yield inferred inferred = True except exceptions.NameInferenceError: continue except exceptions.InferenceError: yield util.Uninferable inferred = True if not inferred: raise exceptions.InferenceError( "Inference failed for all members of {stmts!r}.", stmts=stmts, frame=frame, context=context, )
[ "def", "_infer_stmts", "(", "stmts", ",", "context", ",", "frame", "=", "None", ")", ":", "inferred", "=", "False", "if", "context", "is", "not", "None", ":", "name", "=", "context", ".", "lookupname", "context", "=", "context", ".", "clone", "(", ")", "else", ":", "name", "=", "None", "context", "=", "contextmod", ".", "InferenceContext", "(", ")", "for", "stmt", "in", "stmts", ":", "if", "stmt", "is", "util", ".", "Uninferable", ":", "yield", "stmt", "inferred", "=", "True", "continue", "context", ".", "lookupname", "=", "stmt", ".", "_infer_name", "(", "frame", ",", "name", ")", "try", ":", "for", "inferred", "in", "stmt", ".", "infer", "(", "context", "=", "context", ")", ":", "yield", "inferred", "inferred", "=", "True", "except", "exceptions", ".", "NameInferenceError", ":", "continue", "except", "exceptions", ".", "InferenceError", ":", "yield", "util", ".", "Uninferable", "inferred", "=", "True", "if", "not", "inferred", ":", "raise", "exceptions", ".", "InferenceError", "(", "\"Inference failed for all members of {stmts!r}.\"", ",", "stmts", "=", "stmts", ",", "frame", "=", "frame", ",", "context", "=", "context", ",", ")" ]
Return an iterator on statements inferred by each statement in *stmts*.
[ "Return", "an", "iterator", "on", "statements", "inferred", "by", "each", "statement", "in", "*", "stmts", "*", "." ]
python
train
inveniosoftware/invenio-marc21
invenio_marc21/serializers/marcxml.py
https://github.com/inveniosoftware/invenio-marc21/blob/b91347b5b000757b6dc9dc1be88d76ca09611905/invenio_marc21/serializers/marcxml.py#L44-L54
def dump(self, obj): """Serialize object with schema. :param obj: The object to serialize. :returns: The object serialized. """ if self.schema_class: obj = self.schema_class().dump(obj).data else: obj = obj['metadata'] return super(MARCXMLSerializer, self).dump(obj)
[ "def", "dump", "(", "self", ",", "obj", ")", ":", "if", "self", ".", "schema_class", ":", "obj", "=", "self", ".", "schema_class", "(", ")", ".", "dump", "(", "obj", ")", ".", "data", "else", ":", "obj", "=", "obj", "[", "'metadata'", "]", "return", "super", "(", "MARCXMLSerializer", ",", "self", ")", ".", "dump", "(", "obj", ")" ]
Serialize object with schema. :param obj: The object to serialize. :returns: The object serialized.
[ "Serialize", "object", "with", "schema", "." ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/urllib3/contrib/pyopenssl.py#L157-L192
def _dnsname_to_stdlib(name): """ Converts a dNSName SubjectAlternativeName field to the form used by the standard library on the given Python version. Cryptography produces a dNSName as a unicode string that was idna-decoded from ASCII bytes. We need to idna-encode that string to get it back, and then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). If the name cannot be idna-encoded then we return None signalling that the name given should be skipped. """ def idna_encode(name): """ Borrowed wholesale from the Python Cryptography Project. It turns out that we can't just safely call `idna.encode`: it can explode for wildcard names. This avoids that problem. """ from pipenv.patched.notpip._vendor import idna try: for prefix in [u'*.', u'.']: if name.startswith(prefix): name = name[len(prefix):] return prefix.encode('ascii') + idna.encode(name) return idna.encode(name) except idna.core.IDNAError: return None name = idna_encode(name) if name is None: return None elif sys.version_info >= (3, 0): name = name.decode('utf-8') return name
[ "def", "_dnsname_to_stdlib", "(", "name", ")", ":", "def", "idna_encode", "(", "name", ")", ":", "\"\"\"\n Borrowed wholesale from the Python Cryptography Project. It turns out\n that we can't just safely call `idna.encode`: it can explode for\n wildcard names. This avoids that problem.\n \"\"\"", "from", "pipenv", ".", "patched", ".", "notpip", ".", "_vendor", "import", "idna", "try", ":", "for", "prefix", "in", "[", "u'*.'", ",", "u'.'", "]", ":", "if", "name", ".", "startswith", "(", "prefix", ")", ":", "name", "=", "name", "[", "len", "(", "prefix", ")", ":", "]", "return", "prefix", ".", "encode", "(", "'ascii'", ")", "+", "idna", ".", "encode", "(", "name", ")", "return", "idna", ".", "encode", "(", "name", ")", "except", "idna", ".", "core", ".", "IDNAError", ":", "return", "None", "name", "=", "idna_encode", "(", "name", ")", "if", "name", "is", "None", ":", "return", "None", "elif", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "name", "=", "name", ".", "decode", "(", "'utf-8'", ")", "return", "name" ]
Converts a dNSName SubjectAlternativeName field to the form used by the standard library on the given Python version. Cryptography produces a dNSName as a unicode string that was idna-decoded from ASCII bytes. We need to idna-encode that string to get it back, and then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8). If the name cannot be idna-encoded then we return None signalling that the name given should be skipped.
[ "Converts", "a", "dNSName", "SubjectAlternativeName", "field", "to", "the", "form", "used", "by", "the", "standard", "library", "on", "the", "given", "Python", "version", "." ]
python
train
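The wildcard handling in isolation, using the standalone idna package (pip vendors its own copy): the '*.' or '.' prefix is split off, ASCII-encoded, and re-attached around the idna-encoded remainder.

import idna

def wildcard_safe_encode(name):
    for prefix in (u'*.', u'.'):
        if name.startswith(prefix):
            return prefix.encode('ascii') + idna.encode(name[len(prefix):])
    return idna.encode(name)

print(wildcard_safe_encode(u'*.b\u00fccher.example'))  # b'*.xn--bcher-kva.example'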
The-Politico/politico-civic-geography
geography/models/division.py
https://github.com/The-Politico/politico-civic-geography/blob/032b3ee773b50b65cfe672f230dda772df0f89e0/geography/models/division.py#L84-L99
def add_intersecting(self, division, intersection=None, symm=True): """ Adds paired relationships between intersecting divisions. Optional intersection represents the portion of the area of the related division intersecting this division. You can only specify an intersection on one side of the relationship when adding a peer. """ relationship, created = IntersectRelationship.objects.update_or_create( from_division=self, to_division=division, defaults={"intersection": intersection}, ) if symm: division.add_intersecting(self, None, False) return relationship
[ "def", "add_intersecting", "(", "self", ",", "division", ",", "intersection", "=", "None", ",", "symm", "=", "True", ")", ":", "relationship", ",", "created", "=", "IntersectRelationship", ".", "objects", ".", "update_or_create", "(", "from_division", "=", "self", ",", "to_division", "=", "division", ",", "defaults", "=", "{", "\"intersection\"", ":", "intersection", "}", ",", ")", "if", "symm", ":", "division", ".", "add_intersecting", "(", "self", ",", "None", ",", "False", ")", "return", "relationship" ]
Adds paired relationships between intersecting divisions. Optional intersection represents the portion of the area of the related division intersecting this division. You can only specify an intersection on one side of the relationship when adding a peer.
[ "Adds", "paired", "relationships", "between", "intersecting", "divisions", "." ]
python
train
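A hypothetical usage sketch for the record above; the Division lookups and the intersection figure are invented for illustration — only add_intersecting's signature and its symmetry behaviour come from the code.

# Hypothetical usage; model fixtures and values are assumptions.
county = Division.objects.get(name="Dallas County")
district = Division.objects.get(name="TX-32")

# Say 40% of the district's area falls inside the county (invented figure);
# the symmetric reverse link is created automatically with intersection=None,
# since an intersection may only be given on one side when adding a peer.
rel = county.add_intersecting(district, intersection=0.4)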
senaite/senaite.core
bika/lims/browser/client/views/analysisspecs.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/client/views/analysisspecs.py#L33-L41
def before_render(self): """Before template render hook """ # We want to display the nav tabs, so we do NOT want disable_border in # the request. Thus, do not call super.before_render mtool = api.get_tool("portal_membership") if not mtool.checkPermission(AddAnalysisSpec, self.context): del self.context_actions[_("Add")]
[ "def", "before_render", "(", "self", ")", ":", "# We want to display the nav tabs, so we do NOT want disable_border in", "# the request. Thus, do not call super.before_render", "mtool", "=", "api", ".", "get_tool", "(", "\"portal_membership\"", ")", "if", "not", "mtool", ".", "checkPermission", "(", "AddAnalysisSpec", ",", "self", ".", "context", ")", ":", "del", "self", ".", "context_actions", "[", "_", "(", "\"Add\"", ")", "]" ]
Before template render hook
[ "Before", "template", "render", "hook" ]
python
train
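A slightly defensive variant of the same permission-gating pattern, sketched for context: pop() with a default tolerates an already-absent "Add" action, where del would raise KeyError. This is a suggestion, not senaite's actual code.

mtool = api.get_tool("portal_membership")
if not mtool.checkPermission(AddAnalysisSpec, self.context):
    # pop() with a default is safe if the "Add" action was never registered
    self.context_actions.pop(_("Add"), None)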
saltstack/salt
salt/cloud/clouds/opennebula.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L3373-L3404
def vm_info(name, call=None): ''' Retrieves information for a given virtual machine. A VM name must be supplied. .. versionadded:: 2016.3.0 name The name of the VM for which to gather information. CLI Example: .. code-block:: bash salt-cloud -a vm_info my-vm ''' if call != 'action': raise SaltCloudSystemExit( 'The vm_info action must be called with -a or --action.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) vm_id = int(get_vm_id(kwargs={'name': name})) response = server.one.vm.info(auth, vm_id) if response[0] is False: return response[1] else: info = {} tree = _get_xml(response[1]) info[tree.find('NAME').text] = _xml_to_dict(tree) return info
[ "def", "vm_info", "(", "name", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The vm_info action must be called with -a or --action.'", ")", "server", ",", "user", ",", "password", "=", "_get_xml_rpc", "(", ")", "auth", "=", "':'", ".", "join", "(", "[", "user", ",", "password", "]", ")", "vm_id", "=", "int", "(", "get_vm_id", "(", "kwargs", "=", "{", "'name'", ":", "name", "}", ")", ")", "response", "=", "server", ".", "one", ".", "vm", ".", "info", "(", "auth", ",", "vm_id", ")", "if", "response", "[", "0", "]", "is", "False", ":", "return", "response", "[", "1", "]", "else", ":", "info", "=", "{", "}", "tree", "=", "_get_xml", "(", "response", "[", "1", "]", ")", "info", "[", "tree", ".", "find", "(", "'NAME'", ")", ".", "text", "]", "=", "_xml_to_dict", "(", "tree", ")", "return", "info" ]
Retrieves information for a given virtual machine. A VM name must be supplied. .. versionadded:: 2016.3.0 name The name of the VM for which to gather information. CLI Example: .. code-block:: bash salt-cloud -a vm_info my-vm
[ "Retrieves", "information", "for", "a", "given", "virtual", "machine", ".", "A", "VM", "name", "must", "be", "supplied", "." ]
python
train
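A hypothetical programmatic call mirroring the CLI example in the record's docstring; the CloudClient setup and keyword usage here are assumptions — only the action name and the VM-name argument come from the record.

import salt.cloud

# Assumed client construction; config path is an example value.
client = salt.cloud.CloudClient('/etc/salt/cloud')
info = client.action('vm_info', names=['my-vm'])
print(info)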
SITools2/pySitools2_1.0
sitools2/core/query.py
https://github.com/SITools2/pySitools2_1.0/blob/acd13198162456ba401a0b923af989bb29feb3b6/sitools2/core/query.py#L228-L235
def _getParameters(self): """Returns the result of this decorator.""" param = self.query._getParameters() key = self.__PATTERN_KEY % (str(self._getIndex())) val = self.__PATTERN_VALUE % (self.__column, self.__value) #self.__column.getColumnAlias() param.update({key:val}) return param
[ "def", "_getParameters", "(", "self", ")", ":", "param", "=", "self", ".", "query", ".", "_getParameters", "(", ")", "key", "=", "self", ".", "__PATTERN_KEY", "%", "(", "str", "(", "self", ".", "_getIndex", "(", ")", ")", ")", "val", "=", "self", ".", "__PATTERN_VALUE", "%", "(", "self", ".", "__column", ",", "self", ".", "__value", ")", "#self.__column.getColumnAlias()", "param", ".", "update", "(", "{", "key", ":", "val", "}", ")", "return", "param" ]
Returns the result of this decorator.
[ "Returns", "the", "result", "of", "this", "decorator", "." ]
python
train
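A standalone sketch of the decorator chain the record implements: each filter wraps a base query and extends the accumulated parameter dict. The pattern strings below are placeholders — the real name-mangled __PATTERN_KEY/__PATTERN_VALUE templates are not shown in the record.

class BaseQuery(object):
    def _getParameters(self):
        return {}

class FilterDecorator(object):
    PATTERN_KEY = "p[%s]"       # placeholder; real template unknown
    PATTERN_VALUE = "%s|EQ|%s"  # placeholder; real template unknown

    def __init__(self, query, index, column, value):
        self.query, self._index = query, index
        self.column, self.value = column, value

    def _getIndex(self):
        return self._index

    def _getParameters(self):
        param = self.query._getParameters()
        key = self.PATTERN_KEY % (str(self._getIndex()))
        param[key] = self.PATTERN_VALUE % (self.column, self.value)
        return param

q = FilterDecorator(FilterDecorator(BaseQuery(), 0, "ra", "10"), 1, "dec", "20")
print(q._getParameters())  # {'p[0]': 'ra|EQ|10', 'p[1]': 'dec|EQ|20'}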
IRC-SPHERE/HyperStream
hyperstream/workflow/workflow_manager.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow_manager.py#L193-L212
def add_workflow(self, workflow, commit=False): """ Add a new workflow and optionally commit it to the database :param workflow: The workflow :param commit: Whether to commit the workflow to the database :type workflow: Workflow :type commit: bool :return: None """ if workflow.workflow_id in self.workflows: raise KeyError("Workflow with id {} already exists".format(workflow.workflow_id)) self.workflows[workflow.workflow_id] = workflow logging.info("Added workflow {} to workflow manager".format(workflow.workflow_id)) # Optionally also save the workflow to database if commit: self.commit_workflow(workflow.workflow_id) else: self.uncommitted_workflows.add(workflow.workflow_id)
[ "def", "add_workflow", "(", "self", ",", "workflow", ",", "commit", "=", "False", ")", ":", "if", "workflow", ".", "workflow_id", "in", "self", ".", "workflows", ":", "raise", "KeyError", "(", "\"Workflow with id {} already exists\"", ".", "format", "(", "workflow", ".", "workflow_id", ")", ")", "self", ".", "workflows", "[", "workflow", ".", "workflow_id", "]", "=", "workflow", "logging", ".", "info", "(", "\"Added workflow {} to workflow manager\"", ".", "format", "(", "workflow", ".", "workflow_id", ")", ")", "# Optionally also save the workflow to database", "if", "commit", ":", "self", ".", "commit_workflow", "(", "workflow", ".", "workflow_id", ")", "else", ":", "self", ".", "uncommitted_workflows", ".", "add", "(", "workflow", ".", "workflow_id", ")" ]
Add a new workflow and optionally commit it to the database :param workflow: The workflow :param commit: Whether to commit the workflow to the database :type workflow: Workflow :type commit: bool :return: None
[ "Add", "a", "new", "workflow", "and", "optionally", "commit", "it", "to", "the", "database", ":", "param", "workflow", ":", "The", "workflow", ":", "param", "commit", ":", "Whether", "to", "commit", "the", "workflow", "to", "the", "database", ":", "type", "workflow", ":", "Workflow", ":", "type", "commit", ":", "bool", ":", "return", ":", "None" ]
python
train
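A hypothetical usage sketch; the Workflow constructor arguments and manager variable are assumptions — only add_workflow's signature and its commit semantics come from the record.

workflow = Workflow(workflow_id="sphere_demo")   # constructor args assumed
manager.add_workflow(workflow)                   # commit=False: tracked as uncommitted
manager.commit_workflow("sphere_demo")           # persist to the database later

# Re-adding the same id raises KeyError:
# manager.add_workflow(workflow)  -> KeyError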
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/collections/base_collection.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/collections/base_collection.py#L22-L29
def next_power_of_2(n): """ Return next power of 2 greater than or equal to n """ n -= 1 # greater than OR EQUAL TO n shift = 1 while (n + 1) & n: # n+1 is not a power of 2 yet n |= n >> shift shift *= 2 return max(4, n + 1)
[ "def", "next_power_of_2", "(", "n", ")", ":", "n", "-=", "1", "# greater than OR EQUAL TO n", "shift", "=", "1", "while", "(", "n", "+", "1", ")", "&", "n", ":", "# n+1 is not a power of 2 yet", "n", "|=", "n", ">>", "shift", "shift", "*=", "2", "return", "max", "(", "4", ",", "n", "+", "1", ")" ]
Return next power of 2 greater than or equal to n
[ "Return", "next", "power", "of", "2", "greater", "than", "or", "equal", "to", "n" ]
python
train
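A cross-check of the bit-twiddling in the record using int.bit_length; both floor the result at 4 (presumably a minimum buffer size for the GPU collections — that motivation is an assumption).

def next_power_of_2_check(n):
    # 1 << (n - 1).bit_length() is the smallest power of 2 >= n for n >= 1
    return max(4, 1 << (n - 1).bit_length())

for n in (1, 4, 5, 16, 17):
    print(n, next_power_of_2_check(n))  # 4, 4, 8, 16, 32 — matches the record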
FujiMakoto/IPS-Vagrant
ips_vagrant/downloaders/ips.py
https://github.com/FujiMakoto/IPS-Vagrant/blob/7b1d6d095034dd8befb026d9315ecc6494d52269/ips_vagrant/downloaders/ips.py#L71-L93
def _read_zip(self, filepath): """ Read an IPS installation zipfile and return the core version number @type filepath: str @rtype: Version """ with ZipFile(filepath) as zip: namelist = zip.namelist() if re.match(r'^ips_\w{5}/?$', namelist[0]): self.log.debug('Setup directory matched: %s', namelist[0]) else: self.log.error('No setup directory matched') raise BadZipfile('Unrecognized setup file format') versions_path = os.path.join(namelist[0], 'applications/core/data/versions.json') if versions_path not in namelist: raise BadZipfile('Missing versions.json file') versions = json.loads(zip.read(versions_path), object_pairs_hook=OrderedDict) vid = next(reversed(versions)) version = versions[vid] self.log.debug('Version matched: %s', version) return Version(version, vid)
[ "def", "_read_zip", "(", "self", ",", "filepath", ")", ":", "with", "ZipFile", "(", "filepath", ")", "as", "zip", ":", "namelist", "=", "zip", ".", "namelist", "(", ")", "if", "re", ".", "match", "(", "r'^ips_\\w{5}/?$'", ",", "namelist", "[", "0", "]", ")", ":", "self", ".", "log", ".", "debug", "(", "'Setup directory matched: %s'", ",", "namelist", "[", "0", "]", ")", "else", ":", "self", ".", "log", ".", "error", "(", "'No setup directory matched'", ")", "raise", "BadZipfile", "(", "'Unrecognized setup file format'", ")", "versions_path", "=", "os", ".", "path", ".", "join", "(", "namelist", "[", "0", "]", ",", "'applications/core/data/versions.json'", ")", "if", "versions_path", "not", "in", "namelist", ":", "raise", "BadZipfile", "(", "'Missing versions.json file'", ")", "versions", "=", "json", ".", "loads", "(", "zip", ".", "read", "(", "versions_path", ")", ",", "object_pairs_hook", "=", "OrderedDict", ")", "vid", "=", "next", "(", "reversed", "(", "versions", ")", ")", "version", "=", "versions", "[", "vid", "]", "self", ".", "log", ".", "debug", "(", "'Version matched: %s'", ",", "version", ")", "return", "Version", "(", "version", ",", "vid", ")" ]
Read an IPS installation zipfile and return the core version number @type filepath: str @rtype: Version
[ "Read", "an", "IPS", "installation", "zipfile", "and", "return", "the", "core", "version", "number" ]
python
train
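A standalone sketch of the versions.json lookup at the end of the record: parsing with object_pairs_hook=OrderedDict preserves file order, so next(reversed(versions)) yields the last — i.e. newest — entry. The ids below are invented samples, not real IPS version ids.

import json
from collections import OrderedDict

raw = '{"100013": "4.0.0", "101024": "4.1.3", "102000": "4.2.0"}'  # invented sample
versions = json.loads(raw, object_pairs_hook=OrderedDict)
vid = next(reversed(versions))   # last key in file order = newest version
print(vid, versions[vid])        # 102000 4.2.0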
invoice-x/invoice2data
src/invoice2data/input/pdftotext.py
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/input/pdftotext.py#L2-L31
def to_text(path): """Wrapper around Poppler pdftotext. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- out : str returns extracted text from pdf Raises ------ EnvironmentError: If pdftotext library is not found """ import subprocess from distutils import spawn # py2 compat if spawn.find_executable("pdftotext"): # shutil.which('pdftotext'): out, err = subprocess.Popen( ["pdftotext", '-layout', '-enc', 'UTF-8', path, '-'], stdout=subprocess.PIPE ).communicate() return out else: raise EnvironmentError( 'pdftotext not installed. Can be downloaded from https://poppler.freedesktop.org/' )
[ "def", "to_text", "(", "path", ")", ":", "import", "subprocess", "from", "distutils", "import", "spawn", "# py2 compat", "if", "spawn", ".", "find_executable", "(", "\"pdftotext\"", ")", ":", "# shutil.which('pdftotext'):", "out", ",", "err", "=", "subprocess", ".", "Popen", "(", "[", "\"pdftotext\"", ",", "'-layout'", ",", "'-enc'", ",", "'UTF-8'", ",", "path", ",", "'-'", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "communicate", "(", ")", "return", "out", "else", ":", "raise", "EnvironmentError", "(", "'pdftotext not installed. Can be downloaded from https://poppler.freedesktop.org/'", ")" ]
Wrapper around Poppler pdftotext. Parameters ---------- path : str path of electronic invoice in PDF Returns ------- out : str returns extracted text from pdf Raises ------ EnvironmentError: If pdftotext library is not found
[ "Wrapper", "around", "Poppler", "pdftotext", "." ]
python
train
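A hypothetical call to the wrapper above; the file path is invented. Note that Popen's stdout is bytes, so the result needs decoding for display — UTF-8 matches the -enc flag the wrapper passes.

text = to_text('invoice.pdf')    # path is an assumption
print(text.decode('utf-8'))      # pdftotext was invoked with -enc UTF-8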
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L254-L266
def set_workdir(self, workdir, chroot=False): """ Set the working directory. Cannot be set more than once unless chroot is True """ if not chroot and hasattr(self, "workdir") and self.workdir != workdir: raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir)) # Directories with (input|output|temporary) data. self.workdir = os.path.abspath(workdir) self.indir = Directory(os.path.join(self.workdir, "indata")) self.outdir = Directory(os.path.join(self.workdir, "outdata")) self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata")) self.wdir = Directory(self.workdir)
[ "def", "set_workdir", "(", "self", ",", "workdir", ",", "chroot", "=", "False", ")", ":", "if", "not", "chroot", "and", "hasattr", "(", "self", ",", "\"workdir\"", ")", "and", "self", ".", "workdir", "!=", "workdir", ":", "raise", "ValueError", "(", "\"self.workdir != workdir: %s, %s\"", "%", "(", "self", ".", "workdir", ",", "workdir", ")", ")", "# Directories with (input|output|temporary) data.", "self", ".", "workdir", "=", "os", ".", "path", ".", "abspath", "(", "workdir", ")", "self", ".", "indir", "=", "Directory", "(", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "\"indata\"", ")", ")", "self", ".", "outdir", "=", "Directory", "(", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "\"outdata\"", ")", ")", "self", ".", "tmpdir", "=", "Directory", "(", "os", ".", "path", ".", "join", "(", "self", ".", "workdir", ",", "\"tmpdata\"", ")", ")", "self", ".", "wdir", "=", "Directory", "(", "self", ".", "workdir", ")" ]
Set the working directory. Cannot be set more than once unless chroot is True
[ "Set", "the", "working", "directory", ".", "Cannot", "be", "set", "more", "than", "once", "unless", "chroot", "is", "True" ]
python
train
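A hypothetical usage sketch; the flow object and paths are invented — only the set-once rule and the chroot escape hatch come from the record.

flow.set_workdir("/scratch/run1")               # first assignment: allowed
flow.set_workdir("/scratch/run1")               # same path again: allowed
flow.set_workdir("/scratch/run2", chroot=True)  # relocating requires chroot=True
# flow.set_workdir("/scratch/run3")  -> ValueError without chroot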