Dataset columns:

repo: string (lengths 7-54)
path: string (lengths 4-192)
url: string (lengths 87-284)
code: string (lengths 78-104k)
code_tokens: sequence
docstring: string (lengths 1-46.9k)
docstring_tokens: sequence
language: string (1 value)
partition: string (3 values)
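Each record below carries these nine fields in order. As a quick illustration of that structure, here is a minimal sketch, assuming plain Python dictionaries as the record type; the field names come from the schema above, while the values are shortened stand-ins modeled on the first record below, not verbatim dataset entries:

```python
# Illustrative sketch only: a hypothetical record mirroring the column schema
# listed above. Field names come from the schema; the values are shortened
# stand-ins based on the first record below, not verbatim dataset entries.
record = {
    "repo": "CalebBell/ht",                     # GitHub "owner/name" slug
    "path": "ht/conv_free_immersed.py",         # file path inside the repository
    "url": "https://github.com/CalebBell/ht/...",  # permalink to the source lines (shortened here)
    "code": "def f(x):\n    return x",          # full function source, docstring included
    "code_tokens": ["def", "f", "(", "x", ")", ":", "return", "x"],  # tokenized source
    "docstring": "Example docstring.",          # docstring text extracted from the code
    "docstring_tokens": ["Example", "docstring", "."],
    "language": "python",                       # a single value across the whole dataset
    "partition": "train",                       # one of the three splits
}


def summarize(records):
    """Print a one-line summary per record: repo, path and docstring length."""
    for rec in records:
        print("{}:{} ({} docstring chars)".format(
            rec["repo"], rec["path"], len(rec["docstring"])))


summarize([record])
```

The records themselves follow, one field per line in the column order given above.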
CalebBell/ht
ht/conv_free_immersed.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_free_immersed.py#L458-L518
def Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr, Gr, turbulent=None): r'''Calculates Nusselt number for natural convection around a vertical isothermal cylinder according to the results of [1]_ and [2]_ correlated by [3]_, as presented in [4]_, [5]_, and [6]_. .. math:: Nu_H = 0.59 Ra_H^{0.25},\; 10^{4} < Ra < 10^{9} Nu_H = 0.13 Ra_H^{1/3.},\; 10^{9} < Ra < 10^{12} Parameters ---------- Pr : float Prandtl number [-] Gr : float Grashof number [-] turbulent : bool or None, optional Whether or not to force the correlation to return the turbulent result; will return the laminar regime if False; leave as None for automatic selection Returns ------- Nu : float Nusselt number, [-] Notes ----- Transition between ranges is not smooth. If outside of range, no warning is given. For ranges under 10^4, a graph is provided, not included here. Examples -------- >>> Nu_vertical_cylinder_McAdams_Weiss_Saunders(.7, 2E10) 313.31849434277973 References ---------- .. [1] Weise, Rudolf. "Warmeubergang durch freie Konvektion an quadratischen Platten." Forschung auf dem Gebiet des Ingenieurwesens A 6, no. 6 (November 1935): 281-92. doi:10.1007/BF02592565. .. [2] Saunders, O. A. "The Effect of Pressure Upon Natural Convection in Air." Proceedings of the Royal Society of London A: Mathematical, Physical and Engineering Sciences 157, no. 891 (November 2, 1936): 278-91. doi:10.1098/rspa.1936.0194. .. [3] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla: Krieger Pub Co, 1985. .. [4] Morgan, V.T., The Overall Convective Heat Transfer from Smooth Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and J.P. Hartnett, V 11, 199-264, 1975. .. [5] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6 (June 1, 2008): 521-36. doi:10.1080/01457630801891557. .. [6] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From Vertical Cylinders." In Natural Convection from Circular Cylinders, 23-42. Springer, 2014. ''' Ra = Pr*Gr if turbulent or (Ra > 1E9 and turbulent is None): return 0.13*Ra**(1/3.) else: return 0.59*Ra**0.25
[ "def", "Nu_vertical_cylinder_McAdams_Weiss_Saunders", "(", "Pr", ",", "Gr", ",", "turbulent", "=", "None", ")", ":", "Ra", "=", "Pr", "*", "Gr", "if", "turbulent", "or", "(", "Ra", ">", "1E9", "and", "turbulent", "is", "None", ")", ":", "return", "0.13", "*", "Ra", "**", "(", "1", "/", "3.", ")", "else", ":", "return", "0.59", "*", "Ra", "**", "0.25" ]
r'''Calculates Nusselt number for natural convection around a vertical isothermal cylinder according to the results of [1]_ and [2]_ correlated by [3]_, as presented in [4]_, [5]_, and [6]_. .. math:: Nu_H = 0.59 Ra_H^{0.25},\; 10^{4} < Ra < 10^{9} Nu_H = 0.13 Ra_H^{1/3.},\; 10^{9} < Ra < 10^{12} Parameters ---------- Pr : float Prandtl number [-] Gr : float Grashof number [-] turbulent : bool or None, optional Whether or not to force the correlation to return the turbulent result; will return the laminar regime if False; leave as None for automatic selection Returns ------- Nu : float Nusselt number, [-] Notes ----- Transition between ranges is not smooth. If outside of range, no warning is given. For ranges under 10^4, a graph is provided, not included here. Examples -------- >>> Nu_vertical_cylinder_McAdams_Weiss_Saunders(.7, 2E10) 313.31849434277973 References ---------- .. [1] Weise, Rudolf. "Warmeubergang durch freie Konvektion an quadratischen Platten." Forschung auf dem Gebiet des Ingenieurwesens A 6, no. 6 (November 1935): 281-92. doi:10.1007/BF02592565. .. [2] Saunders, O. A. "The Effect of Pressure Upon Natural Convection in Air." Proceedings of the Royal Society of London A: Mathematical, Physical and Engineering Sciences 157, no. 891 (November 2, 1936): 278-91. doi:10.1098/rspa.1936.0194. .. [3] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla: Krieger Pub Co, 1985. .. [4] Morgan, V.T., The Overall Convective Heat Transfer from Smooth Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and J.P. Hartnett, V 11, 199-264, 1975. .. [5] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6 (June 1, 2008): 521-36. doi:10.1080/01457630801891557. .. [6] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From Vertical Cylinders." In Natural Convection from Circular Cylinders, 23-42. Springer, 2014.
[ "r", "Calculates", "Nusselt", "number", "for", "natural", "convection", "around", "a", "vertical", "isothermal", "cylinder", "according", "to", "the", "results", "of", "[", "1", "]", "_", "and", "[", "2", "]", "_", "correlated", "by", "[", "3", "]", "_", "as", "presented", "in", "[", "4", "]", "_", "[", "5", "]", "_", "and", "[", "6", "]", "_", "." ]
python
train
odrling/peony-twitter
peony/data_processing.py
https://github.com/odrling/peony-twitter/blob/967f98e16e1889389540f2e6acbf7cc7a1a80203/peony/data_processing.py#L175-L211
async def read(response, loads=loads, encoding=None): """ read the data of the response Parameters ---------- response : aiohttp.ClientResponse response loads : callable json loads function encoding : :obj:`str`, optional character encoding of the response, if set to None aiohttp should guess the right encoding Returns ------- :obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list` the data returned depends on the response """ ctype = response.headers.get('Content-Type', "").lower() try: if "application/json" in ctype: logger.info("decoding data as json") return await response.json(encoding=encoding, loads=loads) if "text" in ctype: logger.info("decoding data as text") return await response.text(encoding=encoding) except (UnicodeDecodeError, json.JSONDecodeError) as exc: data = await response.read() raise exceptions.PeonyDecodeError(response=response, data=data, exception=exc) return await response.read()
[ "async", "def", "read", "(", "response", ",", "loads", "=", "loads", ",", "encoding", "=", "None", ")", ":", "ctype", "=", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "\"\"", ")", ".", "lower", "(", ")", "try", ":", "if", "\"application/json\"", "in", "ctype", ":", "logger", ".", "info", "(", "\"decoding data as json\"", ")", "return", "await", "response", ".", "json", "(", "encoding", "=", "encoding", ",", "loads", "=", "loads", ")", "if", "\"text\"", "in", "ctype", ":", "logger", ".", "info", "(", "\"decoding data as text\"", ")", "return", "await", "response", ".", "text", "(", "encoding", "=", "encoding", ")", "except", "(", "UnicodeDecodeError", ",", "json", ".", "JSONDecodeError", ")", "as", "exc", ":", "data", "=", "await", "response", ".", "read", "(", ")", "raise", "exceptions", ".", "PeonyDecodeError", "(", "response", "=", "response", ",", "data", "=", "data", ",", "exception", "=", "exc", ")", "return", "await", "response", ".", "read", "(", ")" ]
read the data of the response Parameters ---------- response : aiohttp.ClientResponse response loads : callable json loads function encoding : :obj:`str`, optional character encoding of the response, if set to None aiohttp should guess the right encoding Returns ------- :obj:`bytes`, :obj:`str`, :obj:`dict` or :obj:`list` the data returned depends on the response
[ "read", "the", "data", "of", "the", "response" ]
python
valid
chrisspen/weka
weka/arff.py
https://github.com/chrisspen/weka/blob/c86fc4b8eef1afd56f89ec28283bdf9e2fdc453b/weka/arff.py#L438-L445
def save(self, filename=None): """ Save an arff structure to a file. """ filename = filename or self._filename o = open(filename, 'w') o.write(self.write()) o.close()
[ "def", "save", "(", "self", ",", "filename", "=", "None", ")", ":", "filename", "=", "filename", "or", "self", ".", "_filename", "o", "=", "open", "(", "filename", ",", "'w'", ")", "o", ".", "write", "(", "self", ".", "write", "(", ")", ")", "o", ".", "close", "(", ")" ]
Save an arff structure to a file.
[ "Save", "an", "arff", "structure", "to", "a", "file", "." ]
python
train
nccgroup/Scout2
AWSScout2/output/utils.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/output/utils.py#L12-L30
def prompt_4_yes_no(question): """ Ask a question and prompt for yes or no :param question: Question to ask; answer is yes/no :return: :boolean """ while True: sys.stdout.write(question + ' (y/n)? ') try: choice = raw_input().lower() except: choice = input().lower() if choice == 'yes' or choice == 'y': return True elif choice == 'no' or choice == 'n': return False else: printError('\'%s\' is not a valid answer. Enter \'yes\'(y) or \'no\'(n).' % choice)
[ "def", "prompt_4_yes_no", "(", "question", ")", ":", "while", "True", ":", "sys", ".", "stdout", ".", "write", "(", "question", "+", "' (y/n)? '", ")", "try", ":", "choice", "=", "raw_input", "(", ")", ".", "lower", "(", ")", "except", ":", "choice", "=", "input", "(", ")", ".", "lower", "(", ")", "if", "choice", "==", "'yes'", "or", "choice", "==", "'y'", ":", "return", "True", "elif", "choice", "==", "'no'", "or", "choice", "==", "'n'", ":", "return", "False", "else", ":", "printError", "(", "'\\'%s\\' is not a valid answer. Enter \\'yes\\'(y) or \\'no\\'(n).'", "%", "choice", ")" ]
Ask a question and prompt for yes or no :param question: Question to ask; answer is yes/no :return: :boolean
[ "Ask", "a", "question", "and", "prompt", "for", "yes", "or", "no" ]
python
train
KelSolaar/Umbra
umbra/ui/highlighters.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/highlighters.py#L305-L326
def highlight_text(self, text, start, end): """ Highlights given text. :param text: Text. :type text: QString :param start: Text start index. :type start: int :param end: Text end index. :type end: int :return: Method success. :rtype: bool """ for rule in self.__rules: index = rule.pattern.indexIn(text, start) while index >= start and index < end: length = rule.pattern.matchedLength() format = self.formats.get_format(rule.name) or self.formats.get_format("default") self.setFormat(index, min(length, end - index), format) index = rule.pattern.indexIn(text, index + length) return True
[ "def", "highlight_text", "(", "self", ",", "text", ",", "start", ",", "end", ")", ":", "for", "rule", "in", "self", ".", "__rules", ":", "index", "=", "rule", ".", "pattern", ".", "indexIn", "(", "text", ",", "start", ")", "while", "index", ">=", "start", "and", "index", "<", "end", ":", "length", "=", "rule", ".", "pattern", ".", "matchedLength", "(", ")", "format", "=", "self", ".", "formats", ".", "get_format", "(", "rule", ".", "name", ")", "or", "self", ".", "formats", ".", "get_format", "(", "\"default\"", ")", "self", ".", "setFormat", "(", "index", ",", "min", "(", "length", ",", "end", "-", "index", ")", ",", "format", ")", "index", "=", "rule", ".", "pattern", ".", "indexIn", "(", "text", ",", "index", "+", "length", ")", "return", "True" ]
Highlights given text. :param text: Text. :type text: QString :param start: Text start index. :type start: int :param end: Text end index. :type end: int :return: Method success. :rtype: bool
[ "Highlights", "given", "text", "." ]
python
train
Mangopay/mangopay2-python-sdk
mangopay/utils.py
https://github.com/Mangopay/mangopay2-python-sdk/blob/9bbbc0f797581c9fdf7da5a70879bee6643024b7/mangopay/utils.py#L684-L708
def reraise_as(new_exception_or_type): """ Obtained from https://github.com/dcramer/reraise/blob/master/src/reraise.py >>> try: >>> do_something_crazy() >>> except Exception: >>> reraise_as(UnhandledException) """ __traceback_hide__ = True # NOQA e_type, e_value, e_traceback = sys.exc_info() if inspect.isclass(new_exception_or_type): new_type = new_exception_or_type new_exception = new_exception_or_type() else: new_type = type(new_exception_or_type) new_exception = new_exception_or_type new_exception.__cause__ = e_value try: six.reraise(new_type, new_exception, e_traceback) finally: del e_traceback
[ "def", "reraise_as", "(", "new_exception_or_type", ")", ":", "__traceback_hide__", "=", "True", "# NOQA", "e_type", ",", "e_value", ",", "e_traceback", "=", "sys", ".", "exc_info", "(", ")", "if", "inspect", ".", "isclass", "(", "new_exception_or_type", ")", ":", "new_type", "=", "new_exception_or_type", "new_exception", "=", "new_exception_or_type", "(", ")", "else", ":", "new_type", "=", "type", "(", "new_exception_or_type", ")", "new_exception", "=", "new_exception_or_type", "new_exception", ".", "__cause__", "=", "e_value", "try", ":", "six", ".", "reraise", "(", "new_type", ",", "new_exception", ",", "e_traceback", ")", "finally", ":", "del", "e_traceback" ]
Obtained from https://github.com/dcramer/reraise/blob/master/src/reraise.py >>> try: >>> do_something_crazy() >>> except Exception: >>> reraise_as(UnhandledException)
[ "Obtained", "from", "https", ":", "//", "github", ".", "com", "/", "dcramer", "/", "reraise", "/", "blob", "/", "master", "/", "src", "/", "reraise", ".", "py", ">>>", "try", ":", ">>>", "do_something_crazy", "()", ">>>", "except", "Exception", ":", ">>>", "reraise_as", "(", "UnhandledException", ")" ]
python
train
talkincode/txradius
txradius/radius/packet.py
https://github.com/talkincode/txradius/blob/b86fdbc9be41183680b82b07d3a8e8ea10926e01/txradius/radius/packet.py#L237-L254
def ReplyPacket(self): """Create a ready-to-transmit authentication reply packet. Returns a RADIUS packet which can be directly transmitted to a RADIUS server. This differs with Packet() in how the authenticator is calculated. :return: raw packet :rtype: string """ assert(self.authenticator) assert(self.secret is not None) attr = self._PktEncodeAttributes() header = struct.pack('!BBH', self.code, self.id, (20 + len(attr))) authenticator = md5_constructor(header[0:4] + self.authenticator + attr + self.secret).digest() return header + authenticator + attr
[ "def", "ReplyPacket", "(", "self", ")", ":", "assert", "(", "self", ".", "authenticator", ")", "assert", "(", "self", ".", "secret", "is", "not", "None", ")", "attr", "=", "self", ".", "_PktEncodeAttributes", "(", ")", "header", "=", "struct", ".", "pack", "(", "'!BBH'", ",", "self", ".", "code", ",", "self", ".", "id", ",", "(", "20", "+", "len", "(", "attr", ")", ")", ")", "authenticator", "=", "md5_constructor", "(", "header", "[", "0", ":", "4", "]", "+", "self", ".", "authenticator", "+", "attr", "+", "self", ".", "secret", ")", ".", "digest", "(", ")", "return", "header", "+", "authenticator", "+", "attr" ]
Create a ready-to-transmit authentication reply packet. Returns a RADIUS packet which can be directly transmitted to a RADIUS server. This differs with Packet() in how the authenticator is calculated. :return: raw packet :rtype: string
[ "Create", "a", "ready", "-", "to", "-", "transmit", "authentication", "reply", "packet", ".", "Returns", "a", "RADIUS", "packet", "which", "can", "be", "directly", "transmitted", "to", "a", "RADIUS", "server", ".", "This", "differs", "with", "Packet", "()", "in", "how", "the", "authenticator", "is", "calculated", "." ]
python
train
MarcoFavorito/flloat
flloat/parser/ldlf.py
https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ldlf.py#L143-L167
def p_path(self, p): """path : path UNION path | path SEQ path | path STAR | temp_formula TEST | propositional""" if len(p)==2: p[0] = RegExpPropositional(p[1]) elif len(p)==3: if p[2]==Symbols.PATH_TEST.value: p[0] = RegExpTest(p[1]) elif p[2] == Symbols.PATH_STAR.value: p[0] = RegExpStar(p[1]) else: raise ValueError elif len(p)==4: if p[2]==Symbols.PATH_UNION.value: p[0] = RegExpUnion([p[1], p[3]]) elif p[2] == Symbols.PATH_SEQUENCE.value: p[0] = RegExpSequence([p[1], p[3]]) else: raise ValueError else: raise ValueError
[ "def", "p_path", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "RegExpPropositional", "(", "p", "[", "1", "]", ")", "elif", "len", "(", "p", ")", "==", "3", ":", "if", "p", "[", "2", "]", "==", "Symbols", ".", "PATH_TEST", ".", "value", ":", "p", "[", "0", "]", "=", "RegExpTest", "(", "p", "[", "1", "]", ")", "elif", "p", "[", "2", "]", "==", "Symbols", ".", "PATH_STAR", ".", "value", ":", "p", "[", "0", "]", "=", "RegExpStar", "(", "p", "[", "1", "]", ")", "else", ":", "raise", "ValueError", "elif", "len", "(", "p", ")", "==", "4", ":", "if", "p", "[", "2", "]", "==", "Symbols", ".", "PATH_UNION", ".", "value", ":", "p", "[", "0", "]", "=", "RegExpUnion", "(", "[", "p", "[", "1", "]", ",", "p", "[", "3", "]", "]", ")", "elif", "p", "[", "2", "]", "==", "Symbols", ".", "PATH_SEQUENCE", ".", "value", ":", "p", "[", "0", "]", "=", "RegExpSequence", "(", "[", "p", "[", "1", "]", ",", "p", "[", "3", "]", "]", ")", "else", ":", "raise", "ValueError", "else", ":", "raise", "ValueError" ]
path : path UNION path | path SEQ path | path STAR | temp_formula TEST | propositional
[ "path", ":", "path", "UNION", "path", "|", "path", "SEQ", "path", "|", "path", "STAR", "|", "temp_formula", "TEST", "|", "propositional" ]
python
train
google/grr
grr/core/grr_response_core/lib/config_lib.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/config_lib.py#L422-L430
def SaveDataToFD(self, raw_data, fd): """Merge the raw data with the config file and store it.""" for key, value in iteritems(raw_data): # TODO(hanuszczak): Incorrect type specification for `set`. # pytype: disable=wrong-arg-types self.set("", key, value=value) # pytype: enable=wrong-arg-types self.write(fd)
[ "def", "SaveDataToFD", "(", "self", ",", "raw_data", ",", "fd", ")", ":", "for", "key", ",", "value", "in", "iteritems", "(", "raw_data", ")", ":", "# TODO(hanuszczak): Incorrect type specification for `set`.", "# pytype: disable=wrong-arg-types", "self", ".", "set", "(", "\"\"", ",", "key", ",", "value", "=", "value", ")", "# pytype: enable=wrong-arg-types", "self", ".", "write", "(", "fd", ")" ]
Merge the raw data with the config file and store it.
[ "Merge", "the", "raw", "data", "with", "the", "config", "file", "and", "store", "it", "." ]
python
train
tortoise/tortoise-orm
tortoise/queryset.py
https://github.com/tortoise/tortoise-orm/blob/7d16457731905e19d4d06ccd5b4ea16d4a9447b2/tortoise/queryset.py#L319-L330
def update(self, **kwargs) -> "UpdateQuery": """ Update all objects in QuerySet with given kwargs. """ return UpdateQuery( db=self._db, model=self.model, update_kwargs=kwargs, q_objects=self._q_objects, annotations=self._annotations, custom_filters=self._custom_filters, )
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", "->", "\"UpdateQuery\"", ":", "return", "UpdateQuery", "(", "db", "=", "self", ".", "_db", ",", "model", "=", "self", ".", "model", ",", "update_kwargs", "=", "kwargs", ",", "q_objects", "=", "self", ".", "_q_objects", ",", "annotations", "=", "self", ".", "_annotations", ",", "custom_filters", "=", "self", ".", "_custom_filters", ",", ")" ]
Update all objects in QuerySet with given kwargs.
[ "Update", "all", "objects", "in", "QuerySet", "with", "given", "kwargs", "." ]
python
train
digidotcom/python-devicecloud
devicecloud/streams.py
https://github.com/digidotcom/python-devicecloud/blob/32529684a348a7830a269c32601604c78036bcb8/devicecloud/streams.py#L499-L526
def set_location(self, location): """Set the location for this data point The location must be either None (if no location data is known) or a 3-tuple of floating point values in the form (latitude-degrees, longitude-degrees, altitude-meters). """ if location is None: self._location = location elif isinstance(location, *six.string_types): # from device cloud, convert from csv parts = str(location).split(",") if len(parts) == 3: self._location = tuple(map(float, parts)) return else: raise ValueError("Location string %r has unexpected format" % location) # TODO: could maybe try to allow any iterable but this covers the most common cases elif (isinstance(location, (tuple, list)) and len(location) == 3 and all([isinstance(x, (float, six.integer_types)) for x in location])): self._location = tuple(map(float, location)) # coerce ints to float else: raise TypeError("Location must be None or 3-tuple of floats") self._location = location
[ "def", "set_location", "(", "self", ",", "location", ")", ":", "if", "location", "is", "None", ":", "self", ".", "_location", "=", "location", "elif", "isinstance", "(", "location", ",", "*", "six", ".", "string_types", ")", ":", "# from device cloud, convert from csv", "parts", "=", "str", "(", "location", ")", ".", "split", "(", "\",\"", ")", "if", "len", "(", "parts", ")", "==", "3", ":", "self", ".", "_location", "=", "tuple", "(", "map", "(", "float", ",", "parts", ")", ")", "return", "else", ":", "raise", "ValueError", "(", "\"Location string %r has unexpected format\"", "%", "location", ")", "# TODO: could maybe try to allow any iterable but this covers the most common cases", "elif", "(", "isinstance", "(", "location", ",", "(", "tuple", ",", "list", ")", ")", "and", "len", "(", "location", ")", "==", "3", "and", "all", "(", "[", "isinstance", "(", "x", ",", "(", "float", ",", "six", ".", "integer_types", ")", ")", "for", "x", "in", "location", "]", ")", ")", ":", "self", ".", "_location", "=", "tuple", "(", "map", "(", "float", ",", "location", ")", ")", "# coerce ints to float", "else", ":", "raise", "TypeError", "(", "\"Location must be None or 3-tuple of floats\"", ")", "self", ".", "_location", "=", "location" ]
Set the location for this data point The location must be either None (if no location data is known) or a 3-tuple of floating point values in the form (latitude-degrees, longitude-degrees, altitude-meters).
[ "Set", "the", "location", "for", "this", "data", "point" ]
python
train
jazzband/django-pipeline
pipeline/forms.py
https://github.com/jazzband/django-pipeline/blob/3cd2f93bb47bf8d34447e13ff691f7027e7b07a2/pipeline/forms.py#L192-L233
def _get_media_files(cls, packager, media_packages, media_type, extra_files): """Return source or output media files for a list of packages. This will go through the media files belonging to the provided list of packages referenced in a Media class and return the output files (if Pipeline is enabled) or the source files (if not enabled). Args: packager (pipeline.packager.Packager): The packager responsible for media compilation for this type of package. media_packages (list of unicode): The list of media packages referenced in Media to compile or return. extra_files (list of unicode): The list of extra files to include in the result. This would be the list stored in the Media class's original :py:attr:`css` or :py:attr:`js` attributes. Returns: list: The list of media files for the given packages. """ source_files = list(extra_files) if (not settings.PIPELINE_ENABLED and settings.PIPELINE_COLLECTOR_ENABLED): default_collector.collect() for media_package in media_packages: package = packager.package_for(media_type, media_package) if settings.PIPELINE_ENABLED: source_files.append( staticfiles_storage.url(package.output_filename)) else: source_files += packager.compile(package.paths) return source_files
[ "def", "_get_media_files", "(", "cls", ",", "packager", ",", "media_packages", ",", "media_type", ",", "extra_files", ")", ":", "source_files", "=", "list", "(", "extra_files", ")", "if", "(", "not", "settings", ".", "PIPELINE_ENABLED", "and", "settings", ".", "PIPELINE_COLLECTOR_ENABLED", ")", ":", "default_collector", ".", "collect", "(", ")", "for", "media_package", "in", "media_packages", ":", "package", "=", "packager", ".", "package_for", "(", "media_type", ",", "media_package", ")", "if", "settings", ".", "PIPELINE_ENABLED", ":", "source_files", ".", "append", "(", "staticfiles_storage", ".", "url", "(", "package", ".", "output_filename", ")", ")", "else", ":", "source_files", "+=", "packager", ".", "compile", "(", "package", ".", "paths", ")", "return", "source_files" ]
Return source or output media files for a list of packages. This will go through the media files belonging to the provided list of packages referenced in a Media class and return the output files (if Pipeline is enabled) or the source files (if not enabled). Args: packager (pipeline.packager.Packager): The packager responsible for media compilation for this type of package. media_packages (list of unicode): The list of media packages referenced in Media to compile or return. extra_files (list of unicode): The list of extra files to include in the result. This would be the list stored in the Media class's original :py:attr:`css` or :py:attr:`js` attributes. Returns: list: The list of media files for the given packages.
[ "Return", "source", "or", "output", "media", "files", "for", "a", "list", "of", "packages", "." ]
python
train
CodeReclaimers/neat-python
neat/distributed.py
https://github.com/CodeReclaimers/neat-python/blob/e3dbe77c0d776eae41d598e6439e6ac02ab90b18/neat/distributed.py#L398-L426
def start(self, exit_on_stop=True, secondary_wait=0, reconnect=False): """ If the DistributedEvaluator is in primary mode, starts the manager process and returns. In this case, the ``exit_on_stop`` argument will be ignored. If the DistributedEvaluator is in secondary mode, it connects to the manager and waits for tasks. If in secondary mode and ``exit_on_stop`` is True, sys.exit() will be called when the connection is lost. ``secondary_wait`` specifies the time (in seconds) to sleep before actually starting when in secondary mode. If 'reconnect' is True, the secondary nodes will try to reconnect when the connection is lost. In this case, sys.exit() will only be called when 'exit_on_stop' is True and the primary node send a forced shutdown command. """ if self.started: raise RuntimeError("DistributedEvaluator already started!") self.started = True if self.mode == MODE_PRIMARY: self._start_primary() elif self.mode == MODE_SECONDARY: time.sleep(secondary_wait) self._start_secondary() self._secondary_loop(reconnect=reconnect) if exit_on_stop: sys.exit(0) else: raise ValueError("Invalid mode {!r}!".format(self.mode))
[ "def", "start", "(", "self", ",", "exit_on_stop", "=", "True", ",", "secondary_wait", "=", "0", ",", "reconnect", "=", "False", ")", ":", "if", "self", ".", "started", ":", "raise", "RuntimeError", "(", "\"DistributedEvaluator already started!\"", ")", "self", ".", "started", "=", "True", "if", "self", ".", "mode", "==", "MODE_PRIMARY", ":", "self", ".", "_start_primary", "(", ")", "elif", "self", ".", "mode", "==", "MODE_SECONDARY", ":", "time", ".", "sleep", "(", "secondary_wait", ")", "self", ".", "_start_secondary", "(", ")", "self", ".", "_secondary_loop", "(", "reconnect", "=", "reconnect", ")", "if", "exit_on_stop", ":", "sys", ".", "exit", "(", "0", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid mode {!r}!\"", ".", "format", "(", "self", ".", "mode", ")", ")" ]
If the DistributedEvaluator is in primary mode, starts the manager process and returns. In this case, the ``exit_on_stop`` argument will be ignored. If the DistributedEvaluator is in secondary mode, it connects to the manager and waits for tasks. If in secondary mode and ``exit_on_stop`` is True, sys.exit() will be called when the connection is lost. ``secondary_wait`` specifies the time (in seconds) to sleep before actually starting when in secondary mode. If 'reconnect' is True, the secondary nodes will try to reconnect when the connection is lost. In this case, sys.exit() will only be called when 'exit_on_stop' is True and the primary node send a forced shutdown command.
[ "If", "the", "DistributedEvaluator", "is", "in", "primary", "mode", "starts", "the", "manager", "process", "and", "returns", ".", "In", "this", "case", "the", "exit_on_stop", "argument", "will", "be", "ignored", ".", "If", "the", "DistributedEvaluator", "is", "in", "secondary", "mode", "it", "connects", "to", "the", "manager", "and", "waits", "for", "tasks", ".", "If", "in", "secondary", "mode", "and", "exit_on_stop", "is", "True", "sys", ".", "exit", "()", "will", "be", "called", "when", "the", "connection", "is", "lost", ".", "secondary_wait", "specifies", "the", "time", "(", "in", "seconds", ")", "to", "sleep", "before", "actually", "starting", "when", "in", "secondary", "mode", ".", "If", "reconnect", "is", "True", "the", "secondary", "nodes", "will", "try", "to", "reconnect", "when", "the", "connection", "is", "lost", ".", "In", "this", "case", "sys", ".", "exit", "()", "will", "only", "be", "called", "when", "exit_on_stop", "is", "True", "and", "the", "primary", "node", "send", "a", "forced", "shutdown", "command", "." ]
python
train
ninuxorg/nodeshot
nodeshot/core/nodes/models/node.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/nodes/models/node.py#L88-L116
def save(self, *args, **kwargs): """ Custom save method does the following things: * converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point) * intercepts changes to status and fires node_status_changed signal * set default status """ # geometry collection check if isinstance(self.geometry, GeometryCollection) and 0 < len(self.geometry) < 2: self.geometry = self.geometry[0] # if no status specified if not self.status and not self.status_id: try: self.status = Status.objects.filter(is_default=True)[0] except IndexError: pass super(Node, self).save(*args, **kwargs) # if status of a node changes if (self.status and self._current_status and self.status.id != self._current_status) or\ (self.status_id and self._current_status and self.status_id != self._current_status): # send django signal node_status_changed.send( sender=self.__class__, instance=self, old_status=Status.objects.get(pk=self._current_status), new_status=self.status ) # update _current_status self._current_status = self.status_id
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# geometry collection check", "if", "isinstance", "(", "self", ".", "geometry", ",", "GeometryCollection", ")", "and", "0", "<", "len", "(", "self", ".", "geometry", ")", "<", "2", ":", "self", ".", "geometry", "=", "self", ".", "geometry", "[", "0", "]", "# if no status specified", "if", "not", "self", ".", "status", "and", "not", "self", ".", "status_id", ":", "try", ":", "self", ".", "status", "=", "Status", ".", "objects", ".", "filter", "(", "is_default", "=", "True", ")", "[", "0", "]", "except", "IndexError", ":", "pass", "super", "(", "Node", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# if status of a node changes", "if", "(", "self", ".", "status", "and", "self", ".", "_current_status", "and", "self", ".", "status", ".", "id", "!=", "self", ".", "_current_status", ")", "or", "(", "self", ".", "status_id", "and", "self", ".", "_current_status", "and", "self", ".", "status_id", "!=", "self", ".", "_current_status", ")", ":", "# send django signal", "node_status_changed", ".", "send", "(", "sender", "=", "self", ".", "__class__", ",", "instance", "=", "self", ",", "old_status", "=", "Status", ".", "objects", ".", "get", "(", "pk", "=", "self", ".", "_current_status", ")", ",", "new_status", "=", "self", ".", "status", ")", "# update _current_status", "self", ".", "_current_status", "=", "self", ".", "status_id" ]
Custom save method does the following things: * converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point) * intercepts changes to status and fires node_status_changed signal * set default status
[ "Custom", "save", "method", "does", "the", "following", "things", ":", "*", "converts", "geometry", "collections", "of", "just", "1", "item", "to", "that", "item", "(", "eg", ":", "a", "collection", "of", "1", "Point", "becomes", "a", "Point", ")", "*", "intercepts", "changes", "to", "status", "and", "fires", "node_status_changed", "signal", "*", "set", "default", "status" ]
python
train
sixty-north/added-value
source/added_value/tabulator.py
https://github.com/sixty-north/added-value/blob/7ae75b56712822b074fc874612d6058bb7d16a1e/source/added_value/tabulator.py#L402-L423
def strip_hidden(key_tuples, visibilities): """Filter each tuple according to visibility. Args: key_tuples: A sequence of tuples of equal length (i.e. rectangular) visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples. Returns: A sequence equal in length to key_tuples where the items are tuples with a length corresponding to the number of items in visibility which are True. """ result = [] for key_tuple in key_tuples: if len(key_tuple) != len(visibilities): raise ValueError( "length of key tuple {} is not equal to length of visibilities {}".format( key_tuple, visibilities ) ) filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible) result.append(filtered_tuple) return result
[ "def", "strip_hidden", "(", "key_tuples", ",", "visibilities", ")", ":", "result", "=", "[", "]", "for", "key_tuple", "in", "key_tuples", ":", "if", "len", "(", "key_tuple", ")", "!=", "len", "(", "visibilities", ")", ":", "raise", "ValueError", "(", "\"length of key tuple {} is not equal to length of visibilities {}\"", ".", "format", "(", "key_tuple", ",", "visibilities", ")", ")", "filtered_tuple", "=", "tuple", "(", "item", "for", "item", ",", "visible", "in", "zip", "(", "key_tuple", ",", "visibilities", ")", "if", "visible", ")", "result", ".", "append", "(", "filtered_tuple", ")", "return", "result" ]
Filter each tuple according to visibility. Args: key_tuples: A sequence of tuples of equal length (i.e. rectangular) visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples. Returns: A sequence equal in length to key_tuples where the items are tuples with a length corresponding to the number of items in visibility which are True.
[ "Filter", "each", "tuple", "according", "to", "visibility", "." ]
python
train
quora/qcore
qcore/decorators.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/decorators.py#L182-L203
def deprecated(replacement_description): """States that method is deprecated. :param replacement_description: Describes what must be used instead. :return: the original method with modified docstring. """ def decorate(fn_or_class): if isinstance(fn_or_class, type): pass # Can't change __doc__ of type objects else: try: fn_or_class.__doc__ = "This API point is obsolete. %s\n\n%s" % ( replacement_description, fn_or_class.__doc__, ) except AttributeError: pass # For Cython method descriptors, etc. return fn_or_class return decorate
[ "def", "deprecated", "(", "replacement_description", ")", ":", "def", "decorate", "(", "fn_or_class", ")", ":", "if", "isinstance", "(", "fn_or_class", ",", "type", ")", ":", "pass", "# Can't change __doc__ of type objects", "else", ":", "try", ":", "fn_or_class", ".", "__doc__", "=", "\"This API point is obsolete. %s\\n\\n%s\"", "%", "(", "replacement_description", ",", "fn_or_class", ".", "__doc__", ",", ")", "except", "AttributeError", ":", "pass", "# For Cython method descriptors, etc.", "return", "fn_or_class", "return", "decorate" ]
States that method is deprecated. :param replacement_description: Describes what must be used instead. :return: the original method with modified docstring.
[ "States", "that", "method", "is", "deprecated", "." ]
python
train
Kortemme-Lab/klab
klab/deprecated/rosettadb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/deprecated/rosettadb.py#L501-L514
def _getSortedString(o): """ Returns a string describing o, sorting the contents (case-insensitive on keys) if o is a dict. """ # todo: replace this with something like pprint on Python upgrade # We assume here that any container type is either list or tuple which may not always hold if isinstance(o, (dict)): pkeys = sorted(o.keys(), key=_lowercaseToStr) l = [] for k in pkeys: l.append(str(k) + ":" + _getSortedString(o[k])) return "{" + join(l, ",") + "}" else: return str(o)
[ "def", "_getSortedString", "(", "o", ")", ":", "# todo: replace this with something like pprint on Python upgrade", "# We assume here that any container type is either list or tuple which may not always hold", "if", "isinstance", "(", "o", ",", "(", "dict", ")", ")", ":", "pkeys", "=", "sorted", "(", "o", ".", "keys", "(", ")", ",", "key", "=", "_lowercaseToStr", ")", "l", "=", "[", "]", "for", "k", "in", "pkeys", ":", "l", ".", "append", "(", "str", "(", "k", ")", "+", "\":\"", "+", "_getSortedString", "(", "o", "[", "k", "]", ")", ")", "return", "\"{\"", "+", "join", "(", "l", ",", "\",\"", ")", "+", "\"}\"", "else", ":", "return", "str", "(", "o", ")" ]
Returns a string describing o, sorting the contents (case-insensitive on keys) if o is a dict.
[ "Returns", "a", "string", "describing", "o", "sorting", "the", "contents", "(", "case", "-", "insensitive", "on", "keys", ")", "if", "o", "is", "a", "dict", "." ]
python
train
kensho-technologies/graphql-compiler
setup.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/setup.py#L13-L20
def read_file(filename): """Read package file as text to get name and version""" # intentionally *not* adding an encoding option to open # see here: # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 here = os.path.abspath(os.path.dirname(__file__)) with codecs.open(os.path.join(here, 'graphql_compiler', filename), 'r') as f: return f.read()
[ "def", "read_file", "(", "filename", ")", ":", "# intentionally *not* adding an encoding option to open", "# see here:", "# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690", "here", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "here", ",", "'graphql_compiler'", ",", "filename", ")", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Read package file as text to get name and version
[ "Read", "package", "file", "as", "text", "to", "get", "name", "and", "version" ]
python
train
jaraco/svg.charts
svg/charts/graph.py
https://github.com/jaraco/svg.charts/blob/23053497b3f1af4e760f355050107ae3bc05909d/svg/charts/graph.py#L209-L219
def calculate_top_margin(self): """ Calculate the margin in pixels above the plot area, setting border_top. """ self.border_top = 5 if self.show_graph_title: self.border_top += self.title_font_size self.border_top += 5 if self.show_graph_subtitle: self.border_top += self.subtitle_font_size
[ "def", "calculate_top_margin", "(", "self", ")", ":", "self", ".", "border_top", "=", "5", "if", "self", ".", "show_graph_title", ":", "self", ".", "border_top", "+=", "self", ".", "title_font_size", "self", ".", "border_top", "+=", "5", "if", "self", ".", "show_graph_subtitle", ":", "self", ".", "border_top", "+=", "self", ".", "subtitle_font_size" ]
Calculate the margin in pixels above the plot area, setting border_top.
[ "Calculate", "the", "margin", "in", "pixels", "above", "the", "plot", "area", "setting", "border_top", "." ]
python
test
voidpp/vcp
vcp/project_handler_base.py
https://github.com/voidpp/vcp/blob/5538cdb7b43029db9aac9edad823cd87afd89ab5/vcp/project_handler_base.py#L18-L37
def create(self, uri, local_path): """Create a project handler Args: uri (str): schema://something formatted uri local_path (str): the project configs directory Return: ProjectHandler derived class instance """ matches = self.schema_pattern.search(uri) if not matches: logger.error("Unknown uri schema: '%s'. Added schemas: %s", uri, list(self.handlers.keys())) return None schema = matches.group(1) url = matches.group(2) return self.handlers[schema](url, local_path)
[ "def", "create", "(", "self", ",", "uri", ",", "local_path", ")", ":", "matches", "=", "self", ".", "schema_pattern", ".", "search", "(", "uri", ")", "if", "not", "matches", ":", "logger", ".", "error", "(", "\"Unknown uri schema: '%s'. Added schemas: %s\"", ",", "uri", ",", "list", "(", "self", ".", "handlers", ".", "keys", "(", ")", ")", ")", "return", "None", "schema", "=", "matches", ".", "group", "(", "1", ")", "url", "=", "matches", ".", "group", "(", "2", ")", "return", "self", ".", "handlers", "[", "schema", "]", "(", "url", ",", "local_path", ")" ]
Create a project handler Args: uri (str): schema://something formatted uri local_path (str): the project configs directory Return: ProjectHandler derived class instance
[ "Create", "a", "project", "handler" ]
python
test
noxdafox/vminspect
vminspect/comparator.py
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/comparator.py#L105-L126
def extract(self, disk, files, path='.'): """Extracts the given files from the given disk. Disk must be an integer (1 or 2) indicating from which of the two disks to extract. Files must be a list of dictionaries containing the keys 'path' and 'sha1'. Files will be extracted in path and will be named with their sha1. Returns a dictionary. {'extracted_files': [<sha1>, <sha1>], 'extraction_errors': [<sha1>, <sha1>]} """ self.logger.debug("Extracting files.") extracted_files, failed = self._extract_files(disk, files, path) return {'extracted_files': [f for f in extracted_files.keys()], 'extraction_errors': [f for f in failed.keys()]}
[ "def", "extract", "(", "self", ",", "disk", ",", "files", ",", "path", "=", "'.'", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Extracting files.\"", ")", "extracted_files", ",", "failed", "=", "self", ".", "_extract_files", "(", "disk", ",", "files", ",", "path", ")", "return", "{", "'extracted_files'", ":", "[", "f", "for", "f", "in", "extracted_files", ".", "keys", "(", ")", "]", ",", "'extraction_errors'", ":", "[", "f", "for", "f", "in", "failed", ".", "keys", "(", ")", "]", "}" ]
Extracts the given files from the given disk. Disk must be an integer (1 or 2) indicating from which of the two disks to extract. Files must be a list of dictionaries containing the keys 'path' and 'sha1'. Files will be extracted in path and will be named with their sha1. Returns a dictionary. {'extracted_files': [<sha1>, <sha1>], 'extraction_errors': [<sha1>, <sha1>]}
[ "Extracts", "the", "given", "files", "from", "the", "given", "disk", "." ]
python
train
molmod/molmod
molmod/quaternions.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/quaternions.py#L40-L47
def quaternion_product(quat1, quat2): """Return the quaternion product of the two arguments""" return np.array([ quat1[0]*quat2[0] - np.dot(quat1[1:], quat2[1:]), quat1[0]*quat2[1] + quat2[0]*quat1[1] + quat1[2]*quat2[3] - quat1[3]*quat2[2], quat1[0]*quat2[2] + quat2[0]*quat1[2] + quat1[3]*quat2[1] - quat1[1]*quat2[3], quat1[0]*quat2[3] + quat2[0]*quat1[3] + quat1[1]*quat2[2] - quat1[2]*quat2[1] ], float)
[ "def", "quaternion_product", "(", "quat1", ",", "quat2", ")", ":", "return", "np", ".", "array", "(", "[", "quat1", "[", "0", "]", "*", "quat2", "[", "0", "]", "-", "np", ".", "dot", "(", "quat1", "[", "1", ":", "]", ",", "quat2", "[", "1", ":", "]", ")", ",", "quat1", "[", "0", "]", "*", "quat2", "[", "1", "]", "+", "quat2", "[", "0", "]", "*", "quat1", "[", "1", "]", "+", "quat1", "[", "2", "]", "*", "quat2", "[", "3", "]", "-", "quat1", "[", "3", "]", "*", "quat2", "[", "2", "]", ",", "quat1", "[", "0", "]", "*", "quat2", "[", "2", "]", "+", "quat2", "[", "0", "]", "*", "quat1", "[", "2", "]", "+", "quat1", "[", "3", "]", "*", "quat2", "[", "1", "]", "-", "quat1", "[", "1", "]", "*", "quat2", "[", "3", "]", ",", "quat1", "[", "0", "]", "*", "quat2", "[", "3", "]", "+", "quat2", "[", "0", "]", "*", "quat1", "[", "3", "]", "+", "quat1", "[", "1", "]", "*", "quat2", "[", "2", "]", "-", "quat1", "[", "2", "]", "*", "quat2", "[", "1", "]", "]", ",", "float", ")" ]
Return the quaternion product of the two arguments
[ "Return", "the", "quaternion", "product", "of", "the", "two", "arguments" ]
python
train
ejhigson/nestcheck
nestcheck/pandas_functions.py
https://github.com/ejhigson/nestcheck/blob/29151c314deb89746fd674f27f6ce54b77603189/nestcheck/pandas_functions.py#L39-L63
def summary_df_from_list(results_list, names, **kwargs): """Make a panda data frame of the mean and std devs of each element of a list of 1d arrays, including the uncertainties on the values. This just converts the array to a DataFrame and calls summary_df on it. Parameters ---------- results_list: list of 1d numpy arrays Must have same length as names. names: list of strs Names for the output df's columns. kwargs: dict, optional Keyword arguments to pass to summary_df. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details. """ for arr in results_list: assert arr.shape == (len(names),) df = pd.DataFrame(np.stack(results_list, axis=0)) df.columns = names return summary_df(df, **kwargs)
[ "def", "summary_df_from_list", "(", "results_list", ",", "names", ",", "*", "*", "kwargs", ")", ":", "for", "arr", "in", "results_list", ":", "assert", "arr", ".", "shape", "==", "(", "len", "(", "names", ")", ",", ")", "df", "=", "pd", ".", "DataFrame", "(", "np", ".", "stack", "(", "results_list", ",", "axis", "=", "0", ")", ")", "df", ".", "columns", "=", "names", "return", "summary_df", "(", "df", ",", "*", "*", "kwargs", ")" ]
Make a panda data frame of the mean and std devs of each element of a list of 1d arrays, including the uncertainties on the values. This just converts the array to a DataFrame and calls summary_df on it. Parameters ---------- results_list: list of 1d numpy arrays Must have same length as names. names: list of strs Names for the output df's columns. kwargs: dict, optional Keyword arguments to pass to summary_df. Returns ------- df: MultiIndex DataFrame See summary_df docstring for more details.
[ "Make", "a", "panda", "data", "frame", "of", "the", "mean", "and", "std", "devs", "of", "each", "element", "of", "a", "list", "of", "1d", "arrays", "including", "the", "uncertainties", "on", "the", "values", "." ]
python
train
mitsei/dlkit
dlkit/json_/commenting/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/objects.py#L259-L270
def set_text(self, text): """Sets the text. arg: text (string): the new text raise: InvalidArgument - ``text`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``text`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.repository.AssetForm.set_title_template self._my_map['text'] = self._get_display_text(text, self.get_text_metadata())
[ "def", "set_text", "(", "self", ",", "text", ")", ":", "# Implemented from template for osid.repository.AssetForm.set_title_template", "self", ".", "_my_map", "[", "'text'", "]", "=", "self", ".", "_get_display_text", "(", "text", ",", "self", ".", "get_text_metadata", "(", ")", ")" ]
Sets the text. arg: text (string): the new text raise: InvalidArgument - ``text`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``text`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "text", "." ]
python
train
opentracing-contrib/python-flask
flask_opentracing/tracing.py
https://github.com/opentracing-contrib/python-flask/blob/74bfe8bcd00eee9ce75a15c1634fda4c5d5f26ca/flask_opentracing/tracing.py#L95-L108
def get_span(self, request=None): """ Returns the span tracing `request`, or the current request if `request==None`. If there is no such span, get_span returns None. @param request the request to get the span from """ if request is None and stack.top: request = stack.top.request scope = self._current_scopes.get(request, None) return None if scope is None else scope.span
[ "def", "get_span", "(", "self", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", "and", "stack", ".", "top", ":", "request", "=", "stack", ".", "top", ".", "request", "scope", "=", "self", ".", "_current_scopes", ".", "get", "(", "request", ",", "None", ")", "return", "None", "if", "scope", "is", "None", "else", "scope", ".", "span" ]
Returns the span tracing `request`, or the current request if `request==None`. If there is no such span, get_span returns None. @param request the request to get the span from
[ "Returns", "the", "span", "tracing", "request", "or", "the", "current", "request", "if", "request", "==", "None", "." ]
python
train
yyuu/botornado
botornado/s3/key.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/botornado/s3/key.py#L153-L163
def exists(self, callback=None): """ Returns True if the key exists :rtype: bool :return: Whether the key exists on S3 """ def existence_tested(response): if callable(callback): callback(bool(response)) self.bucket.lookup(self.name, callback=existence_tested)
[ "def", "exists", "(", "self", ",", "callback", "=", "None", ")", ":", "def", "existence_tested", "(", "response", ")", ":", "if", "callable", "(", "callback", ")", ":", "callback", "(", "bool", "(", "response", ")", ")", "self", ".", "bucket", ".", "lookup", "(", "self", ".", "name", ",", "callback", "=", "existence_tested", ")" ]
Returns True if the key exists :rtype: bool :return: Whether the key exists on S3
[ "Returns", "True", "if", "the", "key", "exists" ]
python
train
mdgoldberg/sportsref
sportsref/nba/boxscores.py
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/nba/boxscores.py#L126-L136
def season(self): """ Returns the year ID of the season in which this game took place. :returns: An int representing the year of the season. """ d = self.date() if d.month >= 9: return d.year + 1 else: return d.year
[ "def", "season", "(", "self", ")", ":", "d", "=", "self", ".", "date", "(", ")", "if", "d", ".", "month", ">=", "9", ":", "return", "d", ".", "year", "+", "1", "else", ":", "return", "d", ".", "year" ]
Returns the year ID of the season in which this game took place. :returns: An int representing the year of the season.
[ "Returns", "the", "year", "ID", "of", "the", "season", "in", "which", "this", "game", "took", "place", "." ]
python
test
LEMS/pylems
lems/parser/LEMS.py
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/LEMS.py#L283-L312
def raise_error(self, message, *params, **key_params): """ Raise a parse error. """ s = 'Parser error in ' self.xml_node_stack.reverse() if len(self.xml_node_stack) > 1: node = self.xml_node_stack[0] s += '<{0}'.format(node.tag) if 'name' in node.lattrib: s += ' name=\"{0}\"'.format(node.lattrib['name']) if 'id' in node.lattrib: s += ' id=\"{0}\"'.format(node.lattrib['id']) s += '>' for node in self.xml_node_stack[1:]: s += '.<{0}'.format(node.tag) if 'name' in node.lattrib: s += ' name=\"{0}\"'.format(node.lattrib['name']) if 'id' in node.lattrib: s += ' id=\"{0}\"'.format(node.lattrib['id']) s += '>' s += ':\n ' + message raise ParseError(s, *params, **key_params) self.xml_node_stack.reverse()
[ "def", "raise_error", "(", "self", ",", "message", ",", "*", "params", ",", "*", "*", "key_params", ")", ":", "s", "=", "'Parser error in '", "self", ".", "xml_node_stack", ".", "reverse", "(", ")", "if", "len", "(", "self", ".", "xml_node_stack", ")", ">", "1", ":", "node", "=", "self", ".", "xml_node_stack", "[", "0", "]", "s", "+=", "'<{0}'", ".", "format", "(", "node", ".", "tag", ")", "if", "'name'", "in", "node", ".", "lattrib", ":", "s", "+=", "' name=\\\"{0}\\\"'", ".", "format", "(", "node", ".", "lattrib", "[", "'name'", "]", ")", "if", "'id'", "in", "node", ".", "lattrib", ":", "s", "+=", "' id=\\\"{0}\\\"'", ".", "format", "(", "node", ".", "lattrib", "[", "'id'", "]", ")", "s", "+=", "'>'", "for", "node", "in", "self", ".", "xml_node_stack", "[", "1", ":", "]", ":", "s", "+=", "'.<{0}'", ".", "format", "(", "node", ".", "tag", ")", "if", "'name'", "in", "node", ".", "lattrib", ":", "s", "+=", "' name=\\\"{0}\\\"'", ".", "format", "(", "node", ".", "lattrib", "[", "'name'", "]", ")", "if", "'id'", "in", "node", ".", "lattrib", ":", "s", "+=", "' id=\\\"{0}\\\"'", ".", "format", "(", "node", ".", "lattrib", "[", "'id'", "]", ")", "s", "+=", "'>'", "s", "+=", "':\\n '", "+", "message", "raise", "ParseError", "(", "s", ",", "*", "params", ",", "*", "*", "key_params", ")", "self", ".", "xml_node_stack", ".", "reverse", "(", ")" ]
Raise a parse error.
[ "Raise", "a", "parse", "error", "." ]
python
train
ly0/baidupcsapi
baidupcsapi/api.py
https://github.com/ly0/baidupcsapi/blob/6f6feeef0767a75b3b968924727460eb09242d76/baidupcsapi/api.py#L558-L602
def upload(self, dest_dir, file_handler, filename, callback=None, **kwargs): """上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param dest_dir: 网盘中文件的保存路径(不包含文件名)。 必须以 / 开头。 .. warning:: * 注意本接口的 dest_dir 参数不包含文件名,只包含路径 * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_handler: 上传文件对象 。(e.g. ``open('foobar', 'rb')`` ) .. warning:: 注意不要使用 .read() 方法. :type file_handler: file :param callback: 上传进度回调函数 需要包含 size 和 progress 名字的参数 :param filename: :return: requests.Response 对象 .. note:: 返回正确时返回的 Reponse 对象 content 中的数据结构 {"path":"服务器文件路径","size":文件大小,"ctime":创建时间,"mtime":修改时间,"md5":"文件md5值","fs_id":服务器文件识别号,"isdir":是否为目录,"request_id":请求识别号} """ params = { 'path':str(dest_dir)+"/"+str(filename) } tmp_filename = ''.join(random.sample(string.ascii_letters, 10)) files = {'file': (tmp_filename, file_handler)} url = 'https://{0}/rest/2.0/pcs/file'.format(BAIDUPCS_SERVER) return self._request('file', 'upload', url=url, extra_params=params, files=files, callback=callback, **kwargs)
[ "def", "upload", "(", "self", ",", "dest_dir", ",", "file_handler", ",", "filename", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'path'", ":", "str", "(", "dest_dir", ")", "+", "\"/\"", "+", "str", "(", "filename", ")", "}", "tmp_filename", "=", "''", ".", "join", "(", "random", ".", "sample", "(", "string", ".", "ascii_letters", ",", "10", ")", ")", "files", "=", "{", "'file'", ":", "(", "tmp_filename", ",", "file_handler", ")", "}", "url", "=", "'https://{0}/rest/2.0/pcs/file'", ".", "format", "(", "BAIDUPCS_SERVER", ")", "return", "self", ".", "_request", "(", "'file'", ",", "'upload'", ",", "url", "=", "url", ",", "extra_params", "=", "params", ",", "files", "=", "files", ",", "callback", "=", "callback", ",", "*", "*", "kwargs", ")" ]
上传单个文件(<2G). | 百度PCS服务目前支持最大2G的单个文件上传。 | 如需支持超大文件(>2G)的断点续传,请参考下面的“分片文件上传”方法。 :param dest_dir: 网盘中文件的保存路径(不包含文件名)。 必须以 / 开头。 .. warning:: * 注意本接口的 dest_dir 参数不包含文件名,只包含路径 * 路径长度限制为1000; * 径中不能包含以下字符:``\\\\ ? | " > < : *``; * 文件名或路径名开头结尾不能是 ``.`` 或空白字符,空白字符包括: ``\\r, \\n, \\t, 空格, \\0, \\x0B`` 。 :param file_handler: 上传文件对象 。(e.g. ``open('foobar', 'rb')`` ) .. warning:: 注意不要使用 .read() 方法. :type file_handler: file :param callback: 上传进度回调函数 需要包含 size 和 progress 名字的参数 :param filename: :return: requests.Response 对象 .. note:: 返回正确时返回的 Reponse 对象 content 中的数据结构 {"path":"服务器文件路径","size":文件大小,"ctime":创建时间,"mtime":修改时间,"md5":"文件md5值","fs_id":服务器文件识别号,"isdir":是否为目录,"request_id":请求识别号}
[ "上传单个文件(<2G)", "." ]
python
train
bigchaindb/bigchaindb
bigchaindb/common/transaction.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/common/transaction.py#L872-L901
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs): """Signs a Ed25519Fulfillment. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with. """ # NOTE: To eliminate the dangers of accidentally signing a condition by # reference, we remove the reference of input_ here # intentionally. If the user of this class knows how to use it, # this should never happen, but then again, never say never. input_ = deepcopy(input_) public_key = input_.owners_before[0] message = sha3_256(message.encode()) if input_.fulfills: message.update('{}{}'.format( input_.fulfills.txid, input_.fulfills.output).encode()) try: # cryptoconditions makes no assumptions of the encoding of the # message to sign or verify. It only accepts bytestrings input_.fulfillment.sign( message.digest(), base58.b58decode(key_pairs[public_key].encode())) except KeyError: raise KeypairMismatchException('Public key {} is not a pair to ' 'any of the private keys' .format(public_key)) return input_
[ "def", "_sign_simple_signature_fulfillment", "(", "cls", ",", "input_", ",", "message", ",", "key_pairs", ")", ":", "# NOTE: To eliminate the dangers of accidentally signing a condition by", "# reference, we remove the reference of input_ here", "# intentionally. If the user of this class knows how to use it,", "# this should never happen, but then again, never say never.", "input_", "=", "deepcopy", "(", "input_", ")", "public_key", "=", "input_", ".", "owners_before", "[", "0", "]", "message", "=", "sha3_256", "(", "message", ".", "encode", "(", ")", ")", "if", "input_", ".", "fulfills", ":", "message", ".", "update", "(", "'{}{}'", ".", "format", "(", "input_", ".", "fulfills", ".", "txid", ",", "input_", ".", "fulfills", ".", "output", ")", ".", "encode", "(", ")", ")", "try", ":", "# cryptoconditions makes no assumptions of the encoding of the", "# message to sign or verify. It only accepts bytestrings", "input_", ".", "fulfillment", ".", "sign", "(", "message", ".", "digest", "(", ")", ",", "base58", ".", "b58decode", "(", "key_pairs", "[", "public_key", "]", ".", "encode", "(", ")", ")", ")", "except", "KeyError", ":", "raise", "KeypairMismatchException", "(", "'Public key {} is not a pair to '", "'any of the private keys'", ".", "format", "(", "public_key", ")", ")", "return", "input_" ]
Signs a Ed25519Fulfillment. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
[ "Signs", "a", "Ed25519Fulfillment", "." ]
python
train
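A minimal, self-contained sketch of how the digest being signed above is assembled, using hashlib.sha3_256 purely for illustration (the module itself imports its own sha3_256); the helper name signing_digest and the sample values are hypothetical.

import hashlib

def signing_digest(message, fulfills_txid=None, fulfills_output=None):
    # Mirror the construction above: hash the raw message, then mix in the
    # txid and output index of the spent output when the input fulfills one.
    digest = hashlib.sha3_256(message.encode())
    if fulfills_txid is not None:
        digest.update('{}{}'.format(fulfills_txid, fulfills_output).encode())
    return digest.digest()

print(signing_digest('{"hello": "world"}', 'abc123', 0).hex())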
RudolfCardinal/pythonlib
cardinal_pythonlib/extract_text.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/extract_text.py#L630-L660
def docx_table_from_xml_node(table_node: ElementTree.Element, level: int, config: TextProcessingConfig) -> str: """ Converts an XML node representing a DOCX table into a textual representation. Args: table_node: XML node level: current level in XML hierarchy (used for recursion; start level is 0) config: :class:`TextProcessingConfig` control object Returns: string representation """ table = CustomDocxTable() for row_node in table_node: if row_node.tag != DOCX_TABLE_ROW: continue table.new_row() for cell_node in row_node: if cell_node.tag != DOCX_TABLE_CELL: continue table.new_cell() for para_node in cell_node: text = docx_text_from_xml_node(para_node, level, config) if text: table.add_paragraph(text) return docx_process_table(table, config)
[ "def", "docx_table_from_xml_node", "(", "table_node", ":", "ElementTree", ".", "Element", ",", "level", ":", "int", ",", "config", ":", "TextProcessingConfig", ")", "->", "str", ":", "table", "=", "CustomDocxTable", "(", ")", "for", "row_node", "in", "table_node", ":", "if", "row_node", ".", "tag", "!=", "DOCX_TABLE_ROW", ":", "continue", "table", ".", "new_row", "(", ")", "for", "cell_node", "in", "row_node", ":", "if", "cell_node", ".", "tag", "!=", "DOCX_TABLE_CELL", ":", "continue", "table", ".", "new_cell", "(", ")", "for", "para_node", "in", "cell_node", ":", "text", "=", "docx_text_from_xml_node", "(", "para_node", ",", "level", ",", "config", ")", "if", "text", ":", "table", ".", "add_paragraph", "(", "text", ")", "return", "docx_process_table", "(", "table", ",", "config", ")" ]
Converts an XML node representing a DOCX table into a textual representation. Args: table_node: XML node level: current level in XML hierarchy (used for recursion; start level is 0) config: :class:`TextProcessingConfig` control object Returns: string representation
[ "Converts", "an", "XML", "node", "representing", "a", "DOCX", "table", "into", "a", "textual", "representation", "." ]
python
train
emc-openstack/storops
storops/unity/resource/metric.py
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/metric.py#L199-L208
def combine_sp_values(self, other): """ sp_values * sp_values """ if self.values is None: ret = IdValues() else: ret = IdValues({k: int(v) * int(other.values[k]) for k, v in self.values.items()}) return ret
[ "def", "combine_sp_values", "(", "self", ",", "other", ")", ":", "if", "self", ".", "values", "is", "None", ":", "ret", "=", "IdValues", "(", ")", "else", ":", "ret", "=", "IdValues", "(", "{", "k", ":", "int", "(", "v", ")", "*", "int", "(", "other", ".", "values", "[", "k", "]", ")", "for", "k", ",", "v", "in", "self", ".", "values", ".", "items", "(", ")", "}", ")", "return", "ret" ]
sp_values * sp_values
[ "sp_values", "*", "sp_values" ]
python
train
jleinonen/pytmatrix
pytmatrix/psd.py
https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/psd.py#L310-L334
def get_SZ(self, psd, geometry): """ Compute the scattering matrices for the given PSD and geometries. Returns: The new amplitude (S) and phase (Z) matrices. """ if (self._S_table is None) or (self._Z_table is None): raise AttributeError( "Initialize or load the scattering table first.") if (not isinstance(psd, PSD)) or self._previous_psd != psd: self._S_dict = {} self._Z_dict = {} psd_w = psd(self._psd_D) for geom in self.geometries: self._S_dict[geom] = \ trapz(self._S_table[geom] * psd_w, self._psd_D) self._Z_dict[geom] = \ trapz(self._Z_table[geom] * psd_w, self._psd_D) self._previous_psd = psd return (self._S_dict[geometry], self._Z_dict[geometry])
[ "def", "get_SZ", "(", "self", ",", "psd", ",", "geometry", ")", ":", "if", "(", "self", ".", "_S_table", "is", "None", ")", "or", "(", "self", ".", "_Z_table", "is", "None", ")", ":", "raise", "AttributeError", "(", "\"Initialize or load the scattering table first.\"", ")", "if", "(", "not", "isinstance", "(", "psd", ",", "PSD", ")", ")", "or", "self", ".", "_previous_psd", "!=", "psd", ":", "self", ".", "_S_dict", "=", "{", "}", "self", ".", "_Z_dict", "=", "{", "}", "psd_w", "=", "psd", "(", "self", ".", "_psd_D", ")", "for", "geom", "in", "self", ".", "geometries", ":", "self", ".", "_S_dict", "[", "geom", "]", "=", "trapz", "(", "self", ".", "_S_table", "[", "geom", "]", "*", "psd_w", ",", "self", ".", "_psd_D", ")", "self", ".", "_Z_dict", "[", "geom", "]", "=", "trapz", "(", "self", ".", "_Z_table", "[", "geom", "]", "*", "psd_w", ",", "self", ".", "_psd_D", ")", "self", ".", "_previous_psd", "=", "psd", "return", "(", "self", ".", "_S_dict", "[", "geometry", "]", ",", "self", ".", "_Z_dict", "[", "geometry", "]", ")" ]
Compute the scattering matrices for the given PSD and geometries. Returns: The new amplitude (S) and phase (Z) matrices.
[ "Compute", "the", "scattering", "matrices", "for", "the", "given", "PSD", "and", "geometries", "." ]
python
train
alvinwan/TexSoup
TexSoup/reader.py
https://github.com/alvinwan/TexSoup/blob/63323ed71510fd2351102b8c36660a3b7703cead/TexSoup/reader.py#L227-L268
def read_tex(src): r"""Read next expression from buffer :param Buffer src: a buffer of tokens """ c = next(src) if c.startswith('%'): return c elif c.startswith('$'): name = '$$' if c.startswith('$$') else '$' expr = TexEnv(name, [], nobegin=True) return read_math_env(src, expr) elif c.startswith('\[') or c.startswith("\("): if c.startswith('\['): name = 'displaymath' begin = '\[' end = '\]' else: name = "math" begin = "\(" end = "\)" expr = TexEnv(name, [], nobegin=True, begin=begin, end=end) return read_math_env(src, expr) elif c.startswith('\\'): command = TokenWithPosition(c[1:], src.position) if command == 'item': contents, arg = read_item(src) mode, expr = 'command', TexCmd(command, contents, arg) elif command == 'begin': mode, expr, _ = 'begin', TexEnv(src.peek(1)), src.forward(3) else: mode, expr = 'command', TexCmd(command) expr.args = read_args(src, expr.args) if mode == 'begin': read_env(src, expr) return expr if c in ARG_START_TOKENS: return read_arg(src, c) return c
[ "def", "read_tex", "(", "src", ")", ":", "c", "=", "next", "(", "src", ")", "if", "c", ".", "startswith", "(", "'%'", ")", ":", "return", "c", "elif", "c", ".", "startswith", "(", "'$'", ")", ":", "name", "=", "'$$'", "if", "c", ".", "startswith", "(", "'$$'", ")", "else", "'$'", "expr", "=", "TexEnv", "(", "name", ",", "[", "]", ",", "nobegin", "=", "True", ")", "return", "read_math_env", "(", "src", ",", "expr", ")", "elif", "c", ".", "startswith", "(", "'\\['", ")", "or", "c", ".", "startswith", "(", "\"\\(\"", ")", ":", "if", "c", ".", "startswith", "(", "'\\['", ")", ":", "name", "=", "'displaymath'", "begin", "=", "'\\['", "end", "=", "'\\]'", "else", ":", "name", "=", "\"math\"", "begin", "=", "\"\\(\"", "end", "=", "\"\\)\"", "expr", "=", "TexEnv", "(", "name", ",", "[", "]", ",", "nobegin", "=", "True", ",", "begin", "=", "begin", ",", "end", "=", "end", ")", "return", "read_math_env", "(", "src", ",", "expr", ")", "elif", "c", ".", "startswith", "(", "'\\\\'", ")", ":", "command", "=", "TokenWithPosition", "(", "c", "[", "1", ":", "]", ",", "src", ".", "position", ")", "if", "command", "==", "'item'", ":", "contents", ",", "arg", "=", "read_item", "(", "src", ")", "mode", ",", "expr", "=", "'command'", ",", "TexCmd", "(", "command", ",", "contents", ",", "arg", ")", "elif", "command", "==", "'begin'", ":", "mode", ",", "expr", ",", "_", "=", "'begin'", ",", "TexEnv", "(", "src", ".", "peek", "(", "1", ")", ")", ",", "src", ".", "forward", "(", "3", ")", "else", ":", "mode", ",", "expr", "=", "'command'", ",", "TexCmd", "(", "command", ")", "expr", ".", "args", "=", "read_args", "(", "src", ",", "expr", ".", "args", ")", "if", "mode", "==", "'begin'", ":", "read_env", "(", "src", ",", "expr", ")", "return", "expr", "if", "c", "in", "ARG_START_TOKENS", ":", "return", "read_arg", "(", "src", ",", "c", ")", "return", "c" ]
r"""Read next expression from buffer :param Buffer src: a buffer of tokens
[ "r", "Read", "next", "expression", "from", "buffer" ]
python
train
chdzq/ARPAbetAndIPAConvertor
arpabetandipaconvertor/arpabet2phoneticalphabet.py
https://github.com/chdzq/ARPAbetAndIPAConvertor/blob/e8b2fdbb5b7134c4f779f4d6dcd5dc30979a0a26/arpabetandipaconvertor/arpabet2phoneticalphabet.py#L76-L88
def convert_to_international_phonetic_alphabet(self, arpabet): ''' 转换成国际音标 :param arpabet: :return: ''' word = self._convert_to_word(arpabet=arpabet) if not word: return None return word.translate_to_international_phonetic_alphabet()
[ "def", "convert_to_international_phonetic_alphabet", "(", "self", ",", "arpabet", ")", ":", "word", "=", "self", ".", "_convert_to_word", "(", "arpabet", "=", "arpabet", ")", "if", "not", "word", ":", "return", "None", "return", "word", ".", "translate_to_international_phonetic_alphabet", "(", ")" ]
Convert to the International Phonetic Alphabet :param arpabet: :return:
[ "转换成国际音标", ":", "param", "arpabet", ":", ":", "return", ":" ]
python
train
twoolie/NBT
examples/utilities.py
https://github.com/twoolie/NBT/blob/b06dd6cc8117d2788da1d8416e642d58bad45762/examples/utilities.py#L33-L88
def pack_nbt(s): """ Pack a native Python data structure into an NBT tag. Only the following structures and types are supported: * int * float * str * unicode * dict Additionally, arbitrary iterables are supported. Packing is not lossless. In order to avoid data loss, TAG_Long and TAG_Double are preferred over the less precise numerical formats. Lists and tuples may become dicts on unpacking if they were not homogenous during packing, as a side-effect of NBT's format. Nothing can be done about this. Only strings are supported as keys for dicts and other mapping types. If your keys are not strings, they will be coerced. (Resistance is futile.) """ if isinstance(s, int): return TAG_Long(s) elif isinstance(s, float): return TAG_Double(s) elif isinstance(s, (str, unicode)): return TAG_String(s) elif isinstance(s, dict): tag = TAG_Compound() for k, v in s: v = pack_nbt(v) v.name = str(k) tag.tags.append(v) return tag elif hasattr(s, "__iter__"): # We arrive at a slight quandry. NBT lists must be homogenous, unlike # Python lists. NBT compounds work, but require unique names for every # entry. On the plus side, this technique should work for arbitrary # iterables as well. tags = [pack_nbt(i) for i in s] t = type(tags[0]) # If we're homogenous... if all(t == type(i) for i in tags): tag = TAG_List(type=t) tag.tags = tags else: tag = TAG_Compound() for i, item in enumerate(tags): item.name = str(i) tag.tags = tags return tag else: raise ValueError("Couldn't serialise type %s!" % type(s))
[ "def", "pack_nbt", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "int", ")", ":", "return", "TAG_Long", "(", "s", ")", "elif", "isinstance", "(", "s", ",", "float", ")", ":", "return", "TAG_Double", "(", "s", ")", "elif", "isinstance", "(", "s", ",", "(", "str", ",", "unicode", ")", ")", ":", "return", "TAG_String", "(", "s", ")", "elif", "isinstance", "(", "s", ",", "dict", ")", ":", "tag", "=", "TAG_Compound", "(", ")", "for", "k", ",", "v", "in", "s", ":", "v", "=", "pack_nbt", "(", "v", ")", "v", ".", "name", "=", "str", "(", "k", ")", "tag", ".", "tags", ".", "append", "(", "v", ")", "return", "tag", "elif", "hasattr", "(", "s", ",", "\"__iter__\"", ")", ":", "# We arrive at a slight quandry. NBT lists must be homogenous, unlike", "# Python lists. NBT compounds work, but require unique names for every", "# entry. On the plus side, this technique should work for arbitrary", "# iterables as well.", "tags", "=", "[", "pack_nbt", "(", "i", ")", "for", "i", "in", "s", "]", "t", "=", "type", "(", "tags", "[", "0", "]", ")", "# If we're homogenous...", "if", "all", "(", "t", "==", "type", "(", "i", ")", "for", "i", "in", "tags", ")", ":", "tag", "=", "TAG_List", "(", "type", "=", "t", ")", "tag", ".", "tags", "=", "tags", "else", ":", "tag", "=", "TAG_Compound", "(", ")", "for", "i", ",", "item", "in", "enumerate", "(", "tags", ")", ":", "item", ".", "name", "=", "str", "(", "i", ")", "tag", ".", "tags", "=", "tags", "return", "tag", "else", ":", "raise", "ValueError", "(", "\"Couldn't serialise type %s!\"", "%", "type", "(", "s", ")", ")" ]
Pack a native Python data structure into an NBT tag. Only the following structures and types are supported: * int * float * str * unicode * dict Additionally, arbitrary iterables are supported. Packing is not lossless. In order to avoid data loss, TAG_Long and TAG_Double are preferred over the less precise numerical formats. Lists and tuples may become dicts on unpacking if they were not homogenous during packing, as a side-effect of NBT's format. Nothing can be done about this. Only strings are supported as keys for dicts and other mapping types. If your keys are not strings, they will be coerced. (Resistance is futile.)
[ "Pack", "a", "native", "Python", "data", "structure", "into", "an", "NBT", "tag", ".", "Only", "the", "following", "structures", "and", "types", "are", "supported", ":" ]
python
train
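A brief usage sketch for pack_nbt above, assuming the function is in scope (it lives in the repo's examples/utilities.py, which is not an installed module); the nbt package itself provides the TAG classes, and the values are arbitrary.

from nbt.nbt import TAG_Compound, TAG_List, TAG_Long

# A homogeneous iterable becomes a TAG_List of the shared element tag type.
homogeneous = pack_nbt([1, 2, 3])
print(isinstance(homogeneous, TAG_List), type(homogeneous.tags[0]) is TAG_Long)

# A mixed numeric iterable falls back to a TAG_Compound keyed by stringified index.
mixed = pack_nbt([1, 2.5])
print(isinstance(mixed, TAG_Compound), [t.name for t in mixed.tags])  # True ['0', '1']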
edx/edx-enterprise
enterprise/api_client/discovery.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/api_client/discovery.py#L188-L202
def get_paginated_catalog_courses(self, catalog_id, querystring=None): """ Return paginated response for all catalog courses. Returns: dict: API response with links to next and previous pages. """ return self._load_data( self.CATALOGS_COURSES_ENDPOINT.format(catalog_id), default=[], querystring=querystring, traverse_pagination=False, many=False, )
[ "def", "get_paginated_catalog_courses", "(", "self", ",", "catalog_id", ",", "querystring", "=", "None", ")", ":", "return", "self", ".", "_load_data", "(", "self", ".", "CATALOGS_COURSES_ENDPOINT", ".", "format", "(", "catalog_id", ")", ",", "default", "=", "[", "]", ",", "querystring", "=", "querystring", ",", "traverse_pagination", "=", "False", ",", "many", "=", "False", ",", ")" ]
Return paginated response for all catalog courses. Returns: dict: API response with links to next and previous pages.
[ "Return", "paginated", "response", "for", "all", "catalog", "courses", "." ]
python
valid
cuihantao/andes
andes/routines/pflow.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/routines/pflow.py#L230-L288
def newton_call(self): """ Function calls for Newton power flow Returns ------- None """ # system = self.system # exec(system.call.newton) system = self.system dae = self.system.dae system.dae.init_fg() system.dae.reset_small_g() # evaluate algebraic equation mismatches for model, pflow, gcall in zip(system.devman.devices, system.call.pflow, system.call.gcall): if pflow and gcall: system.__dict__[model].gcall(dae) # eval differential equations for model, pflow, fcall in zip(system.devman.devices, system.call.pflow, system.call.fcall): if pflow and fcall: system.__dict__[model].fcall(dae) # reset islanded buses mismatches system.Bus.gisland(dae) if system.dae.factorize: system.dae.init_jac0() # evaluate constant Jacobian elements for model, pflow, jac0 in zip(system.devman.devices, system.call.pflow, system.call.jac0): if pflow and jac0: system.__dict__[model].jac0(dae) dae.temp_to_spmatrix('jac0') dae.setup_FxGy() # evaluate Gy for model, pflow, gycall in zip(system.devman.devices, system.call.pflow, system.call.gycall): if pflow and gycall: system.__dict__[model].gycall(dae) # evaluate Fx for model, pflow, fxcall in zip(system.devman.devices, system.call.pflow, system.call.fxcall): if pflow and fxcall: system.__dict__[model].fxcall(dae) # reset islanded buses Jacobians system.Bus.gyisland(dae) dae.temp_to_spmatrix('jac')
[ "def", "newton_call", "(", "self", ")", ":", "# system = self.system", "# exec(system.call.newton)", "system", "=", "self", ".", "system", "dae", "=", "self", ".", "system", ".", "dae", "system", ".", "dae", ".", "init_fg", "(", ")", "system", ".", "dae", ".", "reset_small_g", "(", ")", "# evaluate algebraic equation mismatches", "for", "model", ",", "pflow", ",", "gcall", "in", "zip", "(", "system", ".", "devman", ".", "devices", ",", "system", ".", "call", ".", "pflow", ",", "system", ".", "call", ".", "gcall", ")", ":", "if", "pflow", "and", "gcall", ":", "system", ".", "__dict__", "[", "model", "]", ".", "gcall", "(", "dae", ")", "# eval differential equations", "for", "model", ",", "pflow", ",", "fcall", "in", "zip", "(", "system", ".", "devman", ".", "devices", ",", "system", ".", "call", ".", "pflow", ",", "system", ".", "call", ".", "fcall", ")", ":", "if", "pflow", "and", "fcall", ":", "system", ".", "__dict__", "[", "model", "]", ".", "fcall", "(", "dae", ")", "# reset islanded buses mismatches", "system", ".", "Bus", ".", "gisland", "(", "dae", ")", "if", "system", ".", "dae", ".", "factorize", ":", "system", ".", "dae", ".", "init_jac0", "(", ")", "# evaluate constant Jacobian elements", "for", "model", ",", "pflow", ",", "jac0", "in", "zip", "(", "system", ".", "devman", ".", "devices", ",", "system", ".", "call", ".", "pflow", ",", "system", ".", "call", ".", "jac0", ")", ":", "if", "pflow", "and", "jac0", ":", "system", ".", "__dict__", "[", "model", "]", ".", "jac0", "(", "dae", ")", "dae", ".", "temp_to_spmatrix", "(", "'jac0'", ")", "dae", ".", "setup_FxGy", "(", ")", "# evaluate Gy", "for", "model", ",", "pflow", ",", "gycall", "in", "zip", "(", "system", ".", "devman", ".", "devices", ",", "system", ".", "call", ".", "pflow", ",", "system", ".", "call", ".", "gycall", ")", ":", "if", "pflow", "and", "gycall", ":", "system", ".", "__dict__", "[", "model", "]", ".", "gycall", "(", "dae", ")", "# evaluate Fx", "for", "model", ",", "pflow", ",", "fxcall", "in", "zip", "(", "system", ".", "devman", ".", "devices", ",", "system", ".", "call", ".", "pflow", ",", "system", ".", "call", ".", "fxcall", ")", ":", "if", "pflow", "and", "fxcall", ":", "system", ".", "__dict__", "[", "model", "]", ".", "fxcall", "(", "dae", ")", "# reset islanded buses Jacobians", "system", ".", "Bus", ".", "gyisland", "(", "dae", ")", "dae", ".", "temp_to_spmatrix", "(", "'jac'", ")" ]
Function calls for Newton power flow Returns ------- None
[ "Function", "calls", "for", "Newton", "power", "flow" ]
python
train
metacloud/gilt
gilt/util.py
https://github.com/metacloud/gilt/blob/234eec23fe2f8144369d0ec3b35ad2fef508b8d1/gilt/util.py#L46-L59
def run_command(cmd, debug=False): """ Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None """ if debug: msg = ' PWD: {}'.format(os.getcwd()) print_warn(msg) msg = ' COMMAND: {}'.format(cmd) print_warn(msg) cmd()
[ "def", "run_command", "(", "cmd", ",", "debug", "=", "False", ")", ":", "if", "debug", ":", "msg", "=", "' PWD: {}'", ".", "format", "(", "os", ".", "getcwd", "(", ")", ")", "print_warn", "(", "msg", ")", "msg", "=", "' COMMAND: {}'", ".", "format", "(", "cmd", ")", "print_warn", "(", "msg", ")", "cmd", "(", ")" ]
Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None
[ "Execute", "the", "given", "command", "and", "return", "None", "." ]
python
train
pmacosta/pexdoc
docs/support/trace_my_module_2.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/docs/support/trace_my_module_2.py#L14-L32
def trace_module(no_print=True): """Trace my_module_original exceptions.""" with pexdoc.ExDocCxt() as exdoc_obj: try: docs.support.my_module.func("John") obj = docs.support.my_module.MyClass() obj.value = 5 obj.value except: raise RuntimeError("Tracing did not complete successfully") if not no_print: module_prefix = "docs.support.my_module." callable_names = ["func", "MyClass.value"] for callable_name in callable_names: callable_name = module_prefix + callable_name print("\nCallable: {0}".format(callable_name)) print(exdoc_obj.get_sphinx_doc(callable_name, width=70)) print("\n") return copy.copy(exdoc_obj)
[ "def", "trace_module", "(", "no_print", "=", "True", ")", ":", "with", "pexdoc", ".", "ExDocCxt", "(", ")", "as", "exdoc_obj", ":", "try", ":", "docs", ".", "support", ".", "my_module", ".", "func", "(", "\"John\"", ")", "obj", "=", "docs", ".", "support", ".", "my_module", ".", "MyClass", "(", ")", "obj", ".", "value", "=", "5", "obj", ".", "value", "except", ":", "raise", "RuntimeError", "(", "\"Tracing did not complete successfully\"", ")", "if", "not", "no_print", ":", "module_prefix", "=", "\"docs.support.my_module.\"", "callable_names", "=", "[", "\"func\"", ",", "\"MyClass.value\"", "]", "for", "callable_name", "in", "callable_names", ":", "callable_name", "=", "module_prefix", "+", "callable_name", "print", "(", "\"\\nCallable: {0}\"", ".", "format", "(", "callable_name", ")", ")", "print", "(", "exdoc_obj", ".", "get_sphinx_doc", "(", "callable_name", ",", "width", "=", "70", ")", ")", "print", "(", "\"\\n\"", ")", "return", "copy", ".", "copy", "(", "exdoc_obj", ")" ]
Trace my_module_original exceptions.
[ "Trace", "my_module_original", "exceptions", "." ]
python
train
openvax/isovar
isovar/variant_sequence_in_reading_frame.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequence_in_reading_frame.py#L256-L282
def compute_offset_to_first_complete_codon( offset_to_first_complete_reference_codon, n_trimmed_from_reference_sequence): """ Once we've aligned the variant sequence to the ReferenceContext, we need to transfer reading frame from the reference transcripts to the variant sequences. Parameters ---------- offset_to_first_complete_reference_codon : int n_trimmed_from_reference_sequence : int Returns an offset into the variant sequence that starts from a complete codon. """ if n_trimmed_from_reference_sequence <= offset_to_first_complete_reference_codon: return ( offset_to_first_complete_reference_codon - n_trimmed_from_reference_sequence) else: n_nucleotides_trimmed_after_first_codon = ( n_trimmed_from_reference_sequence - offset_to_first_complete_reference_codon) frame = n_nucleotides_trimmed_after_first_codon % 3 return (3 - frame) % 3
[ "def", "compute_offset_to_first_complete_codon", "(", "offset_to_first_complete_reference_codon", ",", "n_trimmed_from_reference_sequence", ")", ":", "if", "n_trimmed_from_reference_sequence", "<=", "offset_to_first_complete_reference_codon", ":", "return", "(", "offset_to_first_complete_reference_codon", "-", "n_trimmed_from_reference_sequence", ")", "else", ":", "n_nucleotides_trimmed_after_first_codon", "=", "(", "n_trimmed_from_reference_sequence", "-", "offset_to_first_complete_reference_codon", ")", "frame", "=", "n_nucleotides_trimmed_after_first_codon", "%", "3", "return", "(", "3", "-", "frame", ")", "%", "3" ]
Once we've aligned the variant sequence to the ReferenceContext, we need to transfer reading frame from the reference transcripts to the variant sequences. Parameters ---------- offset_to_first_complete_reference_codon : int n_trimmed_from_reference_sequence : int Returns an offset into the variant sequence that starts from a complete codon.
[ "Once", "we", "ve", "aligned", "the", "variant", "sequence", "to", "the", "ReferenceContext", "we", "need", "to", "transfer", "reading", "frame", "from", "the", "reference", "transcripts", "to", "the", "variant", "sequences", "." ]
python
train
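To make the branch arithmetic above concrete, a small worked example; the import path follows this record and the numbers are arbitrary.

from isovar.variant_sequence_in_reading_frame import compute_offset_to_first_complete_codon

# Case 1: fewer bases trimmed than the offset to the first codon,
# so the codon simply shifts left: 5 - 2 = 3.
assert compute_offset_to_first_complete_codon(5, 2) == 3

# Case 2: trimming cut past the first codon. 7 - 2 = 5 bases were removed
# after it, 5 % 3 leaves a frame of 2, so (3 - 2) % 3 = 1 extra base must
# be skipped to land on the next codon boundary.
assert compute_offset_to_first_complete_codon(2, 7) == 1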
tanghaibao/jcvi
jcvi/utils/natsort.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/natsort.py#L248-L276
def index_natsorted(seq, key=lambda x: x, number_type=float, signed=True, exp=True): """\ Sorts a sequence naturally, but returns a list of sorted the indeces and not the sorted list. >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> [a[i] for i in index] ['num2', 'num3', 'num5'] >>> [b[i] for i in index] ['baz', 'foo', 'bar'] >>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')] >>> from operator import itemgetter >>> index_natsorted(c, key=itemgetter(1)) [2, 0, 1] """ from operator import itemgetter item1 = itemgetter(1) # Pair the index and sequence together, then sort by index_seq_pair = [[x, key(y)] for x, y in zip(range(len(seq)), seq)] index_seq_pair.sort(key=lambda x: natsort_key(item1(x), number_type=number_type, signed=signed, exp=exp)) return [x[0] for x in index_seq_pair]
[ "def", "index_natsorted", "(", "seq", ",", "key", "=", "lambda", "x", ":", "x", ",", "number_type", "=", "float", ",", "signed", "=", "True", ",", "exp", "=", "True", ")", ":", "from", "operator", "import", "itemgetter", "item1", "=", "itemgetter", "(", "1", ")", "# Pair the index and sequence together, then sort by", "index_seq_pair", "=", "[", "[", "x", ",", "key", "(", "y", ")", "]", "for", "x", ",", "y", "in", "zip", "(", "range", "(", "len", "(", "seq", ")", ")", ",", "seq", ")", "]", "index_seq_pair", ".", "sort", "(", "key", "=", "lambda", "x", ":", "natsort_key", "(", "item1", "(", "x", ")", ",", "number_type", "=", "number_type", ",", "signed", "=", "signed", ",", "exp", "=", "exp", ")", ")", "return", "[", "x", "[", "0", "]", "for", "x", "in", "index_seq_pair", "]" ]
\ Sorts a sequence naturally, but returns a list of sorted the indeces and not the sorted list. >>> a = ['num3', 'num5', 'num2'] >>> b = ['foo', 'bar', 'baz'] >>> index = index_natsorted(a) >>> index [2, 0, 1] >>> # Sort both lists by the sort order of a >>> [a[i] for i in index] ['num2', 'num3', 'num5'] >>> [b[i] for i in index] ['baz', 'foo', 'bar'] >>> c = [('a', 'num3'), ('b', 'num5'), ('c', 'num2')] >>> from operator import itemgetter >>> index_natsorted(c, key=itemgetter(1)) [2, 0, 1]
[ "\\", "Sorts", "a", "sequence", "naturally", "but", "returns", "a", "list", "of", "sorted", "the", "indeces", "and", "not", "the", "sorted", "list", "." ]
python
train
pandas-dev/pandas
pandas/core/dtypes/missing.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/missing.py#L449-L467
def _infer_fill_value(val): """ infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like if we are a NaT, return the correct dtyped element to provide proper block construction """ if not is_list_like(val): val = [val] val = np.array(val, copy=False) if is_datetimelike(val): return np.array('NaT', dtype=val.dtype) elif is_object_dtype(val.dtype): dtype = lib.infer_dtype(ensure_object(val), skipna=False) if dtype in ['datetime', 'datetime64']: return np.array('NaT', dtype=_NS_DTYPE) elif dtype in ['timedelta', 'timedelta64']: return np.array('NaT', dtype=_TD_DTYPE) return np.nan
[ "def", "_infer_fill_value", "(", "val", ")", ":", "if", "not", "is_list_like", "(", "val", ")", ":", "val", "=", "[", "val", "]", "val", "=", "np", ".", "array", "(", "val", ",", "copy", "=", "False", ")", "if", "is_datetimelike", "(", "val", ")", ":", "return", "np", ".", "array", "(", "'NaT'", ",", "dtype", "=", "val", ".", "dtype", ")", "elif", "is_object_dtype", "(", "val", ".", "dtype", ")", ":", "dtype", "=", "lib", ".", "infer_dtype", "(", "ensure_object", "(", "val", ")", ",", "skipna", "=", "False", ")", "if", "dtype", "in", "[", "'datetime'", ",", "'datetime64'", "]", ":", "return", "np", ".", "array", "(", "'NaT'", ",", "dtype", "=", "_NS_DTYPE", ")", "elif", "dtype", "in", "[", "'timedelta'", ",", "'timedelta64'", "]", ":", "return", "np", ".", "array", "(", "'NaT'", ",", "dtype", "=", "_TD_DTYPE", ")", "return", "np", ".", "nan" ]
infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like if we are a NaT, return the correct dtyped element to provide proper block construction
[ "infer", "the", "fill", "value", "for", "the", "nan", "/", "NaT", "from", "the", "provided", "scalar", "/", "ndarray", "/", "list", "-", "like", "if", "we", "are", "a", "NaT", "return", "the", "correct", "dtyped", "element", "to", "provide", "proper", "block", "construction" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/assistant_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/assistant_v1.py#L7342-L7349
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'workspaces') and self.workspaces is not None: _dict['workspaces'] = [x._to_dict() for x in self.workspaces] if hasattr(self, 'pagination') and self.pagination is not None: _dict['pagination'] = self.pagination._to_dict() return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'workspaces'", ")", "and", "self", ".", "workspaces", "is", "not", "None", ":", "_dict", "[", "'workspaces'", "]", "=", "[", "x", ".", "_to_dict", "(", ")", "for", "x", "in", "self", ".", "workspaces", "]", "if", "hasattr", "(", "self", ",", "'pagination'", ")", "and", "self", ".", "pagination", "is", "not", "None", ":", "_dict", "[", "'pagination'", "]", "=", "self", ".", "pagination", ".", "_to_dict", "(", ")", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
mitsei/dlkit
dlkit/records/assessment/orthographic_visualization/orthographic_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/orthographic_visualization/orthographic_records.py#L484-L490
def set_face_values(self, front_face_value, side_face_value, top_face_value): """stub""" if front_face_value is None or side_face_value is None or top_face_value is None: raise NullArgument() self.add_integer_value(value=int(front_face_value), label='frontFaceValue') self.add_integer_value(value=int(side_face_value), label='sideFaceValue') self.add_integer_value(value=int(top_face_value), label='topFaceValue')
[ "def", "set_face_values", "(", "self", ",", "front_face_value", ",", "side_face_value", ",", "top_face_value", ")", ":", "if", "front_face_value", "is", "None", "or", "side_face_value", "is", "None", "or", "top_face_value", "is", "None", ":", "raise", "NullArgument", "(", ")", "self", ".", "add_integer_value", "(", "value", "=", "int", "(", "front_face_value", ")", ",", "label", "=", "'frontFaceValue'", ")", "self", ".", "add_integer_value", "(", "value", "=", "int", "(", "side_face_value", ")", ",", "label", "=", "'sideFaceValue'", ")", "self", ".", "add_integer_value", "(", "value", "=", "int", "(", "top_face_value", ")", ",", "label", "=", "'topFaceValue'", ")" ]
stub
[ "stub" ]
python
train
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py#L594-L605
def _get_result(self, idx, timeout=None): """Called by the CollectorIterator object to retrieve the result's values one after another (order defined by the implementation) \param idx The index of the result we want, wrt collector's order \param timeout integer telling how long to wait (in seconds) for the result at index idx to be available, or None (wait forever) """ res = self._results[idx] res.wait(timeout) return res
[ "def", "_get_result", "(", "self", ",", "idx", ",", "timeout", "=", "None", ")", ":", "res", "=", "self", ".", "_results", "[", "idx", "]", "res", ".", "wait", "(", "timeout", ")", "return", "res" ]
Called by the CollectorIterator object to retrieve the result's values one after another (order defined by the implementation) \param idx The index of the result we want, wrt collector's order \param timeout integer telling how long to wait (in seconds) for the result at index idx to be available, or None (wait forever)
[ "Called", "by", "the", "CollectorIterator", "object", "to", "retrieve", "the", "result", "s", "values", "one", "after", "another", "(", "order", "defined", "by", "the", "implementation", ")", "\\", "param", "idx", "The", "index", "of", "the", "result", "we", "want", "wrt", "collector", "s", "order", "\\", "param", "timeout", "integer", "telling", "how", "long", "to", "wait", "(", "in", "seconds", ")", "for", "the", "result", "at", "index", "idx", "to", "be", "available", "or", "None", "(", "wait", "forever", ")" ]
python
train
elastic/elasticsearch-dsl-py
elasticsearch_dsl/index.py
https://github.com/elastic/elasticsearch-dsl-py/blob/874b52472fc47b601de0e5fa0e4300e21aff0085/elasticsearch_dsl/index.py#L575-L590
def shrink(self, using=None, **kwargs): """ The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The number of primary shards in the target index must be a factor of the shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. Any additional keyword arguments will be passed to ``Elasticsearch.indices.shrink`` unchanged. """ return self._get_connection(using).indices.shrink(index=self._name, **kwargs)
[ "def", "shrink", "(", "self", ",", "using", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_get_connection", "(", "using", ")", ".", "indices", ".", "shrink", "(", "index", "=", "self", ".", "_name", ",", "*", "*", "kwargs", ")" ]
The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The number of primary shards in the target index must be a factor of the shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. Any additional keyword arguments will be passed to ``Elasticsearch.indices.shrink`` unchanged.
[ "The", "shrink", "index", "API", "allows", "you", "to", "shrink", "an", "existing", "index", "into", "a", "new", "index", "with", "fewer", "primary", "shards", ".", "The", "number", "of", "primary", "shards", "in", "the", "target", "index", "must", "be", "a", "factor", "of", "the", "shards", "in", "the", "source", "index", ".", "For", "example", "an", "index", "with", "8", "primary", "shards", "can", "be", "shrunk", "into", "4", "2", "or", "1", "primary", "shards", "or", "an", "index", "with", "15", "primary", "shards", "can", "be", "shrunk", "into", "5", "3", "or", "1", ".", "If", "the", "number", "of", "shards", "in", "the", "index", "is", "a", "prime", "number", "it", "can", "only", "be", "shrunk", "into", "a", "single", "primary", "shard", ".", "Before", "shrinking", "a", "(", "primary", "or", "replica", ")", "copy", "of", "every", "shard", "in", "the", "index", "must", "be", "present", "on", "the", "same", "node", "." ]
python
train
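A hedged usage sketch for Index.shrink above: the cluster address, index names, and shard count are placeholders, and the prerequisite steps (making the source index read-only and relocating a copy of every shard onto one node) are not shown. The target and body keyword names are those of the underlying elasticsearch-py client and may vary across versions.

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=['localhost'])  # placeholder cluster

# Shrink an 8-shard source index down to a single primary shard.
Index('logs-2019').shrink(
    target='logs-2019-shrunk',
    body={'settings': {'index.number_of_shards': 1}},
)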
googleapis/google-auth-library-python-oauthlib
google_auth_oauthlib/flow.py
https://github.com/googleapis/google-auth-library-python-oauthlib/blob/ba826565994cf20c073d79f534036747fdef2041/google_auth_oauthlib/flow.py#L330-L366
def run_console( self, authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE, authorization_code_message=_DEFAULT_AUTH_CODE_MESSAGE, **kwargs): """Run the flow using the console strategy. The console strategy instructs the user to open the authorization URL in their browser. Once the authorization is complete the authorization server will give the user a code. The user then must copy & paste this code into the application. The code is then exchanged for a token. Args: authorization_prompt_message (str): The message to display to tell the user to navigate to the authorization URL. authorization_code_message (str): The message to display when prompting the user for the authorization code. kwargs: Additional keyword arguments passed through to :meth:`authorization_url`. Returns: google.oauth2.credentials.Credentials: The OAuth 2.0 credentials for the user. """ kwargs.setdefault('prompt', 'consent') self.redirect_uri = self._OOB_REDIRECT_URI auth_url, _ = self.authorization_url(**kwargs) print(authorization_prompt_message.format(url=auth_url)) code = input(authorization_code_message) self.fetch_token(code=code) return self.credentials
[ "def", "run_console", "(", "self", ",", "authorization_prompt_message", "=", "_DEFAULT_AUTH_PROMPT_MESSAGE", ",", "authorization_code_message", "=", "_DEFAULT_AUTH_CODE_MESSAGE", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'prompt'", ",", "'consent'", ")", "self", ".", "redirect_uri", "=", "self", ".", "_OOB_REDIRECT_URI", "auth_url", ",", "_", "=", "self", ".", "authorization_url", "(", "*", "*", "kwargs", ")", "print", "(", "authorization_prompt_message", ".", "format", "(", "url", "=", "auth_url", ")", ")", "code", "=", "input", "(", "authorization_code_message", ")", "self", ".", "fetch_token", "(", "code", "=", "code", ")", "return", "self", ".", "credentials" ]
Run the flow using the console strategy. The console strategy instructs the user to open the authorization URL in their browser. Once the authorization is complete the authorization server will give the user a code. The user then must copy & paste this code into the application. The code is then exchanged for a token. Args: authorization_prompt_message (str): The message to display to tell the user to navigate to the authorization URL. authorization_code_message (str): The message to display when prompting the user for the authorization code. kwargs: Additional keyword arguments passed through to :meth:`authorization_url`. Returns: google.oauth2.credentials.Credentials: The OAuth 2.0 credentials for the user.
[ "Run", "the", "flow", "using", "the", "console", "strategy", "." ]
python
train
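A short usage sketch of the console strategy above; the client-secrets file name and the requested scope are placeholders for a real OAuth client registration.

from google_auth_oauthlib.flow import InstalledAppFlow

flow = InstalledAppFlow.from_client_secrets_file(
    'client_secret.json',  # placeholder path
    scopes=['https://www.googleapis.com/auth/drive.readonly'])

# Prints the authorization URL, prompts for the pasted code, then exchanges
# it for user credentials.
credentials = flow.run_console()
print(credentials.valid)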
bionikspoon/pureyaml
pureyaml/_compat/singledispatch.py
https://github.com/bionikspoon/pureyaml/blob/784830b907ca14525c4cecdb6ae35306f6f8a877/pureyaml/_compat/singledispatch.py#L170-L235
def singledispatch(function): # noqa """Single-dispatch generic function decorator. Transforms a function into a generic function, which can have different behaviours depending upon the type of its first argument. The decorated function acts as the default implementation, and additional implementations can be registered using the register() attribute of the generic function. """ registry = {} dispatch_cache = WeakKeyDictionary() def ns(): pass ns.cache_token = None # noinspection PyIncorrectDocstring def dispatch(cls): """generic_func.dispatch(cls) -> <function implementation> Runs the dispatch algorithm to return the best available implementation for the given *cls* registered on *generic_func*. """ if ns.cache_token is not None: current_token = get_cache_token() if ns.cache_token != current_token: dispatch_cache.clear() ns.cache_token = current_token try: impl = dispatch_cache[cls] except KeyError: try: impl = registry[cls] except KeyError: impl = _find_impl(cls, registry) dispatch_cache[cls] = impl return impl # noinspection PyIncorrectDocstring def register(cls, func=None): """generic_func.register(cls, func) -> func Registers a new implementation for the given *cls* on a *generic_func*. """ if func is None: return lambda f: register(cls, f) registry[cls] = func if ns.cache_token is None and hasattr(cls, '__abstractmethods__'): ns.cache_token = get_cache_token() dispatch_cache.clear() return func def wrapper(*args, **kw): return dispatch(args[0].__class__)(*args, **kw) registry[object] = function wrapper.register = register wrapper.dispatch = dispatch wrapper.registry = MappingProxyType(registry) wrapper._clear_cache = dispatch_cache.clear update_wrapper(wrapper, function) return wrapper
[ "def", "singledispatch", "(", "function", ")", ":", "# noqa", "registry", "=", "{", "}", "dispatch_cache", "=", "WeakKeyDictionary", "(", ")", "def", "ns", "(", ")", ":", "pass", "ns", ".", "cache_token", "=", "None", "# noinspection PyIncorrectDocstring", "def", "dispatch", "(", "cls", ")", ":", "\"\"\"generic_func.dispatch(cls) -> <function implementation>\n\n Runs the dispatch algorithm to return the best available implementation\n for the given *cls* registered on *generic_func*.\n\n \"\"\"", "if", "ns", ".", "cache_token", "is", "not", "None", ":", "current_token", "=", "get_cache_token", "(", ")", "if", "ns", ".", "cache_token", "!=", "current_token", ":", "dispatch_cache", ".", "clear", "(", ")", "ns", ".", "cache_token", "=", "current_token", "try", ":", "impl", "=", "dispatch_cache", "[", "cls", "]", "except", "KeyError", ":", "try", ":", "impl", "=", "registry", "[", "cls", "]", "except", "KeyError", ":", "impl", "=", "_find_impl", "(", "cls", ",", "registry", ")", "dispatch_cache", "[", "cls", "]", "=", "impl", "return", "impl", "# noinspection PyIncorrectDocstring", "def", "register", "(", "cls", ",", "func", "=", "None", ")", ":", "\"\"\"generic_func.register(cls, func) -> func\n\n Registers a new implementation for the given *cls* on a *generic_func*.\n\n \"\"\"", "if", "func", "is", "None", ":", "return", "lambda", "f", ":", "register", "(", "cls", ",", "f", ")", "registry", "[", "cls", "]", "=", "func", "if", "ns", ".", "cache_token", "is", "None", "and", "hasattr", "(", "cls", ",", "'__abstractmethods__'", ")", ":", "ns", ".", "cache_token", "=", "get_cache_token", "(", ")", "dispatch_cache", ".", "clear", "(", ")", "return", "func", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "dispatch", "(", "args", "[", "0", "]", ".", "__class__", ")", "(", "*", "args", ",", "*", "*", "kw", ")", "registry", "[", "object", "]", "=", "function", "wrapper", ".", "register", "=", "register", "wrapper", ".", "dispatch", "=", "dispatch", "wrapper", ".", "registry", "=", "MappingProxyType", "(", "registry", ")", "wrapper", ".", "_clear_cache", "=", "dispatch_cache", ".", "clear", "update_wrapper", "(", "wrapper", ",", "function", ")", "return", "wrapper" ]
Single-dispatch generic function decorator. Transforms a function into a generic function, which can have different behaviours depending upon the type of its first argument. The decorated function acts as the default implementation, and additional implementations can be registered using the register() attribute of the generic function.
[ "Single", "-", "dispatch", "generic", "function", "decorator", "." ]
python
train
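A usage sketch of the generic-function interface this backport reimplements; the stdlib functools.singledispatch (Python 3.4+) is used here so the snippet runs without pureyaml installed, and it exposes the same register/dispatch attributes.

from functools import singledispatch

@singledispatch
def describe(value):
    return 'something else'

@describe.register(int)
def _(value):
    return 'an int: %d' % value

@describe.register(list)
def _(value):
    return 'a list of %d items' % len(value)

print(describe(3))         # an int: 3
print(describe([1, 2]))    # a list of 2 items
print(describe('hello'))   # something else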
sibirrer/lenstronomy
lenstronomy/Util/param_util.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/param_util.py#L4-L19
def cart2polar(x, y, center=np.array([0, 0])): """ transforms cartesian coords [x,y] into polar coords [r,phi] in the frame of the lense center :param coord: set of coordinates :type coord: array of size (n,2) :param center: rotation point :type center: array of size (2) :returns: array of same size with coords [r,phi] :raises: AttributeError, KeyError """ coordShift_x = x - center[0] coordShift_y = y - center[1] r = np.sqrt(coordShift_x**2+coordShift_y**2) phi = np.arctan2(coordShift_y, coordShift_x) return r, phi
[ "def", "cart2polar", "(", "x", ",", "y", ",", "center", "=", "np", ".", "array", "(", "[", "0", ",", "0", "]", ")", ")", ":", "coordShift_x", "=", "x", "-", "center", "[", "0", "]", "coordShift_y", "=", "y", "-", "center", "[", "1", "]", "r", "=", "np", ".", "sqrt", "(", "coordShift_x", "**", "2", "+", "coordShift_y", "**", "2", ")", "phi", "=", "np", ".", "arctan2", "(", "coordShift_y", ",", "coordShift_x", ")", "return", "r", ",", "phi" ]
transforms cartesian coords [x,y] into polar coords [r,phi] in the frame of the lense center :param coord: set of coordinates :type coord: array of size (n,2) :param center: rotation point :type center: array of size (2) :returns: array of same size with coords [r,phi] :raises: AttributeError, KeyError
[ "transforms", "cartesian", "coords", "[", "x", "y", "]", "into", "polar", "coords", "[", "r", "phi", "]", "in", "the", "frame", "of", "the", "lense", "center" ]
python
train
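A small usage sketch for cart2polar above; the import path and the center-array argument follow this record (later lenstronomy releases may expose a different signature), and the coordinates are arbitrary.

import numpy as np
from lenstronomy.Util.param_util import cart2polar

x = np.array([1.0, 0.0, -1.0])
y = np.array([0.0, 1.0, 0.0])

r, phi = cart2polar(x, y, center=np.array([0, 0]))
print(r)    # [1. 1. 1.]
print(phi)  # [0.         1.57079633 3.14159265]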
HPCC-Cloud-Computing/CAL
calplus/v1/network/resources/network.py
https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/network/resources/network.py#L85-L100
def get(self, req, driver): """Get info of a network Get info of a specific netowrk with id on special cloud with: :Param req :Type object Request """ response = driver.get_network(req.params, id) data = { 'action': "get", 'controller': "network", 'id': id, 'cloud': req.environ['calplus.cloud'], 'response': response } return data
[ "def", "get", "(", "self", ",", "req", ",", "driver", ")", ":", "response", "=", "driver", ".", "get_network", "(", "req", ".", "params", ",", "id", ")", "data", "=", "{", "'action'", ":", "\"get\"", ",", "'controller'", ":", "\"network\"", ",", "'id'", ":", "id", ",", "'cloud'", ":", "req", ".", "environ", "[", "'calplus.cloud'", "]", ",", "'response'", ":", "response", "}", "return", "data" ]
Get info of a network Get info of a specific netowrk with id on special cloud with: :Param req :Type object Request
[ "Get", "info", "of", "a", "network", "Get", "info", "of", "a", "specific", "netowrk", "with", "id", "on", "special", "cloud", "with", ":", ":", "Param", "req", ":", "Type", "object", "Request" ]
python
train
galaxyproject/pulsar
pulsar/util/pastescript/serve.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/pastescript/serve.py#L963-L974
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2): """ This makes sure any open ports are closed. Does this by connecting to them until they give connection refused. Servers should call like:: import paste.script ensure_port_cleanup([80, 443]) """ atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries, sleeptime=sleeptime)
[ "def", "ensure_port_cleanup", "(", "bound_addresses", ",", "maxtries", "=", "30", ",", "sleeptime", "=", "2", ")", ":", "atexit", ".", "register", "(", "_cleanup_ports", ",", "bound_addresses", ",", "maxtries", "=", "maxtries", ",", "sleeptime", "=", "sleeptime", ")" ]
This makes sure any open ports are closed. Does this by connecting to them until they give connection refused. Servers should call like:: import paste.script ensure_port_cleanup([80, 443])
[ "This", "makes", "sure", "any", "open", "ports", "are", "closed", "." ]
python
train
deepmind/sonnet
sonnet/python/modules/util.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/util.py#L140-L178
def check_initializers(initializers, keys): """Checks the given initializers. This checks that `initializers` is a dictionary that only contains keys in `keys`, and furthermore the entries in `initializers` are functions or further dictionaries (the latter used, for example, in passing initializers to modules inside modules) that must satisfy the same constraints. Args: initializers: Dictionary of initializers (allowing nested dictionaries) or None. keys: Iterable of valid keys for `initializers`. Returns: Copy of checked dictionary of initializers. If `initializers=None`, an empty dictionary will be returned. Raises: KeyError: If an initializer is provided for a key not in `keys`. TypeError: If a provided initializer is not a callable function, or `initializers` is not a Mapping. """ if initializers is None: return {} _assert_is_dictlike(initializers, valid_keys=keys) keys = set(keys) if not set(initializers) <= keys: extra_keys = set(initializers) - keys raise KeyError( "Invalid initializer keys {}, initializers can only " "be provided for {}".format( ", ".join("'{}'".format(key) for key in extra_keys), ", ".join("'{}'".format(key) for key in keys))) _check_nested_callables(initializers, "Initializer") return dict(initializers)
[ "def", "check_initializers", "(", "initializers", ",", "keys", ")", ":", "if", "initializers", "is", "None", ":", "return", "{", "}", "_assert_is_dictlike", "(", "initializers", ",", "valid_keys", "=", "keys", ")", "keys", "=", "set", "(", "keys", ")", "if", "not", "set", "(", "initializers", ")", "<=", "keys", ":", "extra_keys", "=", "set", "(", "initializers", ")", "-", "keys", "raise", "KeyError", "(", "\"Invalid initializer keys {}, initializers can only \"", "\"be provided for {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "\"'{}'\"", ".", "format", "(", "key", ")", "for", "key", "in", "extra_keys", ")", ",", "\", \"", ".", "join", "(", "\"'{}'\"", ".", "format", "(", "key", ")", "for", "key", "in", "keys", ")", ")", ")", "_check_nested_callables", "(", "initializers", ",", "\"Initializer\"", ")", "return", "dict", "(", "initializers", ")" ]
Checks the given initializers. This checks that `initializers` is a dictionary that only contains keys in `keys`, and furthermore the entries in `initializers` are functions or further dictionaries (the latter used, for example, in passing initializers to modules inside modules) that must satisfy the same constraints. Args: initializers: Dictionary of initializers (allowing nested dictionaries) or None. keys: Iterable of valid keys for `initializers`. Returns: Copy of checked dictionary of initializers. If `initializers=None`, an empty dictionary will be returned. Raises: KeyError: If an initializer is provided for a key not in `keys`. TypeError: If a provided initializer is not a callable function, or `initializers` is not a Mapping.
[ "Checks", "the", "given", "initializers", "." ]
python
train
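A hedged sketch of the contract enforced by check_initializers above, assuming the TF1-era Sonnet package is installed; the initializer callables are placeholders that the check never invokes.

from sonnet.python.modules.util import check_initializers

print(check_initializers({'w': lambda shape, dtype: None}, keys=['w', 'b']))  # copy of the dict
print(check_initializers(None, keys=['w', 'b']))                              # {}

try:
    check_initializers({'gamma': lambda *a: None}, keys=['w', 'b'])
except KeyError as exc:
    print(exc)  # 'gamma' is not one of the allowed keys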
GNS3/gns3-server
gns3server/compute/base_node.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/base_node.py#L407-L432
def console(self, console): """ Changes the console port :params console: Console port (integer) or None to free the port """ if console == self._console: return if self._console_type == "vnc" and console is not None and console < 5900: raise NodeError("VNC console require a port superior or equal to 5900 currently it's {}".format(console)) if self._console: self._manager.port_manager.release_tcp_port(self._console, self._project) self._console = None if console is not None: if self.console_type == "vnc": self._console = self._manager.port_manager.reserve_tcp_port(console, self._project, port_range_start=5900, port_range_end=6000) else: self._console = self._manager.port_manager.reserve_tcp_port(console, self._project) log.info("{module}: '{name}' [{id}]: console port set to {port}".format(module=self.manager.module_name, name=self.name, id=self.id, port=console))
[ "def", "console", "(", "self", ",", "console", ")", ":", "if", "console", "==", "self", ".", "_console", ":", "return", "if", "self", ".", "_console_type", "==", "\"vnc\"", "and", "console", "is", "not", "None", "and", "console", "<", "5900", ":", "raise", "NodeError", "(", "\"VNC console require a port superior or equal to 5900 currently it's {}\"", ".", "format", "(", "console", ")", ")", "if", "self", ".", "_console", ":", "self", ".", "_manager", ".", "port_manager", ".", "release_tcp_port", "(", "self", ".", "_console", ",", "self", ".", "_project", ")", "self", ".", "_console", "=", "None", "if", "console", "is", "not", "None", ":", "if", "self", ".", "console_type", "==", "\"vnc\"", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "console", ",", "self", ".", "_project", ",", "port_range_start", "=", "5900", ",", "port_range_end", "=", "6000", ")", "else", ":", "self", ".", "_console", "=", "self", ".", "_manager", ".", "port_manager", ".", "reserve_tcp_port", "(", "console", ",", "self", ".", "_project", ")", "log", ".", "info", "(", "\"{module}: '{name}' [{id}]: console port set to {port}\"", ".", "format", "(", "module", "=", "self", ".", "manager", ".", "module_name", ",", "name", "=", "self", ".", "name", ",", "id", "=", "self", ".", "id", ",", "port", "=", "console", ")", ")" ]
Changes the console port :params console: Console port (integer) or None to free the port
[ "Changes", "the", "console", "port" ]
python
train
bitesofcode/projexui
projexui/widgets/xcalendarwidget/xcalendarscene.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcalendarwidget/xcalendarscene.py#L408-L536
def rebuildDays( self ): """ Rebuilds the interface as a week display. """ time = QTime(0, 0, 0) hour = True x = 6 y = 6 + 24 w = self.width() - 12 - 25 dh = 48 indent = 58 text_data = [] vlines = [] hlines = [QLine(x, y, w, y)] time_grids = [] for i in range(48): if ( hour ): hlines.append(QLine(x, y, w, y)) text_data.append((x, y + 6, indent - 6, dh, Qt.AlignRight | Qt.AlignTop, time.toString('hap'))) else: hlines.append(QLine(x + indent, y, w, y)) time_grids.append((time, y, dh / 2)) # move onto the next line hour = not hour time = time.addSecs(30 * 60) y += dh / 2 hlines.append(QLine(x, y, w, y)) h = y y = 6 + 24 # load the grid vlines.append(QLine(x, y, x, h)) vlines.append(QLine(x + indent, y, x + indent, h)) vlines.append(QLine(w, y, w, h)) today = QDate.currentDate() curr_date = self.currentDate() # load the days if ( self.currentMode() == XCalendarScene.Mode.Week ): date = self.currentDate() day_of_week = date.dayOfWeek() if ( day_of_week == 7 ): day_of_week = 0 min_date = date.addDays(-day_of_week) max_date = date.addDays(6-day_of_week) self._minimumDate = min_date self._maximumDate = max_date dw = (w - (x + indent)) / 7.0 vx = x + indent date = min_date for i in range(7): vlines.append(QLine(vx, y, vx, h)) text_data.append((vx + 6, 6, dw, 24, Qt.AlignCenter, date.toString('ddd MM/dd'))) self._dateGrid[date.toJulianDay()] = ((0, i), QRectF(vx, y, dw, h - y)) # create the date grid for date time options for r, data in enumerate(time_grids): time, ty, th = data dtime = QDateTime(date, time) key = dtime.toTime_t() self._dateTimeGrid[key] = ((r, i), QRectF(vx, ty, dw, th)) if ( date == curr_date ): self._buildData['curr_date'] = QRectF(vx, y, dw, h - 29) elif ( date == today ): self._buildData['today'] = QRectF(vx, y, dw, h - 29) date = date.addDays(1) vx += dw # load a single day else: date = self.currentDate() self._maximumDate = date self._minimumDate = date text_data.append((x + indent, 6, w, 24, Qt.AlignCenter, date.toString('ddd MM/dd'))) self._dateGrid[date.toJulianDay()] = ((0, 0), QRectF(x, y, w - x, h - y)) # create the date grid for date time options for r, data in enumerate(time_grids): time, ty, th = data dtime = QDateTime(date, time) key = dtime.toTime_t() rect = QRectF(x + indent, ty, w - (x + indent), th) self._dateTimeGrid[key] = ((r, 0), rect) self._buildData['grid'] = hlines + vlines self._buildData['regular_text'] = text_data rect = self.sceneRect() rect.setHeight(h + 6) super(XCalendarScene, self).setSceneRect(rect)
[ "def", "rebuildDays", "(", "self", ")", ":", "time", "=", "QTime", "(", "0", ",", "0", ",", "0", ")", "hour", "=", "True", "x", "=", "6", "y", "=", "6", "+", "24", "w", "=", "self", ".", "width", "(", ")", "-", "12", "-", "25", "dh", "=", "48", "indent", "=", "58", "text_data", "=", "[", "]", "vlines", "=", "[", "]", "hlines", "=", "[", "QLine", "(", "x", ",", "y", ",", "w", ",", "y", ")", "]", "time_grids", "=", "[", "]", "for", "i", "in", "range", "(", "48", ")", ":", "if", "(", "hour", ")", ":", "hlines", ".", "append", "(", "QLine", "(", "x", ",", "y", ",", "w", ",", "y", ")", ")", "text_data", ".", "append", "(", "(", "x", ",", "y", "+", "6", ",", "indent", "-", "6", ",", "dh", ",", "Qt", ".", "AlignRight", "|", "Qt", ".", "AlignTop", ",", "time", ".", "toString", "(", "'hap'", ")", ")", ")", "else", ":", "hlines", ".", "append", "(", "QLine", "(", "x", "+", "indent", ",", "y", ",", "w", ",", "y", ")", ")", "time_grids", ".", "append", "(", "(", "time", ",", "y", ",", "dh", "/", "2", ")", ")", "# move onto the next line\r", "hour", "=", "not", "hour", "time", "=", "time", ".", "addSecs", "(", "30", "*", "60", ")", "y", "+=", "dh", "/", "2", "hlines", ".", "append", "(", "QLine", "(", "x", ",", "y", ",", "w", ",", "y", ")", ")", "h", "=", "y", "y", "=", "6", "+", "24", "# load the grid\r", "vlines", ".", "append", "(", "QLine", "(", "x", ",", "y", ",", "x", ",", "h", ")", ")", "vlines", ".", "append", "(", "QLine", "(", "x", "+", "indent", ",", "y", ",", "x", "+", "indent", ",", "h", ")", ")", "vlines", ".", "append", "(", "QLine", "(", "w", ",", "y", ",", "w", ",", "h", ")", ")", "today", "=", "QDate", ".", "currentDate", "(", ")", "curr_date", "=", "self", ".", "currentDate", "(", ")", "# load the days\r", "if", "(", "self", ".", "currentMode", "(", ")", "==", "XCalendarScene", ".", "Mode", ".", "Week", ")", ":", "date", "=", "self", ".", "currentDate", "(", ")", "day_of_week", "=", "date", ".", "dayOfWeek", "(", ")", "if", "(", "day_of_week", "==", "7", ")", ":", "day_of_week", "=", "0", "min_date", "=", "date", ".", "addDays", "(", "-", "day_of_week", ")", "max_date", "=", "date", ".", "addDays", "(", "6", "-", "day_of_week", ")", "self", ".", "_minimumDate", "=", "min_date", "self", ".", "_maximumDate", "=", "max_date", "dw", "=", "(", "w", "-", "(", "x", "+", "indent", ")", ")", "/", "7.0", "vx", "=", "x", "+", "indent", "date", "=", "min_date", "for", "i", "in", "range", "(", "7", ")", ":", "vlines", ".", "append", "(", "QLine", "(", "vx", ",", "y", ",", "vx", ",", "h", ")", ")", "text_data", ".", "append", "(", "(", "vx", "+", "6", ",", "6", ",", "dw", ",", "24", ",", "Qt", ".", "AlignCenter", ",", "date", ".", "toString", "(", "'ddd MM/dd'", ")", ")", ")", "self", ".", "_dateGrid", "[", "date", ".", "toJulianDay", "(", ")", "]", "=", "(", "(", "0", ",", "i", ")", ",", "QRectF", "(", "vx", ",", "y", ",", "dw", ",", "h", "-", "y", ")", ")", "# create the date grid for date time options\r", "for", "r", ",", "data", "in", "enumerate", "(", "time_grids", ")", ":", "time", ",", "ty", ",", "th", "=", "data", "dtime", "=", "QDateTime", "(", "date", ",", "time", ")", "key", "=", "dtime", ".", "toTime_t", "(", ")", "self", ".", "_dateTimeGrid", "[", "key", "]", "=", "(", "(", "r", ",", "i", ")", ",", "QRectF", "(", "vx", ",", "ty", ",", "dw", ",", "th", ")", ")", "if", "(", "date", "==", "curr_date", ")", ":", "self", ".", "_buildData", "[", "'curr_date'", "]", "=", "QRectF", "(", "vx", ",", "y", ",", "dw", ",", "h", "-", "29", ")", "elif", "(", "date", "==", "today", ")", ":", 
"self", ".", "_buildData", "[", "'today'", "]", "=", "QRectF", "(", "vx", ",", "y", ",", "dw", ",", "h", "-", "29", ")", "date", "=", "date", ".", "addDays", "(", "1", ")", "vx", "+=", "dw", "# load a single day\r", "else", ":", "date", "=", "self", ".", "currentDate", "(", ")", "self", ".", "_maximumDate", "=", "date", "self", ".", "_minimumDate", "=", "date", "text_data", ".", "append", "(", "(", "x", "+", "indent", ",", "6", ",", "w", ",", "24", ",", "Qt", ".", "AlignCenter", ",", "date", ".", "toString", "(", "'ddd MM/dd'", ")", ")", ")", "self", ".", "_dateGrid", "[", "date", ".", "toJulianDay", "(", ")", "]", "=", "(", "(", "0", ",", "0", ")", ",", "QRectF", "(", "x", ",", "y", ",", "w", "-", "x", ",", "h", "-", "y", ")", ")", "# create the date grid for date time options\r", "for", "r", ",", "data", "in", "enumerate", "(", "time_grids", ")", ":", "time", ",", "ty", ",", "th", "=", "data", "dtime", "=", "QDateTime", "(", "date", ",", "time", ")", "key", "=", "dtime", ".", "toTime_t", "(", ")", "rect", "=", "QRectF", "(", "x", "+", "indent", ",", "ty", ",", "w", "-", "(", "x", "+", "indent", ")", ",", "th", ")", "self", ".", "_dateTimeGrid", "[", "key", "]", "=", "(", "(", "r", ",", "0", ")", ",", "rect", ")", "self", ".", "_buildData", "[", "'grid'", "]", "=", "hlines", "+", "vlines", "self", ".", "_buildData", "[", "'regular_text'", "]", "=", "text_data", "rect", "=", "self", ".", "sceneRect", "(", ")", "rect", ".", "setHeight", "(", "h", "+", "6", ")", "super", "(", "XCalendarScene", ",", "self", ")", ".", "setSceneRect", "(", "rect", ")" ]
Rebuilds the interface as a week display.
[ "Rebuilds", "the", "interface", "as", "a", "week", "display", "." ]
python
train
pgmpy/pgmpy
pgmpy/factors/discrete/DiscreteFactor.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/factors/discrete/DiscreteFactor.py#L721-L753
def _str(self, phi_or_p="phi", tablefmt="grid", print_state_names=True): """ Generate the string from `__str__` method. Parameters ---------- phi_or_p: 'phi' | 'p' 'phi': When used for Factors. 'p': When used for CPDs. print_state_names: boolean If True, the user defined state names are displayed. """ string_header = list(map(lambda x: six.text_type(x), self.scope())) string_header.append('{phi_or_p}({variables})'.format(phi_or_p=phi_or_p, variables=','.join(string_header))) value_index = 0 factor_table = [] for prob in product(*[range(card) for card in self.cardinality]): if self.state_names and print_state_names: prob_list = ["{var}({state})".format( var=list(self.variables)[i], state=self.state_names[list( self.variables)[i]][prob[i]]) for i in range(len(self.variables))] else: prob_list = ["{s}_{d}".format(s=list(self.variables)[i], d=prob[i]) for i in range(len(self.variables))] prob_list.append(self.values.ravel()[value_index]) factor_table.append(prob_list) value_index += 1 return tabulate(factor_table, headers=string_header, tablefmt=tablefmt, floatfmt=".4f")
[ "def", "_str", "(", "self", ",", "phi_or_p", "=", "\"phi\"", ",", "tablefmt", "=", "\"grid\"", ",", "print_state_names", "=", "True", ")", ":", "string_header", "=", "list", "(", "map", "(", "lambda", "x", ":", "six", ".", "text_type", "(", "x", ")", ",", "self", ".", "scope", "(", ")", ")", ")", "string_header", ".", "append", "(", "'{phi_or_p}({variables})'", ".", "format", "(", "phi_or_p", "=", "phi_or_p", ",", "variables", "=", "','", ".", "join", "(", "string_header", ")", ")", ")", "value_index", "=", "0", "factor_table", "=", "[", "]", "for", "prob", "in", "product", "(", "*", "[", "range", "(", "card", ")", "for", "card", "in", "self", ".", "cardinality", "]", ")", ":", "if", "self", ".", "state_names", "and", "print_state_names", ":", "prob_list", "=", "[", "\"{var}({state})\"", ".", "format", "(", "var", "=", "list", "(", "self", ".", "variables", ")", "[", "i", "]", ",", "state", "=", "self", ".", "state_names", "[", "list", "(", "self", ".", "variables", ")", "[", "i", "]", "]", "[", "prob", "[", "i", "]", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "variables", ")", ")", "]", "else", ":", "prob_list", "=", "[", "\"{s}_{d}\"", ".", "format", "(", "s", "=", "list", "(", "self", ".", "variables", ")", "[", "i", "]", ",", "d", "=", "prob", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "variables", ")", ")", "]", "prob_list", ".", "append", "(", "self", ".", "values", ".", "ravel", "(", ")", "[", "value_index", "]", ")", "factor_table", ".", "append", "(", "prob_list", ")", "value_index", "+=", "1", "return", "tabulate", "(", "factor_table", ",", "headers", "=", "string_header", ",", "tablefmt", "=", "tablefmt", ",", "floatfmt", "=", "\".4f\"", ")" ]
Generate the string from `__str__` method. Parameters ---------- phi_or_p: 'phi' | 'p' 'phi': When used for Factors. 'p': When used for CPDs. print_state_names: boolean If True, the user defined state names are displayed.
[ "Generate", "the", "string", "from", "__str__", "method", "." ]
python
train
rackerlabs/rackspace-python-neutronclient
neutronclient/shell.py
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/shell.py#L510-L557
def build_option_parser(self, description, version): """Return an argparse option parser for this application. Subclasses may override this method to extend the parser with more global options. :param description: full description of the application :paramtype description: str :param version: version number for the application :paramtype version: str """ parser = argparse.ArgumentParser( description=description, add_help=False, ) parser.add_argument( '--version', action='version', version=__version__, ) parser.add_argument( '-v', '--verbose', '--debug', action='count', dest='verbose_level', default=self.DEFAULT_VERBOSE_LEVEL, help=_('Increase verbosity of output and show tracebacks on' ' errors. You can repeat this option.')) parser.add_argument( '-q', '--quiet', action='store_const', dest='verbose_level', const=0, help=_('Suppress output except warnings and errors.')) parser.add_argument( '-h', '--help', action=HelpAction, nargs=0, default=self, # tricky help=_("Show this help message and exit.")) parser.add_argument( '-r', '--retries', metavar="NUM", type=check_non_negative_int, default=0, help=_("How many times the request to the Neutron server should " "be retried if it fails.")) # FIXME(bklei): this method should come from keystoneauth1 self._append_global_identity_args(parser) return parser
[ "def", "build_option_parser", "(", "self", ",", "description", ",", "version", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "description", ",", "add_help", "=", "False", ",", ")", "parser", ".", "add_argument", "(", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "__version__", ",", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbose'", ",", "'--debug'", ",", "action", "=", "'count'", ",", "dest", "=", "'verbose_level'", ",", "default", "=", "self", ".", "DEFAULT_VERBOSE_LEVEL", ",", "help", "=", "_", "(", "'Increase verbosity of output and show tracebacks on'", "' errors. You can repeat this option.'", ")", ")", "parser", ".", "add_argument", "(", "'-q'", ",", "'--quiet'", ",", "action", "=", "'store_const'", ",", "dest", "=", "'verbose_level'", ",", "const", "=", "0", ",", "help", "=", "_", "(", "'Suppress output except warnings and errors.'", ")", ")", "parser", ".", "add_argument", "(", "'-h'", ",", "'--help'", ",", "action", "=", "HelpAction", ",", "nargs", "=", "0", ",", "default", "=", "self", ",", "# tricky", "help", "=", "_", "(", "\"Show this help message and exit.\"", ")", ")", "parser", ".", "add_argument", "(", "'-r'", ",", "'--retries'", ",", "metavar", "=", "\"NUM\"", ",", "type", "=", "check_non_negative_int", ",", "default", "=", "0", ",", "help", "=", "_", "(", "\"How many times the request to the Neutron server should \"", "\"be retried if it fails.\"", ")", ")", "# FIXME(bklei): this method should come from keystoneauth1", "self", ".", "_append_global_identity_args", "(", "parser", ")", "return", "parser" ]
Return an argparse option parser for this application. Subclasses may override this method to extend the parser with more global options. :param description: full description of the application :paramtype description: str :param version: version number for the application :paramtype version: str
[ "Return", "an", "argparse", "option", "parser", "for", "this", "application", "." ]
python
train
Autodesk/aomi
aomi/util.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/util.py#L109-L130
def vault_time_to_s(time_string): """Will convert a time string, as recognized by other Vault tooling, into an integer representation of seconds""" if not time_string or len(time_string) < 2: raise aomi.exceptions \ .AomiData("Invalid timestring %s" % time_string) last_char = time_string[len(time_string) - 1] if last_char == 's': return int(time_string[0:len(time_string) - 1]) elif last_char == 'm': cur = int(time_string[0:len(time_string) - 1]) return cur * 60 elif last_char == 'h': cur = int(time_string[0:len(time_string) - 1]) return cur * 3600 elif last_char == 'd': cur = int(time_string[0:len(time_string) - 1]) return cur * 86400 else: raise aomi.exceptions \ .AomiData("Invalid time scale %s" % last_char)
[ "def", "vault_time_to_s", "(", "time_string", ")", ":", "if", "not", "time_string", "or", "len", "(", "time_string", ")", "<", "2", ":", "raise", "aomi", ".", "exceptions", ".", "AomiData", "(", "\"Invalid timestring %s\"", "%", "time_string", ")", "last_char", "=", "time_string", "[", "len", "(", "time_string", ")", "-", "1", "]", "if", "last_char", "==", "'s'", ":", "return", "int", "(", "time_string", "[", "0", ":", "len", "(", "time_string", ")", "-", "1", "]", ")", "elif", "last_char", "==", "'m'", ":", "cur", "=", "int", "(", "time_string", "[", "0", ":", "len", "(", "time_string", ")", "-", "1", "]", ")", "return", "cur", "*", "60", "elif", "last_char", "==", "'h'", ":", "cur", "=", "int", "(", "time_string", "[", "0", ":", "len", "(", "time_string", ")", "-", "1", "]", ")", "return", "cur", "*", "3600", "elif", "last_char", "==", "'d'", ":", "cur", "=", "int", "(", "time_string", "[", "0", ":", "len", "(", "time_string", ")", "-", "1", "]", ")", "return", "cur", "*", "86400", "else", ":", "raise", "aomi", ".", "exceptions", ".", "AomiData", "(", "\"Invalid time scale %s\"", "%", "last_char", ")" ]
Will convert a time string, as recognized by other Vault tooling, into an integer representation of seconds
[ "Will", "convert", "a", "time", "string", "as", "recognized", "by", "other", "Vault", "tooling", "into", "an", "integer", "representation", "of", "seconds" ]
python
train
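The unit-suffix rules in the record above are easy to check with a few sample values. The sketch below is a standalone re-implementation for illustration only, not the project's own function; it raises ValueError instead of aomi.exceptions.AomiData.

def time_to_seconds(time_string):
    # Map the trailing unit character to a multiplier, mirroring vault_time_to_s above.
    multipliers = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    if not time_string or len(time_string) < 2:
        raise ValueError("Invalid timestring %s" % time_string)
    unit = time_string[-1]
    if unit not in multipliers:
        raise ValueError("Invalid time scale %s" % unit)
    return int(time_string[:-1]) * multipliers[unit]

assert time_to_seconds('90s') == 90
assert time_to_seconds('5m') == 300
assert time_to_seconds('2h') == 7200
assert time_to_seconds('1d') == 86400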
fermiPy/fermipy
fermipy/jobs/job_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L519-L523
def _fill_cache(self): """Fill the cache from the `astropy.table.Table`""" for irow in range(len(self._table)): job_details = self.make_job_details(irow) self._cache[job_details.fullkey] = job_details
[ "def", "_fill_cache", "(", "self", ")", ":", "for", "irow", "in", "range", "(", "len", "(", "self", ".", "_table", ")", ")", ":", "job_details", "=", "self", ".", "make_job_details", "(", "irow", ")", "self", ".", "_cache", "[", "job_details", ".", "fullkey", "]", "=", "job_details" ]
Fill the cache from the `astropy.table.Table`
[ "Fill", "the", "cache", "from", "the", "astropy", ".", "table", ".", "Table" ]
python
train
MycroftAI/mycroft-skills-manager
msm/mycroft_skills_manager.py
https://github.com/MycroftAI/mycroft-skills-manager/blob/5acef240de42e8ceae2e82bc7492ffee33288b00/msm/mycroft_skills_manager.py#L163-L168
def write_skills_data(self, data=None): """ Write skills data hash if it has been modified. """ data = data or self.skills_data if skills_data_hash(data) != self.skills_data_hash: write_skills_data(data) self.skills_data_hash = skills_data_hash(data)
[ "def", "write_skills_data", "(", "self", ",", "data", "=", "None", ")", ":", "data", "=", "data", "or", "self", ".", "skills_data", "if", "skills_data_hash", "(", "data", ")", "!=", "self", ".", "skills_data_hash", ":", "write_skills_data", "(", "data", ")", "self", ".", "skills_data_hash", "=", "skills_data_hash", "(", "data", ")" ]
Write skills data hash if it has been modified.
[ "Write", "skills", "data", "hash", "if", "it", "has", "been", "modified", "." ]
python
train
merll/docker-map
dockermap/build/dockerfile.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/build/dockerfile.py#L483-L525
def finalize(self): """ Finalizes the Dockerfile. Before the buffer is practically marked as read-only, the following Dockerfile commands are written: * ``RUN rm -R`` on each files marked for automatic removal; * ``VOLUME`` for shared volumes; * ``USER`` as the default user for following commands; * ``WORKDIR`` as the working directory for following commands; * ``SHELL`` if the default shell is to be changed; * ``ENTRYPOINT`` and ``CMD``, each formatted as a shell or exec command according to :attr:`command_shell`; * ``EXPOSE`` for exposed ports; * ``LABEL``, ``STOPSIGNAL``, and ``HEALTHCHECK`` instructions for the image; An attempt to finalize an already-finalized instance has no effect. """ if self._finalized: return if self._remove_files: for filename in self._remove_files: self.prefix('RUN', 'rm -Rf', filename) self.blank() if self._volumes is not None: self.prefix('VOLUME', json.dumps(self._volumes)) if self._cmd_user: self.prefix('USER', self._cmd_user) if self._cmd_workdir: self.prefix('WORKDIR', self._cmd_workdir) if self._shell: self.prefix('SHELL', self._shell) if self._entrypoint is not None: self.prefix('ENTRYPOINT', format_command(self._entrypoint, self._command_shell)) if self._command is not None: self.prefix('CMD', format_command(self._command, self._command_shell)) if self._expose is not None: self.prefix('EXPOSE', *format_expose(self._expose)) if self._labels: self.prefix('LABEL', *format_labels(self._labels)) if self._stopsignal: self.prefix('STOPSIGNAL', self._stopsignal) if self._healthcheck: self.prefix('HEALTHCHECK', self._healthcheck) super(DockerFile, self).finalize()
[ "def", "finalize", "(", "self", ")", ":", "if", "self", ".", "_finalized", ":", "return", "if", "self", ".", "_remove_files", ":", "for", "filename", "in", "self", ".", "_remove_files", ":", "self", ".", "prefix", "(", "'RUN'", ",", "'rm -Rf'", ",", "filename", ")", "self", ".", "blank", "(", ")", "if", "self", ".", "_volumes", "is", "not", "None", ":", "self", ".", "prefix", "(", "'VOLUME'", ",", "json", ".", "dumps", "(", "self", ".", "_volumes", ")", ")", "if", "self", ".", "_cmd_user", ":", "self", ".", "prefix", "(", "'USER'", ",", "self", ".", "_cmd_user", ")", "if", "self", ".", "_cmd_workdir", ":", "self", ".", "prefix", "(", "'WORKDIR'", ",", "self", ".", "_cmd_workdir", ")", "if", "self", ".", "_shell", ":", "self", ".", "prefix", "(", "'SHELL'", ",", "self", ".", "_shell", ")", "if", "self", ".", "_entrypoint", "is", "not", "None", ":", "self", ".", "prefix", "(", "'ENTRYPOINT'", ",", "format_command", "(", "self", ".", "_entrypoint", ",", "self", ".", "_command_shell", ")", ")", "if", "self", ".", "_command", "is", "not", "None", ":", "self", ".", "prefix", "(", "'CMD'", ",", "format_command", "(", "self", ".", "_command", ",", "self", ".", "_command_shell", ")", ")", "if", "self", ".", "_expose", "is", "not", "None", ":", "self", ".", "prefix", "(", "'EXPOSE'", ",", "*", "format_expose", "(", "self", ".", "_expose", ")", ")", "if", "self", ".", "_labels", ":", "self", ".", "prefix", "(", "'LABEL'", ",", "*", "format_labels", "(", "self", ".", "_labels", ")", ")", "if", "self", ".", "_stopsignal", ":", "self", ".", "prefix", "(", "'STOPSIGNAL'", ",", "self", ".", "_stopsignal", ")", "if", "self", ".", "_healthcheck", ":", "self", ".", "prefix", "(", "'HEALTHCHECK'", ",", "self", ".", "_healthcheck", ")", "super", "(", "DockerFile", ",", "self", ")", ".", "finalize", "(", ")" ]
Finalizes the Dockerfile. Before the buffer is practically marked as read-only, the following Dockerfile commands are written:

* ``RUN rm -R`` on each file marked for automatic removal;
* ``VOLUME`` for shared volumes;
* ``USER`` as the default user for following commands;
* ``WORKDIR`` as the working directory for following commands;
* ``SHELL`` if the default shell is to be changed;
* ``ENTRYPOINT`` and ``CMD``, each formatted as a shell or exec command according to :attr:`command_shell`;
* ``EXPOSE`` for exposed ports;
* ``LABEL``, ``STOPSIGNAL``, and ``HEALTHCHECK`` instructions for the image;

An attempt to finalize an already-finalized instance has no effect.
[ "Finalizes", "the", "Dockerfile", ".", "Before", "the", "buffer", "is", "practically", "marked", "as", "read", "-", "only", "the", "following", "Dockerfile", "commands", "are", "written", ":" ]
python
train
PmagPy/PmagPy
programs/bootams.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/bootams.py#L10-L78
def main(): """ NAME bootams.py DESCRIPTION calculates bootstrap statistics for tensor data SYNTAX bootams.py [-h][command line options] OPTIONS: -h prints help message and quits -f FILE specifies input file name -par specifies parametric bootstrap [by whole data set] -n N specifies the number of bootstrap samples, default is N=1000 INPUT x11 x22 x33 x12 x23 x13 OUTPUT tau_1 tau_1_sigma V1_dec V1_inc V1_eta V1_eta_dec V1_eta_inc V1_zeta V1_zeta_dec V1_zeta_inc tau_2 tau_2_sigma V2_dec V2_inc V2_eta V2_eta_dec V2_eta_inc V2_zeta V2_zeta_dec V2_zeta_inc tau_3 tau_2_sigma V3_dec V3_inc V3_eta V3_eta_dec V3_eta_inc V3_zeta V3_zeta_dec V3_zeta_inc """ # set options ipar,nb=0,5000 if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-f' in sys.argv: ind=sys.argv.index('-f') file=sys.argv[ind+1] Ss=np.loadtxt(file) #f=open(file,'r') #data=f.readlines() if '-par' in sys.argv:ipar=1 if '-n' in sys.argv: ind=sys.argv.index('-n') nb=int(sys.argv[ind+1]) # read in the data print("Doing bootstrap - be patient") #Ss=[] #for line in data: # s=[] # rec=line.split() # for i in range(6): # s.append(float(rec[i])) # Ss.append(s) Tmean,Vmean,Taus,Vs=pmag.s_boot(Ss,ipar=ipar,nb=nb) bpars=pmag.sbootpars(Taus,Vs) # calculate kent parameters for bootstrap bpars["v1_dec"]=Vmean[0][0] bpars["v1_inc"]=Vmean[0][1] bpars["v2_dec"]=Vmean[1][0] bpars["v2_inc"]=Vmean[1][1] bpars["v3_dec"]=Vmean[2][0] bpars["v3_inc"]=Vmean[2][1] bpars["t1"]=Tmean[0] bpars["t2"]=Tmean[1] bpars["t3"]=Tmean[2] print(""" tau tau_sigma V_dec V_inc V_eta V_eta_dec V_eta_inc V_zeta V_zeta_dec V_zeta_inc """) outstring='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(bpars["t1"],bpars["t1_sigma"],bpars["v1_dec"],bpars["v1_inc"],bpars["v1_zeta"],bpars["v1_zeta_dec"],bpars["v1_zeta_inc"],bpars["v1_eta"],bpars["v1_eta_dec"],bpars["v1_eta_inc"]) print(outstring) outstring='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(bpars["t2"],bpars["t2_sigma"],bpars["v2_dec"],bpars["v2_inc"],bpars["v2_zeta"],bpars["v2_zeta_dec"],bpars["v2_zeta_inc"],bpars["v2_eta"],bpars["v2_eta_dec"],bpars["v2_eta_inc"]) print(outstring) outstring='%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'%(bpars["t3"],bpars["t3_sigma"],bpars["v3_dec"],bpars["v3_inc"],bpars["v3_zeta"],bpars["v3_zeta_dec"],bpars["v3_zeta_inc"],bpars["v3_eta"],bpars["v3_eta_dec"],bpars["v3_eta_inc"]) print(outstring)
[ "def", "main", "(", ")", ":", "# set options", "ipar", ",", "nb", "=", "0", ",", "5000", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-f'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "file", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "Ss", "=", "np", ".", "loadtxt", "(", "file", ")", "#f=open(file,'r')", "#data=f.readlines()", "if", "'-par'", "in", "sys", ".", "argv", ":", "ipar", "=", "1", "if", "'-n'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-n'", ")", "nb", "=", "int", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "# read in the data", "print", "(", "\"Doing bootstrap - be patient\"", ")", "#Ss=[]", "#for line in data:", "# s=[]", "# rec=line.split()", "# for i in range(6):", "# s.append(float(rec[i]))", "# Ss.append(s)", "Tmean", ",", "Vmean", ",", "Taus", ",", "Vs", "=", "pmag", ".", "s_boot", "(", "Ss", ",", "ipar", "=", "ipar", ",", "nb", "=", "nb", ")", "bpars", "=", "pmag", ".", "sbootpars", "(", "Taus", ",", "Vs", ")", "# calculate kent parameters for bootstrap", "bpars", "[", "\"v1_dec\"", "]", "=", "Vmean", "[", "0", "]", "[", "0", "]", "bpars", "[", "\"v1_inc\"", "]", "=", "Vmean", "[", "0", "]", "[", "1", "]", "bpars", "[", "\"v2_dec\"", "]", "=", "Vmean", "[", "1", "]", "[", "0", "]", "bpars", "[", "\"v2_inc\"", "]", "=", "Vmean", "[", "1", "]", "[", "1", "]", "bpars", "[", "\"v3_dec\"", "]", "=", "Vmean", "[", "2", "]", "[", "0", "]", "bpars", "[", "\"v3_inc\"", "]", "=", "Vmean", "[", "2", "]", "[", "1", "]", "bpars", "[", "\"t1\"", "]", "=", "Tmean", "[", "0", "]", "bpars", "[", "\"t2\"", "]", "=", "Tmean", "[", "1", "]", "bpars", "[", "\"t3\"", "]", "=", "Tmean", "[", "2", "]", "print", "(", "\"\"\"\ntau tau_sigma V_dec V_inc V_eta V_eta_dec V_eta_inc V_zeta V_zeta_dec V_zeta_inc\n\"\"\"", ")", "outstring", "=", "'%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'", "%", "(", "bpars", "[", "\"t1\"", "]", ",", "bpars", "[", "\"t1_sigma\"", "]", ",", "bpars", "[", "\"v1_dec\"", "]", ",", "bpars", "[", "\"v1_inc\"", "]", ",", "bpars", "[", "\"v1_zeta\"", "]", ",", "bpars", "[", "\"v1_zeta_dec\"", "]", ",", "bpars", "[", "\"v1_zeta_inc\"", "]", ",", "bpars", "[", "\"v1_eta\"", "]", ",", "bpars", "[", "\"v1_eta_dec\"", "]", ",", "bpars", "[", "\"v1_eta_inc\"", "]", ")", "print", "(", "outstring", ")", "outstring", "=", "'%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'", "%", "(", "bpars", "[", "\"t2\"", "]", ",", "bpars", "[", "\"t2_sigma\"", "]", ",", "bpars", "[", "\"v2_dec\"", "]", ",", "bpars", "[", "\"v2_inc\"", "]", ",", "bpars", "[", "\"v2_zeta\"", "]", ",", "bpars", "[", "\"v2_zeta_dec\"", "]", ",", "bpars", "[", "\"v2_zeta_inc\"", "]", ",", "bpars", "[", "\"v2_eta\"", "]", ",", "bpars", "[", "\"v2_eta_dec\"", "]", ",", "bpars", "[", "\"v2_eta_inc\"", "]", ")", "print", "(", "outstring", ")", "outstring", "=", "'%7.5f %7.5f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f %7.1f'", "%", "(", "bpars", "[", "\"t3\"", "]", ",", "bpars", "[", "\"t3_sigma\"", "]", ",", "bpars", "[", "\"v3_dec\"", "]", ",", "bpars", "[", "\"v3_inc\"", "]", ",", "bpars", "[", "\"v3_zeta\"", "]", ",", "bpars", "[", "\"v3_zeta_dec\"", "]", ",", "bpars", "[", "\"v3_zeta_inc\"", "]", ",", "bpars", "[", "\"v3_eta\"", "]", ",", "bpars", "[", "\"v3_eta_dec\"", "]", ",", "bpars", "[", "\"v3_eta_inc\"", "]", ")", "print", "(", "outstring", ")" ]
NAME
    bootams.py

DESCRIPTION
    calculates bootstrap statistics for tensor data

SYNTAX
    bootams.py [-h][command line options]

OPTIONS:
    -h prints help message and quits
    -f FILE specifies input file name
    -par specifies parametric bootstrap [by whole data set]
    -n N specifies the number of bootstrap samples, default is N=5000

INPUT
    x11 x22 x33 x12 x23 x13

OUTPUT
    tau_1 tau_1_sigma V1_dec V1_inc V1_eta V1_eta_dec V1_eta_inc V1_zeta V1_zeta_dec V1_zeta_inc
    tau_2 tau_2_sigma V2_dec V2_inc V2_eta V2_eta_dec V2_eta_inc V2_zeta V2_zeta_dec V2_zeta_inc
    tau_3 tau_3_sigma V3_dec V3_inc V3_eta V3_eta_dec V3_eta_inc V3_zeta V3_zeta_dec V3_zeta_inc
[ "NAME", "bootams", ".", "py" ]
python
train
whyscream/dspam-milter
dspam/client.py
https://github.com/whyscream/dspam-milter/blob/f9939b718eed02cb7e56f8c5b921db4cfe1cd85a/dspam/client.py#L92-L109
def _send(self, line): """ Write a line of data to the server. Args: line -- A single line of data to write to the socket. """ if not line.endswith('\r\n'): if line.endswith('\n'): logger.debug('Fixing bare LF before sending data to socket') line = line[0:-1] + '\r\n' else: logger.debug( 'Fixing missing CRLF before sending data to socket') line = line + '\r\n' logger.debug('Client sent: ' + line.rstrip()) self._socket.send(line)
[ "def", "_send", "(", "self", ",", "line", ")", ":", "if", "not", "line", ".", "endswith", "(", "'\\r\\n'", ")", ":", "if", "line", ".", "endswith", "(", "'\\n'", ")", ":", "logger", ".", "debug", "(", "'Fixing bare LF before sending data to socket'", ")", "line", "=", "line", "[", "0", ":", "-", "1", "]", "+", "'\\r\\n'", "else", ":", "logger", ".", "debug", "(", "'Fixing missing CRLF before sending data to socket'", ")", "line", "=", "line", "+", "'\\r\\n'", "logger", ".", "debug", "(", "'Client sent: '", "+", "line", ".", "rstrip", "(", ")", ")", "self", ".", "_socket", ".", "send", "(", "line", ")" ]
Write a line of data to the server. Args: line -- A single line of data to write to the socket.
[ "Write", "a", "line", "of", "data", "to", "the", "server", "." ]
python
train
nicolargo/glances
glances/plugins/glances_plugin.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_plugin.py#L238-L260
def get_stats_history(self, item=None, nb=0): """Return the stats history (JSON format).""" s = self.get_json_history(nb=nb) if item is None: return self._json_dumps(s) if isinstance(s, dict): try: return self._json_dumps({item: s[item]}) except KeyError as e: logger.error("Cannot get item history {} ({})".format(item, e)) return None elif isinstance(s, list): try: # Source: # http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list return self._json_dumps({item: map(itemgetter(item), s)}) except (KeyError, ValueError) as e: logger.error("Cannot get item history {} ({})".format(item, e)) return None else: return None
[ "def", "get_stats_history", "(", "self", ",", "item", "=", "None", ",", "nb", "=", "0", ")", ":", "s", "=", "self", ".", "get_json_history", "(", "nb", "=", "nb", ")", "if", "item", "is", "None", ":", "return", "self", ".", "_json_dumps", "(", "s", ")", "if", "isinstance", "(", "s", ",", "dict", ")", ":", "try", ":", "return", "self", ".", "_json_dumps", "(", "{", "item", ":", "s", "[", "item", "]", "}", ")", "except", "KeyError", "as", "e", ":", "logger", ".", "error", "(", "\"Cannot get item history {} ({})\"", ".", "format", "(", "item", ",", "e", ")", ")", "return", "None", "elif", "isinstance", "(", "s", ",", "list", ")", ":", "try", ":", "# Source:", "# http://stackoverflow.com/questions/4573875/python-get-index-of-dictionary-item-in-list", "return", "self", ".", "_json_dumps", "(", "{", "item", ":", "map", "(", "itemgetter", "(", "item", ")", ",", "s", ")", "}", ")", "except", "(", "KeyError", ",", "ValueError", ")", "as", "e", ":", "logger", ".", "error", "(", "\"Cannot get item history {} ({})\"", ".", "format", "(", "item", ",", "e", ")", ")", "return", "None", "else", ":", "return", "None" ]
Return the stats history (JSON format).
[ "Return", "the", "stats", "history", "(", "JSON", "format", ")", "." ]
python
train
mrstephenneal/mysql-toolkit
mysql/toolkit/components/operations/alter.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/operations/alter.py#L6-L19
def rename(self, old_table, new_table): """ Rename a table. You must have ALTER and DROP privileges for the original table, and CREATE and INSERT privileges for the new table. """ try: command = 'RENAME TABLE {0} TO {1}'.format(wrap(old_table), wrap(new_table)) except: command = 'ALTER TABLE {0} RENAME {1}'.format(wrap(old_table), wrap(new_table)) self.execute(command) self._printer('Renamed {0} to {1}'.format(wrap(old_table), wrap(new_table))) return old_table, new_table
[ "def", "rename", "(", "self", ",", "old_table", ",", "new_table", ")", ":", "try", ":", "command", "=", "'RENAME TABLE {0} TO {1}'", ".", "format", "(", "wrap", "(", "old_table", ")", ",", "wrap", "(", "new_table", ")", ")", "except", ":", "command", "=", "'ALTER TABLE {0} RENAME {1}'", ".", "format", "(", "wrap", "(", "old_table", ")", ",", "wrap", "(", "new_table", ")", ")", "self", ".", "execute", "(", "command", ")", "self", ".", "_printer", "(", "'Renamed {0} to {1}'", ".", "format", "(", "wrap", "(", "old_table", ")", ",", "wrap", "(", "new_table", ")", ")", ")", "return", "old_table", ",", "new_table" ]
Rename a table. You must have ALTER and DROP privileges for the original table, and CREATE and INSERT privileges for the new table.
[ "Rename", "a", "table", "." ]
python
train
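For illustration, the two DDL strings assembled in the record above render as plain MySQL statements. The sketch assumes that wrap() simply backtick-quotes identifiers (that helper is not shown here) and uses hypothetical table names.

# Hypothetical rendering of the commands built by rename() above.
old_table, new_table = 'customers_old', 'customers'
primary = 'RENAME TABLE `{0}` TO `{1}`'.format(old_table, new_table)
fallback = 'ALTER TABLE `{0}` RENAME `{1}`'.format(old_table, new_table)
print(primary)   # RENAME TABLE `customers_old` TO `customers`
print(fallback)  # ALTER TABLE `customers_old` RENAME `customers`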
RudolfCardinal/pythonlib
cardinal_pythonlib/logs.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/logs.py#L140-L176
def get_colour_handler(extranames: List[str] = None, with_process_id: bool = False, with_thread_id: bool = False, stream: TextIO = None) -> logging.StreamHandler: """ Gets a colour log handler using a standard format. Args: extranames: additional names to append to the logger's name with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name? stream: ``TextIO`` stream to send log output to Returns: the :class:`logging.StreamHandler` """ fmt = "%(white)s%(asctime)s.%(msecs)03d" # this is dim white = grey if with_process_id or with_thread_id: procinfo = [] # type: List[str] if with_process_id: procinfo.append("p%(process)d") if with_thread_id: procinfo.append("t%(thread)d") fmt += " [{}]".format(".".join(procinfo)) extras = ":" + ":".join(extranames) if extranames else "" fmt += " %(name)s{extras}:%(levelname)s: ".format(extras=extras) fmt += "%(reset)s%(log_color)s%(message)s" cf = ColoredFormatter(fmt, datefmt=LOG_DATEFMT, reset=True, log_colors=LOG_COLORS, secondary_log_colors={}, style='%') ch = logging.StreamHandler(stream) ch.setFormatter(cf) return ch
[ "def", "get_colour_handler", "(", "extranames", ":", "List", "[", "str", "]", "=", "None", ",", "with_process_id", ":", "bool", "=", "False", ",", "with_thread_id", ":", "bool", "=", "False", ",", "stream", ":", "TextIO", "=", "None", ")", "->", "logging", ".", "StreamHandler", ":", "fmt", "=", "\"%(white)s%(asctime)s.%(msecs)03d\"", "# this is dim white = grey", "if", "with_process_id", "or", "with_thread_id", ":", "procinfo", "=", "[", "]", "# type: List[str]", "if", "with_process_id", ":", "procinfo", ".", "append", "(", "\"p%(process)d\"", ")", "if", "with_thread_id", ":", "procinfo", ".", "append", "(", "\"t%(thread)d\"", ")", "fmt", "+=", "\" [{}]\"", ".", "format", "(", "\".\"", ".", "join", "(", "procinfo", ")", ")", "extras", "=", "\":\"", "+", "\":\"", ".", "join", "(", "extranames", ")", "if", "extranames", "else", "\"\"", "fmt", "+=", "\" %(name)s{extras}:%(levelname)s: \"", ".", "format", "(", "extras", "=", "extras", ")", "fmt", "+=", "\"%(reset)s%(log_color)s%(message)s\"", "cf", "=", "ColoredFormatter", "(", "fmt", ",", "datefmt", "=", "LOG_DATEFMT", ",", "reset", "=", "True", ",", "log_colors", "=", "LOG_COLORS", ",", "secondary_log_colors", "=", "{", "}", ",", "style", "=", "'%'", ")", "ch", "=", "logging", ".", "StreamHandler", "(", "stream", ")", "ch", ".", "setFormatter", "(", "cf", ")", "return", "ch" ]
Gets a colour log handler using a standard format. Args: extranames: additional names to append to the logger's name with_process_id: include the process ID in the logger's name? with_thread_id: include the thread ID in the logger's name? stream: ``TextIO`` stream to send log output to Returns: the :class:`logging.StreamHandler`
[ "Gets", "a", "colour", "log", "handler", "using", "a", "standard", "format", "." ]
python
train
mitsei/dlkit
dlkit/json_/logging_/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/logging_/sessions.py#L346-L387
def get_log_entries_by_ids(self, log_entry_ids): """Gets a ``LogEntryList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the entries specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible logentries may be omitted from the list and may present the elements in any order including returning a unique set. arg: log_entry_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.logging.LogEntryList) - the returned ``LogEntry list`` raise: NotFound - an ``Id was`` not found raise: NullArgument - ``log_entry_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources_by_ids # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('logging', collection='LogEntry', runtime=self._runtime) object_id_list = [] for i in log_entry_ids: object_id_list.append(ObjectId(self._get_id(i, 'logging').get_identifier())) result = collection.find( dict({'_id': {'$in': object_id_list}}, **self._view_filter())) result = list(result) sorted_result = [] for object_id in object_id_list: for object_map in result: if object_map['_id'] == object_id: sorted_result.append(object_map) break return objects.LogEntryList(sorted_result, runtime=self._runtime, proxy=self._proxy)
[ "def", "get_log_entries_by_ids", "(", "self", ",", "log_entry_ids", ")", ":", "# Implemented from template for", "# osid.resource.ResourceLookupSession.get_resources_by_ids", "# NOTE: This implementation currently ignores plenary view", "collection", "=", "JSONClientValidated", "(", "'logging'", ",", "collection", "=", "'LogEntry'", ",", "runtime", "=", "self", ".", "_runtime", ")", "object_id_list", "=", "[", "]", "for", "i", "in", "log_entry_ids", ":", "object_id_list", ".", "append", "(", "ObjectId", "(", "self", ".", "_get_id", "(", "i", ",", "'logging'", ")", ".", "get_identifier", "(", ")", ")", ")", "result", "=", "collection", ".", "find", "(", "dict", "(", "{", "'_id'", ":", "{", "'$in'", ":", "object_id_list", "}", "}", ",", "*", "*", "self", ".", "_view_filter", "(", ")", ")", ")", "result", "=", "list", "(", "result", ")", "sorted_result", "=", "[", "]", "for", "object_id", "in", "object_id_list", ":", "for", "object_map", "in", "result", ":", "if", "object_map", "[", "'_id'", "]", "==", "object_id", ":", "sorted_result", ".", "append", "(", "object_map", ")", "break", "return", "objects", ".", "LogEntryList", "(", "sorted_result", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets a ``LogEntryList`` corresponding to the given ``IdList``.

In plenary mode, the returned list contains all of the entries specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible log entries may be omitted from the list and may present the elements in any order including returning a unique set.

arg: log_entry_ids (osid.id.IdList): the list of ``Ids`` to retrieve
return: (osid.logging.LogEntryList) - the returned ``LogEntry`` list
raise: NotFound - an ``Id`` was not found
raise: NullArgument - ``log_entry_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
[ "Gets", "a", "LogEntryList", "corresponding", "to", "the", "given", "IdList", "." ]
python
train
brianhie/scanorama
scanorama/t_sne_approx.py
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L71-L124
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples, k) Distances of samples to its k nearest neighbors. neighbors : array, shape (n_samples, k) Indices of the k nearest-neighbors for each samples. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : csr sparse matrix, shape (n_samples, n_samples) Condensed joint probability matrix with only nearest neighbors. """ t0 = time() # Compute conditional probabilities such that they approximately match # the desired perplexity n_samples, k = neighbors.shape distances = distances.astype(np.float32, copy=False) neighbors = neighbors.astype(np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) assert np.all(np.isfinite(conditional_P)), \ "All probabilities should be finite" # Symmetrize the joint probability distribution using sparse operations P = csr_matrix((conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)), shape=(n_samples, n_samples)) P = P + P.T # Normalize the joint probability distribution sum_P = np.maximum(P.sum(), MACHINE_EPSILON) P /= sum_P assert np.all(np.abs(P.data) <= 1.0) if verbose >= 2: duration = time() - t0 print("[t-SNE] Computed conditional probabilities in {:.3f}s" .format(duration)) return P
[ "def", "_joint_probabilities_nn", "(", "distances", ",", "neighbors", ",", "desired_perplexity", ",", "verbose", ")", ":", "t0", "=", "time", "(", ")", "# Compute conditional probabilities such that they approximately match", "# the desired perplexity", "n_samples", ",", "k", "=", "neighbors", ".", "shape", "distances", "=", "distances", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", "neighbors", "=", "neighbors", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")", "conditional_P", "=", "_utils", ".", "_binary_search_perplexity", "(", "distances", ",", "neighbors", ",", "desired_perplexity", ",", "verbose", ")", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "conditional_P", ")", ")", ",", "\"All probabilities should be finite\"", "# Symmetrize the joint probability distribution using sparse operations", "P", "=", "csr_matrix", "(", "(", "conditional_P", ".", "ravel", "(", ")", ",", "neighbors", ".", "ravel", "(", ")", ",", "range", "(", "0", ",", "n_samples", "*", "k", "+", "1", ",", "k", ")", ")", ",", "shape", "=", "(", "n_samples", ",", "n_samples", ")", ")", "P", "=", "P", "+", "P", ".", "T", "# Normalize the joint probability distribution", "sum_P", "=", "np", ".", "maximum", "(", "P", ".", "sum", "(", ")", ",", "MACHINE_EPSILON", ")", "P", "/=", "sum_P", "assert", "np", ".", "all", "(", "np", ".", "abs", "(", "P", ".", "data", ")", "<=", "1.0", ")", "if", "verbose", ">=", "2", ":", "duration", "=", "time", "(", ")", "-", "t0", "print", "(", "\"[t-SNE] Computed conditional probabilities in {:.3f}s\"", ".", "format", "(", "duration", ")", ")", "return", "P" ]
Compute joint probabilities p_ij from distances using just nearest neighbors.

This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN).

Parameters
----------
distances : array, shape (n_samples, k)
    Distances of each sample to its k nearest neighbors.
neighbors : array, shape (n_samples, k)
    Indices of the k nearest neighbors for each sample.
desired_perplexity : float
    Desired perplexity of the joint probability distributions.
verbose : int
    Verbosity level.

Returns
-------
P : csr sparse matrix, shape (n_samples, n_samples)
    Condensed joint probability matrix with only nearest neighbors.
[ "Compute", "joint", "probabilities", "p_ij", "from", "distances", "using", "just", "nearest", "neighbors", "." ]
python
train
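In standard t-SNE notation, the symmetrization and renormalization performed in the function above (P = P + P.T followed by division by the total mass) amount, up to the restriction to the k nearest neighbours of each point, to the following sketch of the usual formula (not quoted verbatim from the source):

% p_{j|i}: conditional probabilities from the perplexity binary search,
% N: number of samples. The approximation holds when each conditional
% distribution sums to one over its k neighbours.
p_{ij} \;=\; \frac{p_{j|i} + p_{i|j}}{\sum_{a \neq b} \left( p_{b|a} + p_{a|b} \right)} \;\approx\; \frac{p_{j|i} + p_{i|j}}{2N}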
fastai/fastai
fastai/gen_doc/nbdoc.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/gen_doc/nbdoc.py#L230-L239
def get_inner_fts(elt)->List[str]: "List the inner functions of a class." fts = [] for ft_name in elt.__dict__.keys(): if ft_name.startswith('_'): continue ft = getattr(elt, ft_name) if inspect.isfunction(ft): fts.append(f'{elt.__name__}.{ft_name}') if inspect.ismethod(ft): fts.append(f'{elt.__name__}.{ft_name}') if inspect.isclass(ft): fts += [f'{elt.__name__}.{n}' for n in get_inner_fts(ft)] return fts
[ "def", "get_inner_fts", "(", "elt", ")", "->", "List", "[", "str", "]", ":", "fts", "=", "[", "]", "for", "ft_name", "in", "elt", ".", "__dict__", ".", "keys", "(", ")", ":", "if", "ft_name", ".", "startswith", "(", "'_'", ")", ":", "continue", "ft", "=", "getattr", "(", "elt", ",", "ft_name", ")", "if", "inspect", ".", "isfunction", "(", "ft", ")", ":", "fts", ".", "append", "(", "f'{elt.__name__}.{ft_name}'", ")", "if", "inspect", ".", "ismethod", "(", "ft", ")", ":", "fts", ".", "append", "(", "f'{elt.__name__}.{ft_name}'", ")", "if", "inspect", ".", "isclass", "(", "ft", ")", ":", "fts", "+=", "[", "f'{elt.__name__}.{n}'", "for", "n", "in", "get_inner_fts", "(", "ft", ")", "]", "return", "fts" ]
List the inner functions of a class.
[ "List", "the", "inner", "functions", "of", "a", "class", "." ]
python
train
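One detail of the helper above worth illustrating is how nested classes are reported: the recursion prefixes the outer class name, so inner members come back with a dotted path. The sketch below copies the helper only so the example runs standalone; the toy class names are made up.

import inspect
from typing import List

# Copy of the traversal above, reproduced here so the example runs on its own.
def get_inner_fts(elt) -> List[str]:
    fts = []
    for ft_name in elt.__dict__.keys():
        if ft_name.startswith('_'):
            continue
        ft = getattr(elt, ft_name)
        if inspect.isfunction(ft):
            fts.append(f'{elt.__name__}.{ft_name}')
        if inspect.ismethod(ft):
            fts.append(f'{elt.__name__}.{ft_name}')
        if inspect.isclass(ft):
            fts += [f'{elt.__name__}.{n}' for n in get_inner_fts(ft)]
    return fts

# Toy class used only for illustration.
class Outer:
    def method_a(self): pass
    class Inner:
        def method_b(self): pass

# Members of the nested class are reported with the full dotted prefix.
assert get_inner_fts(Outer) == ['Outer.method_a', 'Outer.Inner.method_b']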
fakedrake/overlay_parse
overlay_parse/overlays.py
https://github.com/fakedrake/overlay_parse/blob/9ac362d6aef1ea41aff7375af088c6ebef93d0cd/overlay_parse/overlays.py#L193-L198
def get_overlays(self, **kw): """ See Overlay.match() for arguments. """ return [o for o in self.overlays if o.match(**kw)]
[ "def", "get_overlays", "(", "self", ",", "*", "*", "kw", ")", ":", "return", "[", "o", "for", "o", "in", "self", ".", "overlays", "if", "o", ".", "match", "(", "*", "*", "kw", ")", "]" ]
See Overlay.match() for arguments.
[ "See", "Overlay", ".", "match", "()", "for", "arguments", "." ]
python
train
jorahn/icy
icy/icy.py
https://github.com/jorahn/icy/blob/d0bd765c933b2d9bff4d7d646c0938348b9c5c25/icy/icy.py#L266-L362
def read(path, cfg={}, raise_on_error=False, silent=False, verbose=False, return_errors=False): """Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources Parameters ---------- path : str Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`). Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://. Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available. Parser will be selected based on file extension. cfg : dict or str, optional Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output) or str with path to YAML, that will be parsed. Special keys: **filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv']) **default** : kwargs to be used for every file **custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs. silent : boolean, optional If True, doesn't print to stdout. verbose : boolean, optional If True, prints parsing arguments for each file processed to stdout. raise_on_error : boolean, optional Raise exception or only display warning, if a file cannot be parsed successfully. return_errors : boolean, optional If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed. Returns ------- data : dict Dictionary of parsed pandas.DataFrames, with file names as keys. Notes ----- - Start with basic cfg and tune until the desired parsing result is achieved. - File extensions are critical to determine the parser, make sure they are *common*. - Avoid files named 'default' or 'filters'. - Avoid duplicate file names. - Subfolders and file names beginning with '.' or '_' are ignored. - If an https:// URI isn't correctly processed, try http:// instead. - To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment. """ if type(cfg) == str: cfg = os.path.abspath(os.path.expanduser(cfg)) yml = _read_yaml(cfg) if yml == None: if not silent: print('creating read.yml config file draft ...') cfg = {'filters': ['.csv'], 'default': {'sep': ',', 'parse_dates': []}} with open('local/read.yml', 'xt') as f: yaml.dump(cfg, f) yml = _read_yaml('local/read.yml') if 'filters' in yml: filters = yml['filters'] if type(filters) == str: filters = [filters] del yml['filters'] else: filters = [] cfg = yml data = {} errors = [] if not silent: print('processing', path, '...') for f in _path_to_objs(path): if type(f) == str: fname = os.path.basename(f) elif type(f) == zipfile.ZipExtFile: fname = f.name else: raise RuntimeError('_path_to_objs() returned unknown type', f) data, errors = _read_append(data=data, errors=errors, path=f, fname=fname, \ cfg=cfg, raise_on_error=raise_on_error, silent=silent, verbose=verbose) if raise_on_error and data == {}: raise IOError('path is invalid or empty') if not silent: print('imported {} DataFrames'.format(len(data))) if len(data) > 0: print('total memory usage: {}'.format(mem(data))) if len(errors) > 0: print('import errors in files: {}'.format(', '.join(errors))) if return_errors: return data, errors else: return data
[ "def", "read", "(", "path", ",", "cfg", "=", "{", "}", ",", "raise_on_error", "=", "False", ",", "silent", "=", "False", ",", "verbose", "=", "False", ",", "return_errors", "=", "False", ")", ":", "if", "type", "(", "cfg", ")", "==", "str", ":", "cfg", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "cfg", ")", ")", "yml", "=", "_read_yaml", "(", "cfg", ")", "if", "yml", "==", "None", ":", "if", "not", "silent", ":", "print", "(", "'creating read.yml config file draft ...'", ")", "cfg", "=", "{", "'filters'", ":", "[", "'.csv'", "]", ",", "'default'", ":", "{", "'sep'", ":", "','", ",", "'parse_dates'", ":", "[", "]", "}", "}", "with", "open", "(", "'local/read.yml'", ",", "'xt'", ")", "as", "f", ":", "yaml", ".", "dump", "(", "cfg", ",", "f", ")", "yml", "=", "_read_yaml", "(", "'local/read.yml'", ")", "if", "'filters'", "in", "yml", ":", "filters", "=", "yml", "[", "'filters'", "]", "if", "type", "(", "filters", ")", "==", "str", ":", "filters", "=", "[", "filters", "]", "del", "yml", "[", "'filters'", "]", "else", ":", "filters", "=", "[", "]", "cfg", "=", "yml", "data", "=", "{", "}", "errors", "=", "[", "]", "if", "not", "silent", ":", "print", "(", "'processing'", ",", "path", ",", "'...'", ")", "for", "f", "in", "_path_to_objs", "(", "path", ")", ":", "if", "type", "(", "f", ")", "==", "str", ":", "fname", "=", "os", ".", "path", ".", "basename", "(", "f", ")", "elif", "type", "(", "f", ")", "==", "zipfile", ".", "ZipExtFile", ":", "fname", "=", "f", ".", "name", "else", ":", "raise", "RuntimeError", "(", "'_path_to_objs() returned unknown type'", ",", "f", ")", "data", ",", "errors", "=", "_read_append", "(", "data", "=", "data", ",", "errors", "=", "errors", ",", "path", "=", "f", ",", "fname", "=", "fname", ",", "cfg", "=", "cfg", ",", "raise_on_error", "=", "raise_on_error", ",", "silent", "=", "silent", ",", "verbose", "=", "verbose", ")", "if", "raise_on_error", "and", "data", "==", "{", "}", ":", "raise", "IOError", "(", "'path is invalid or empty'", ")", "if", "not", "silent", ":", "print", "(", "'imported {} DataFrames'", ".", "format", "(", "len", "(", "data", ")", ")", ")", "if", "len", "(", "data", ")", ">", "0", ":", "print", "(", "'total memory usage: {}'", ".", "format", "(", "mem", "(", "data", ")", ")", ")", "if", "len", "(", "errors", ")", ">", "0", ":", "print", "(", "'import errors in files: {}'", ".", "format", "(", "', '", ".", "join", "(", "errors", ")", ")", ")", "if", "return_errors", ":", "return", "data", ",", "errors", "else", ":", "return", "data" ]
Wraps pandas.IO & odo to create a dictionary of pandas.DataFrames from multiple different sources Parameters ---------- path : str Location of file, folder or zip-file to be parsed. Can include globbing (e.g. `*.csv`). Can be remote with URI-notation beginning with e.g. http://, https://, file://, ftp://, s3:// and ssh://. Can be odo-supported database (SQL, MongoDB, Hadoop, Spark) if dependencies are available. Parser will be selected based on file extension. cfg : dict or str, optional Dictionary of kwargs to be provided to the pandas parser (http://pandas.pydata.org/pandas-docs/stable/api.html#input-output) or str with path to YAML, that will be parsed. Special keys: **filters** : str or list of strings, optional. For a file to be processed, it must contain one of the Strings (e.g. ['.csv', '.tsv']) **default** : kwargs to be used for every file **custom_date_parser** : strptime-format string (https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior), generates a parser that used as the *date_parser* argument If filename in keys, use kwargs from that key in addition to or overwriting *default* kwargs. silent : boolean, optional If True, doesn't print to stdout. verbose : boolean, optional If True, prints parsing arguments for each file processed to stdout. raise_on_error : boolean, optional Raise exception or only display warning, if a file cannot be parsed successfully. return_errors : boolean, optional If True, read() returns (data, errors) tuple instead of only data, with errors as a list of all files that could not be parsed. Returns ------- data : dict Dictionary of parsed pandas.DataFrames, with file names as keys. Notes ----- - Start with basic cfg and tune until the desired parsing result is achieved. - File extensions are critical to determine the parser, make sure they are *common*. - Avoid files named 'default' or 'filters'. - Avoid duplicate file names. - Subfolders and file names beginning with '.' or '_' are ignored. - If an https:// URI isn't correctly processed, try http:// instead. - To connect to a database or s3-bucket, make sure the required dependencies like sqlalchemy, pymongo, pyspark or boto are available in the active environment.
[ "Wraps", "pandas", ".", "IO", "&", "odo", "to", "create", "a", "dictionary", "of", "pandas", ".", "DataFrames", "from", "multiple", "different", "sources", "Parameters", "----------", "path", ":", "str", "Location", "of", "file", "folder", "or", "zip", "-", "file", "to", "be", "parsed", ".", "Can", "include", "globbing", "(", "e", ".", "g", ".", "*", ".", "csv", ")", ".", "Can", "be", "remote", "with", "URI", "-", "notation", "beginning", "with", "e", ".", "g", ".", "http", ":", "//", "https", ":", "//", "file", ":", "//", "ftp", ":", "//", "s3", ":", "//", "and", "ssh", ":", "//", ".", "Can", "be", "odo", "-", "supported", "database", "(", "SQL", "MongoDB", "Hadoop", "Spark", ")", "if", "dependencies", "are", "available", ".", "Parser", "will", "be", "selected", "based", "on", "file", "extension", ".", "cfg", ":", "dict", "or", "str", "optional", "Dictionary", "of", "kwargs", "to", "be", "provided", "to", "the", "pandas", "parser", "(", "http", ":", "//", "pandas", ".", "pydata", ".", "org", "/", "pandas", "-", "docs", "/", "stable", "/", "api", ".", "html#input", "-", "output", ")", "or", "str", "with", "path", "to", "YAML", "that", "will", "be", "parsed", ".", "Special", "keys", ":", "**", "filters", "**", ":", "str", "or", "list", "of", "strings", "optional", ".", "For", "a", "file", "to", "be", "processed", "it", "must", "contain", "one", "of", "the", "Strings", "(", "e", ".", "g", ".", "[", ".", "csv", ".", "tsv", "]", ")", "**", "default", "**", ":", "kwargs", "to", "be", "used", "for", "every", "file", "**", "custom_date_parser", "**", ":", "strptime", "-", "format", "string", "(", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "library", "/", "datetime", ".", "html#strftime", "-", "strptime", "-", "behavior", ")", "generates", "a", "parser", "that", "used", "as", "the", "*", "date_parser", "*", "argument", "If", "filename", "in", "keys", "use", "kwargs", "from", "that", "key", "in", "addition", "to", "or", "overwriting", "*", "default", "*", "kwargs", ".", "silent", ":", "boolean", "optional", "If", "True", "doesn", "t", "print", "to", "stdout", ".", "verbose", ":", "boolean", "optional", "If", "True", "prints", "parsing", "arguments", "for", "each", "file", "processed", "to", "stdout", ".", "raise_on_error", ":", "boolean", "optional", "Raise", "exception", "or", "only", "display", "warning", "if", "a", "file", "cannot", "be", "parsed", "successfully", ".", "return_errors", ":", "boolean", "optional", "If", "True", "read", "()", "returns", "(", "data", "errors", ")", "tuple", "instead", "of", "only", "data", "with", "errors", "as", "a", "list", "of", "all", "files", "that", "could", "not", "be", "parsed", ".", "Returns", "-------", "data", ":", "dict", "Dictionary", "of", "parsed", "pandas", ".", "DataFrames", "with", "file", "names", "as", "keys", ".", "Notes", "-----", "-", "Start", "with", "basic", "cfg", "and", "tune", "until", "the", "desired", "parsing", "result", "is", "achieved", ".", "-", "File", "extensions", "are", "critical", "to", "determine", "the", "parser", "make", "sure", "they", "are", "*", "common", "*", ".", "-", "Avoid", "files", "named", "default", "or", "filters", ".", "-", "Avoid", "duplicate", "file", "names", ".", "-", "Subfolders", "and", "file", "names", "beginning", "with", ".", "or", "_", "are", "ignored", ".", "-", "If", "an", "https", ":", "//", "URI", "isn", "t", "correctly", "processed", "try", "http", ":", "//", "instead", ".", "-", "To", "connect", "to", "a", "database", "or", "s3", "-", "bucket", "make", "sure", "the", 
"required", "dependencies", "like", "sqlalchemy", "pymongo", "pyspark", "or", "boto", "are", "available", "in", "the", "active", "environment", "." ]
python
train
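The special keys described in the record above ('filters', 'default', 'custom_date_parser', plus per-file overrides) can be combined in a single mapping. The example below is hypothetical (the file name and column names are invented) and only illustrates the shape of such a cfg.

# Hypothetical cfg for read(); any key other than 'filters', 'default' or
# 'custom_date_parser' is interpreted as a file name with per-file kwargs.
cfg = {
    'filters': ['.csv', '.tsv'],                  # only parse files whose names contain one of these
    'default': {'sep': ',', 'parse_dates': []},   # kwargs applied to every file
    'custom_date_parser': '%Y-%m-%d %H:%M:%S',    # strptime format used to build the date_parser
    'orders.tsv': {'sep': '\t', 'parse_dates': ['order_date']},  # overrides for this one file
}
# data = read('data/archive.zip', cfg=cfg)   # returns a dict of DataFrames keyed by file name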
AoiKuiyuyou/AoikLiveReload
tools/waf/aoikwafutil.py
https://github.com/AoiKuiyuyou/AoikLiveReload/blob/0d5adb12118a33749e6690a8165fdb769cff7d5c/tools/waf/aoikwafutil.py#L2162-L2204
def git_clean(ctx): """ Delete all files untracked by git. :param ctx: Context object. :return: None. """ # Get command parts cmd_part_s = [ # Program path 'git', # Clean untracked files 'clean', # Remove all untracked files '-x', # Remove untracked directories too '-d', # Force to remove '-f', # Give two `-f` flags to remove sub-repositories too '-f', ] # Print title print_title('git_clean') # Print the command in multi-line format print_text(_format_multi_line_command(cmd_part_s)) # Create subprocess to run the command in top directory proc = subprocess.Popen(cmd_part_s, cwd=ctx.top_dir) # Wait the subprocess to finish proc.wait() # Print end title print_title('git_clean', is_end=True)
[ "def", "git_clean", "(", "ctx", ")", ":", "# Get command parts", "cmd_part_s", "=", "[", "# Program path", "'git'", ",", "# Clean untracked files", "'clean'", ",", "# Remove all untracked files", "'-x'", ",", "# Remove untracked directories too", "'-d'", ",", "# Force to remove", "'-f'", ",", "# Give two `-f` flags to remove sub-repositories too", "'-f'", ",", "]", "# Print title", "print_title", "(", "'git_clean'", ")", "# Print the command in multi-line format", "print_text", "(", "_format_multi_line_command", "(", "cmd_part_s", ")", ")", "# Create subprocess to run the command in top directory", "proc", "=", "subprocess", ".", "Popen", "(", "cmd_part_s", ",", "cwd", "=", "ctx", ".", "top_dir", ")", "# Wait the subprocess to finish", "proc", ".", "wait", "(", ")", "# Print end title", "print_title", "(", "'git_clean'", ",", "is_end", "=", "True", ")" ]
Delete all files untracked by git. :param ctx: Context object. :return: None.
[ "Delete", "all", "files", "untracked", "by", "git", "." ]
python
train
jciskey/pygraph
pygraph/functions/biconnected_components.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/functions/biconnected_components.py#L28-L37
def find_biconnected_components_as_subgraphs(graph): """Finds the biconnected components and returns them as subgraphs.""" list_of_graphs = [] list_of_components = find_biconnected_components(graph) for edge_list in list_of_components: subgraph = get_subgraph_from_edge_list(graph, edge_list) list_of_graphs.append(subgraph) return list_of_graphs
[ "def", "find_biconnected_components_as_subgraphs", "(", "graph", ")", ":", "list_of_graphs", "=", "[", "]", "list_of_components", "=", "find_biconnected_components", "(", "graph", ")", "for", "edge_list", "in", "list_of_components", ":", "subgraph", "=", "get_subgraph_from_edge_list", "(", "graph", ",", "edge_list", ")", "list_of_graphs", ".", "append", "(", "subgraph", ")", "return", "list_of_graphs" ]
Finds the biconnected components and returns them as subgraphs.
[ "Finds", "the", "biconnected", "components", "and", "returns", "them", "as", "subgraphs", "." ]
python
train
kgori/treeCl
treeCl/tree.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/tree.py#L998-L1010
def prune_to_subset(self, subset, inplace=False):
    """ Prunes the Tree to just the taxon set given in `subset` """
    if not subset.issubset(self.labels):
        print('"subset" is not a subset')
        return
    if not inplace:
        t = self.copy()
    else:
        t = self
    t._tree.retain_taxa_with_labels(subset)
    t._tree.encode_bipartitions()
    t._dirty = True
    return t
[ "def", "prune_to_subset", "(", "self", ",", "subset", ",", "inplace", "=", "False", ")", ":", "if", "not", "subset", ".", "issubset", "(", "self", ".", "labels", ")", ":", "print", "(", "'\"subset\" is not a subset'", ")", "return", "if", "not", "inplace", ":", "t", "=", "self", ".", "copy", "(", ")", "else", ":", "t", "=", "self", "t", ".", "_tree", ".", "retain_taxa_with_labels", "(", "subset", ")", "t", ".", "_tree", ".", "encode_bipartitions", "(", ")", "t", ".", "_dirty", "=", "True", "return", "t" ]
Prunes the Tree to just the taxon set given in `subset`
[ "Prunes", "the", "Tree", "to", "just", "the", "taxon", "set", "given", "in", "subset" ]
python
train
hannes-brt/hebel
hebel/__init__.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/__init__.py#L96-L143
def init(device_id=None, random_seed=None):
    """Initialize Hebel.

    This function creates a CUDA context, CUBLAS context and
    initializes and seeds the pseudo-random number generator.

    **Parameters:**

    device_id : integer, optional
        The ID of the GPU device to use. If this is omitted, PyCUDA's
        default context is used, which by default uses the fastest
        available device on the system. Alternatively, you can put the
        device id in the environment variable ``CUDA_DEVICE`` or into
        the file ``.cuda-device`` in the user's home directory.

    random_seed : integer, optional
        The seed to use for the pseudo-random number generator. If
        this is omitted, the seed is taken from the environment
        variable ``RANDOM_SEED`` and if that is not defined, a random
        integer is used as a seed.
    """
    if device_id is None:
        random_seed = _os.environ.get('CUDA_DEVICE')

    if random_seed is None:
        random_seed = _os.environ.get('RANDOM_SEED')

    global is_initialized
    if not is_initialized:
        is_initialized = True

        global context
        context.init_context(device_id)

        from pycuda import gpuarray, driver, curandom

        # Initialize memory pool
        global memory_pool
        memory_pool.init()

        # Initialize PRG
        global sampler
        sampler.set_seed(random_seed)

        # Initialize pycuda_ops
        from hebel import pycuda_ops
        pycuda_ops.init()
[ "def", "init", "(", "device_id", "=", "None", ",", "random_seed", "=", "None", ")", ":", "if", "device_id", "is", "None", ":", "random_seed", "=", "_os", ".", "environ", ".", "get", "(", "'CUDA_DEVICE'", ")", "if", "random_seed", "is", "None", ":", "random_seed", "=", "_os", ".", "environ", ".", "get", "(", "'RANDOM_SEED'", ")", "global", "is_initialized", "if", "not", "is_initialized", ":", "is_initialized", "=", "True", "global", "context", "context", ".", "init_context", "(", "device_id", ")", "from", "pycuda", "import", "gpuarray", ",", "driver", ",", "curandom", "# Initialize memory pool", "global", "memory_pool", "memory_pool", ".", "init", "(", ")", "# Initialize PRG", "global", "sampler", "sampler", ".", "set_seed", "(", "random_seed", ")", "# Initialize pycuda_ops", "from", "hebel", "import", "pycuda_ops", "pycuda_ops", ".", "init", "(", ")" ]
Initialize Hebel.

This function creates a CUDA context, CUBLAS context and
initializes and seeds the pseudo-random number generator.

**Parameters:**

device_id : integer, optional
    The ID of the GPU device to use. If this is omitted, PyCUDA's
    default context is used, which by default uses the fastest
    available device on the system. Alternatively, you can put the
    device id in the environment variable ``CUDA_DEVICE`` or into
    the file ``.cuda-device`` in the user's home directory.

random_seed : integer, optional
    The seed to use for the pseudo-random number generator. If
    this is omitted, the seed is taken from the environment
    variable ``RANDOM_SEED`` and if that is not defined, a random
    integer is used as a seed.
[ "Initialize", "Hebel", "." ]
python
train
bcbio/bcbio-nextgen
bcbio/pipeline/fastq.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/fastq.py#L122-L140
def _merge_list_fastqs(files, out_file, config):
    """merge list of fastq files into one"""
    if not all(map(fastq.is_fastq, files)):
        raise ValueError("Not all of the files to merge are fastq files: %s " % (files))
    assert all(map(utils.file_exists, files)), ("Not all of the files to merge "
                                                "exist: %s" % (files))
    if not file_exists(out_file):
        files = [_gzip_fastq(fn) for fn in files]
        if len(files) == 1:
            if "remove_source" in config and config["remove_source"]:
                shutil.move(files[0], out_file)
            else:
                os.symlink(files[0], out_file)
            return out_file
        with file_transaction(out_file) as file_txt_out:
            files_str = " ".join(list(files))
            cmd = "cat {files_str} > {file_txt_out}".format(**locals())
            do.run(cmd, "merge fastq files %s" % files)
    return out_file
[ "def", "_merge_list_fastqs", "(", "files", ",", "out_file", ",", "config", ")", ":", "if", "not", "all", "(", "map", "(", "fastq", ".", "is_fastq", ",", "files", ")", ")", ":", "raise", "ValueError", "(", "\"Not all of the files to merge are fastq files: %s \"", "%", "(", "files", ")", ")", "assert", "all", "(", "map", "(", "utils", ".", "file_exists", ",", "files", ")", ")", ",", "(", "\"Not all of the files to merge \"", "\"exist: %s\"", "%", "(", "files", ")", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "files", "=", "[", "_gzip_fastq", "(", "fn", ")", "for", "fn", "in", "files", "]", "if", "len", "(", "files", ")", "==", "1", ":", "if", "\"remove_source\"", "in", "config", "and", "config", "[", "\"remove_source\"", "]", ":", "shutil", ".", "move", "(", "files", "[", "0", "]", ",", "out_file", ")", "else", ":", "os", ".", "symlink", "(", "files", "[", "0", "]", ",", "out_file", ")", "return", "out_file", "with", "file_transaction", "(", "out_file", ")", "as", "file_txt_out", ":", "files_str", "=", "\" \"", ".", "join", "(", "list", "(", "files", ")", ")", "cmd", "=", "\"cat {files_str} > {file_txt_out}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "do", ".", "run", "(", "cmd", ",", "\"merge fastq files %s\"", "%", "files", ")", "return", "out_file" ]
merge list of fastq files into one
[ "merge", "list", "of", "fastq", "files", "into", "one" ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ClientFactory.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ClientFactory.py#L586-L592
def create_tipo_rede(self):
    """Get an instance of tipo_rede services facade."""
    return TipoRede(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
[ "def", "create_tipo_rede", "(", "self", ")", ":", "return", "TipoRede", "(", "self", ".", "networkapi_url", ",", "self", ".", "user", ",", "self", ".", "password", ",", "self", ".", "user_ldap", ")" ]
Get an instance of tipo_rede services facade.
[ "Get", "an", "instance", "of", "tipo_rede", "services", "facade", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/image_streamer/resources/golden_images.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/image_streamer/resources/golden_images.py#L90-L106
def upload(self, file_path, golden_image_info):
    """
    Adds a Golden Image resource from the file that is uploaded from a local drive.
    Only the .zip format file can be used for the upload.

    Args:
        file_path (str): File name to upload.
        golden_image_info (dict): Golden Image information.

    Returns:
        dict: Golden Image.
    """
    uri = "{0}?name={1}&description={2}".format(self.URI,
                                                quote(golden_image_info.get('name', '')),
                                                quote(golden_image_info.get('description', '')))
    return self._client.upload(file_path, uri)
[ "def", "upload", "(", "self", ",", "file_path", ",", "golden_image_info", ")", ":", "uri", "=", "\"{0}?name={1}&description={2}\"", ".", "format", "(", "self", ".", "URI", ",", "quote", "(", "golden_image_info", ".", "get", "(", "'name'", ",", "''", ")", ")", ",", "quote", "(", "golden_image_info", ".", "get", "(", "'description'", ",", "''", ")", ")", ")", "return", "self", ".", "_client", ".", "upload", "(", "file_path", ",", "uri", ")" ]
Adds a Golden Image resource from the file that is uploaded from a local drive.
Only the .zip format file can be used for the upload.

Args:
    file_path (str): File name to upload.
    golden_image_info (dict): Golden Image information.

Returns:
    dict: Golden Image.
[ "Adds", "a", "Golden", "Image", "resource", "from", "the", "file", "that", "is", "uploaded", "from", "a", "local", "drive", ".", "Only", "the", ".", "zip", "format", "file", "can", "be", "used", "for", "the", "upload", "." ]
python
train
Robin8Put/pmes
wallet_manager/withdraw_client/withdraw_cli.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/wallet_manager/withdraw_client/withdraw_cli.py#L92-L105
def is_valid_address(self, *args, **kwargs):
    """
    check address

    Accepts:
        - address [hex string] (withdrawal address in hex form)
        - coinid [string] (blockchain id (example: BTCTEST, LTCTEST))

    Returns dictionary with following fields:
        - bool [Bool]
    """
    client = HTTPClient(self.withdraw_server_address + self.withdraw_endpoint)
    return client.request('is_valid_address', kwargs)
[ "def", "is_valid_address", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "client", "=", "HTTPClient", "(", "self", ".", "withdraw_server_address", "+", "self", ".", "withdraw_endpoint", ")", "return", "client", ".", "request", "(", "'is_valid_address'", ",", "kwargs", ")" ]
check address

Accepts:
    - address [hex string] (withdrawal address in hex form)
    - coinid [string] (blockchain id (example: BTCTEST, LTCTEST))

Returns dictionary with following fields:
    - bool [Bool]
[ "check", "address" ]
python
train
google/identity-toolkit-python-client
identitytoolkit/gitkitclient.py
https://github.com/google/identity-toolkit-python-client/blob/4cfe3013569c21576daa5d22ad21f9f4f8b30c4d/identitytoolkit/gitkitclient.py#L314-L330
def GetAllUsers(self, pagination_size=10):
    """Gets all user info from Gitkit server.

    Args:
      pagination_size: int, how many users should be returned per request.
          The account info are retrieved in pagination.

    Yields:
      A generator to iterate all users.
    """
    next_page_token, accounts = self.rpc_helper.DownloadAccount(
        None, pagination_size)
    while accounts:
        for account in accounts:
            yield GitkitUser.FromApiResponse(account)
        next_page_token, accounts = self.rpc_helper.DownloadAccount(
            next_page_token, pagination_size)
[ "def", "GetAllUsers", "(", "self", ",", "pagination_size", "=", "10", ")", ":", "next_page_token", ",", "accounts", "=", "self", ".", "rpc_helper", ".", "DownloadAccount", "(", "None", ",", "pagination_size", ")", "while", "accounts", ":", "for", "account", "in", "accounts", ":", "yield", "GitkitUser", ".", "FromApiResponse", "(", "account", ")", "next_page_token", ",", "accounts", "=", "self", ".", "rpc_helper", ".", "DownloadAccount", "(", "next_page_token", ",", "pagination_size", ")" ]
Gets all user info from Gitkit server.

Args:
  pagination_size: int, how many users should be returned per request.
      The account info are retrieved in pagination.

Yields:
  A generator to iterate all users.
[ "Gets", "all", "user", "info", "from", "Gitkit", "server", "." ]
python
train
mapbox/mapboxgl-jupyter
mapboxgl/utils.py
https://github.com/mapbox/mapboxgl-jupyter/blob/f6e403c13eaa910e70659c7d179e8e32ce95ae34/mapboxgl/utils.py#L400-L460
def height_map(lookup, height_stops, default_height=0.0):
    """Return a height value (in meters) interpolated from given height_stops;
    for use with vector-based visualizations using fill-extrusion layers
    """
    # if no height_stops, use default height
    if len(height_stops) == 0:
        return default_height

    # dictionary to lookup height from match-type height_stops
    match_map = dict((x, y) for (x, y) in height_stops)

    # if lookup matches stop exactly, return corresponding height (first priority)
    # (includes non-numeric height_stop "keys" for finding height by match)
    if lookup in match_map.keys():
        return match_map.get(lookup)

    # if lookup value numeric, map height by interpolating from height scale
    if isinstance(lookup, (int, float, complex)):

        # try ordering stops
        try:
            stops, heights = zip(*sorted(height_stops))

        # if not all stops are numeric, attempt looking up as if categorical stops
        except TypeError:
            return match_map.get(lookup, default_height)

        # for interpolation, all stops must be numeric
        if not all(isinstance(x, (int, float, complex)) for x in stops):
            return default_height

        # check if lookup value in stops bounds
        if float(lookup) <= stops[0]:
            return heights[0]
        elif float(lookup) >= stops[-1]:
            return heights[-1]

        # check if lookup value matches any stop value
        elif float(lookup) in stops:
            return heights[stops.index(lookup)]

        # interpolation required
        else:

            # identify bounding height stop values
            lower = max([stops[0]] + [x for x in stops if x < lookup])
            upper = min([stops[-1]] + [x for x in stops if x > lookup])

            # heights from bounding stops
            lower_height = heights[stops.index(lower)]
            upper_height = heights[stops.index(upper)]

            # compute linear "relative distance" from lower bound height to upper bound height
            distance = (lookup - lower) / (upper - lower)

            # return string representing rgb height value
            return lower_height + distance * (upper_height - lower_height)

    # default height value catch-all
    return default_height
[ "def", "height_map", "(", "lookup", ",", "height_stops", ",", "default_height", "=", "0.0", ")", ":", "# if no height_stops, use default height", "if", "len", "(", "height_stops", ")", "==", "0", ":", "return", "default_height", "# dictionary to lookup height from match-type height_stops", "match_map", "=", "dict", "(", "(", "x", ",", "y", ")", "for", "(", "x", ",", "y", ")", "in", "height_stops", ")", "# if lookup matches stop exactly, return corresponding height (first priority)", "# (includes non-numeric height_stop \"keys\" for finding height by match)", "if", "lookup", "in", "match_map", ".", "keys", "(", ")", ":", "return", "match_map", ".", "get", "(", "lookup", ")", "# if lookup value numeric, map height by interpolating from height scale", "if", "isinstance", "(", "lookup", ",", "(", "int", ",", "float", ",", "complex", ")", ")", ":", "# try ordering stops ", "try", ":", "stops", ",", "heights", "=", "zip", "(", "*", "sorted", "(", "height_stops", ")", ")", "# if not all stops are numeric, attempt looking up as if categorical stops", "except", "TypeError", ":", "return", "match_map", ".", "get", "(", "lookup", ",", "default_height", ")", "# for interpolation, all stops must be numeric", "if", "not", "all", "(", "isinstance", "(", "x", ",", "(", "int", ",", "float", ",", "complex", ")", ")", "for", "x", "in", "stops", ")", ":", "return", "default_height", "# check if lookup value in stops bounds", "if", "float", "(", "lookup", ")", "<=", "stops", "[", "0", "]", ":", "return", "heights", "[", "0", "]", "elif", "float", "(", "lookup", ")", ">=", "stops", "[", "-", "1", "]", ":", "return", "heights", "[", "-", "1", "]", "# check if lookup value matches any stop value", "elif", "float", "(", "lookup", ")", "in", "stops", ":", "return", "heights", "[", "stops", ".", "index", "(", "lookup", ")", "]", "# interpolation required", "else", ":", "# identify bounding height stop values", "lower", "=", "max", "(", "[", "stops", "[", "0", "]", "]", "+", "[", "x", "for", "x", "in", "stops", "if", "x", "<", "lookup", "]", ")", "upper", "=", "min", "(", "[", "stops", "[", "-", "1", "]", "]", "+", "[", "x", "for", "x", "in", "stops", "if", "x", ">", "lookup", "]", ")", "# heights from bounding stops", "lower_height", "=", "heights", "[", "stops", ".", "index", "(", "lower", ")", "]", "upper_height", "=", "heights", "[", "stops", ".", "index", "(", "upper", ")", "]", "# compute linear \"relative distance\" from lower bound height to upper bound height", "distance", "=", "(", "lookup", "-", "lower", ")", "/", "(", "upper", "-", "lower", ")", "# return string representing rgb height value", "return", "lower_height", "+", "distance", "*", "(", "upper_height", "-", "lower_height", ")", "# default height value catch-all", "return", "default_height" ]
Return a height value (in meters) interpolated from given height_stops; for use with vector-based visualizations using fill-extrusion layers
[ "Return", "a", "height", "value", "(", "in", "meters", ")", "interpolated", "from", "given", "height_stops", ";", "for", "use", "with", "vector", "-", "based", "visualizations", "using", "fill", "-", "extrusion", "layers" ]
python
train
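A quick usage sketch for the height_map record above. The stop values and lookups are made-up numbers; the import path simply mirrors the record's mapboxgl/utils.py location.
from mapboxgl.utils import height_map

# Numeric lookups are interpolated linearly between bounding stops:
# 42.3 sits between (0, 0.0) and (50, 100.0), so the result is ~84.6.
print(height_map(42.3, [(0, 0.0), (50, 100.0), (100, 200.0)]))

# Non-numeric stops behave as an exact-match table, with default_height
# as the fallback for unmatched keys.
print(height_map('tower', [('tower', 120.0), ('house', 8.0)]))       # 120.0
print(height_map('shed', [('tower', 120.0)], default_height=3.0))    # 3.0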
deepmind/sonnet
sonnet/python/modules/util.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/util.py#L181-L219
def check_partitioners(partitioners, keys):
    """Checks the given partitioners.

    This checks that `partitioners` is a dictionary that only contains keys in
    `keys`, and furthermore the entries in `partitioners` are functions or
    further dictionaries (the latter used, for example, in passing partitioners
    to modules inside modules) that must satisfy the same constraints.

    Args:
      partitioners: Dictionary of partitioners (allowing nested dictionaries) or
          None.
      keys: Iterable of valid keys for `partitioners`.

    Returns:
      Checked dictionary of partitioners. If `partitioners=None`, an empty
      dictionary will be returned.

    Raises:
      KeyError: If an partitioner is provided for a key not in `keys`.
      TypeError: If a provided partitioner is not a callable function, or
        `partitioners` is not a Mapping.
    """
    if partitioners is None:
        return {}

    _assert_is_dictlike(partitioners, valid_keys=keys)

    keys = set(keys)

    if not set(partitioners) <= keys:
        extra_keys = set(partitioners) - keys
        raise KeyError(
            "Invalid partitioner keys {}, partitioners can only "
            "be provided for {}".format(
                ", ".join("'{}'".format(key) for key in extra_keys),
                ", ".join("'{}'".format(key) for key in keys)))

    _check_nested_callables(partitioners, "Partitioner")

    return partitioners
[ "def", "check_partitioners", "(", "partitioners", ",", "keys", ")", ":", "if", "partitioners", "is", "None", ":", "return", "{", "}", "_assert_is_dictlike", "(", "partitioners", ",", "valid_keys", "=", "keys", ")", "keys", "=", "set", "(", "keys", ")", "if", "not", "set", "(", "partitioners", ")", "<=", "keys", ":", "extra_keys", "=", "set", "(", "partitioners", ")", "-", "keys", "raise", "KeyError", "(", "\"Invalid partitioner keys {}, partitioners can only \"", "\"be provided for {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "\"'{}'\"", ".", "format", "(", "key", ")", "for", "key", "in", "extra_keys", ")", ",", "\", \"", ".", "join", "(", "\"'{}'\"", ".", "format", "(", "key", ")", "for", "key", "in", "keys", ")", ")", ")", "_check_nested_callables", "(", "partitioners", ",", "\"Partitioner\"", ")", "return", "partitioners" ]
Checks the given partitioners.

This checks that `partitioners` is a dictionary that only contains keys in
`keys`, and furthermore the entries in `partitioners` are functions or
further dictionaries (the latter used, for example, in passing partitioners
to modules inside modules) that must satisfy the same constraints.

Args:
  partitioners: Dictionary of partitioners (allowing nested dictionaries) or
      None.
  keys: Iterable of valid keys for `partitioners`.

Returns:
  Checked dictionary of partitioners. If `partitioners=None`, an empty
  dictionary will be returned.

Raises:
  KeyError: If an partitioner is provided for a key not in `keys`.
  TypeError: If a provided partitioner is not a callable function, or
    `partitioners` is not a Mapping.
[ "Checks", "the", "given", "partitioners", "." ]
python
train
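A short sketch of calling check_partitioners from the record above, with a plain callable standing in for a real TensorFlow partitioner; importing it straight from the record's module path is an assumption about how it is reached.
from sonnet.python.modules.util import check_partitioners

# Any callable passes the check, so a lambda stands in for something like
# tf.variable_axis_size_partitioner here.
partitioners = check_partitioners({'w': lambda *a, **kw: None}, keys=['w', 'b'])

# None is allowed and yields an empty dict; keys outside `keys` raise KeyError.
assert check_partitioners(None, keys=['w', 'b']) == {}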
blockstack/virtualchain
virtualchain/lib/blockchain/bitcoin_blockchain/keys.py
https://github.com/blockstack/virtualchain/blob/fcfc970064ca7dfcab26ebd3ab955870a763ea39/virtualchain/lib/blockchain/bitcoin_blockchain/keys.py#L473-L484
def btc_is_p2wsh_address( address ):
    """
    Is the given address a p2wsh address?
    """
    wver, whash = segwit_addr_decode(address)
    if whash is None:
        return False

    if len(whash) != 32:
        return False

    return True
[ "def", "btc_is_p2wsh_address", "(", "address", ")", ":", "wver", ",", "whash", "=", "segwit_addr_decode", "(", "address", ")", "if", "whash", "is", "None", ":", "return", "False", "if", "len", "(", "whash", ")", "!=", "32", ":", "return", "False", "return", "True" ]
Is the given address a p2wsh address?
[ "Is", "the", "given", "address", "a", "p2wsh", "address?" ]
python
train
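A brief illustration of btc_is_p2wsh_address from the record above. The base58 string is only an example of a legacy (non-segwit) address: bech32 decoding fails for it, so the function returns False; only bech32 addresses carrying a 32-byte witness program return True.
from virtualchain.lib.blockchain.bitcoin_blockchain.keys import btc_is_p2wsh_address

# Legacy P2PKH addresses are not bech32, so segwit_addr_decode yields no
# witness hash and the check is False.
print(btc_is_p2wsh_address('1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2'))  # False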
ramses-tech/nefertari
nefertari/engine.py
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/engine.py#L59-L66
def _import_public_names(module):
    "Import public names from module into this module, like import *"
    self = sys.modules[__name__]
    for name in module.__all__:
        if hasattr(self, name):
            # don't overwrite existing names
            continue
        setattr(self, name, getattr(module, name))
[ "def", "_import_public_names", "(", "module", ")", ":", "self", "=", "sys", ".", "modules", "[", "__name__", "]", "for", "name", "in", "module", ".", "__all__", ":", "if", "hasattr", "(", "self", ",", "name", ")", ":", "# don't overwrite existing names", "continue", "setattr", "(", "self", ",", "name", ",", "getattr", "(", "module", ",", "name", ")", ")" ]
Import public names from module into this module, like import *
[ "Import", "public", "names", "from", "module", "into", "this", "module", "like", "import", "*" ]
python
train
pytorch/vision
references/classification/utils.py
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/references/classification/utils.py#L30-L41
def synchronize_between_processes(self):
    """
    Warning: does not synchronize the deque!
    """
    if not is_dist_avail_and_initialized():
        return
    t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
    dist.barrier()
    dist.all_reduce(t)
    t = t.tolist()
    self.count = int(t[0])
    self.total = t[1]
[ "def", "synchronize_between_processes", "(", "self", ")", ":", "if", "not", "is_dist_avail_and_initialized", "(", ")", ":", "return", "t", "=", "torch", ".", "tensor", "(", "[", "self", ".", "count", ",", "self", ".", "total", "]", ",", "dtype", "=", "torch", ".", "float64", ",", "device", "=", "'cuda'", ")", "dist", ".", "barrier", "(", ")", "dist", ".", "all_reduce", "(", "t", ")", "t", "=", "t", ".", "tolist", "(", ")", "self", ".", "count", "=", "int", "(", "t", "[", "0", "]", ")", "self", ".", "total", "=", "t", "[", "1", "]" ]
Warning: does not synchronize the deque!
[ "Warning", ":", "does", "not", "synchronize", "the", "deque!" ]
python
test
tomislater/RandomWords
random_words/lorem_ipsum.py
https://github.com/tomislater/RandomWords/blob/601aa48732d3c389f4c17ba0ed98ffe0e4821d78/random_words/lorem_ipsum.py#L27-L49
def get_sentences_list(self, sentences=1):
    """
    Return sentences in list.

    :param int sentences: how many sentences
    :returns: list of strings with sentence
    :rtype: list
    """
    if sentences < 1:
        raise ValueError('Param "sentences" must be greater than 0.')

    sentences_list = []

    while sentences:
        num_rand_words = random.randint(self.MIN_WORDS, self.MAX_WORDS)

        random_sentence = self.make_sentence(
            random.sample(self.words, num_rand_words))
        sentences_list.append(random_sentence)
        sentences -= 1

    return sentences_list
[ "def", "get_sentences_list", "(", "self", ",", "sentences", "=", "1", ")", ":", "if", "sentences", "<", "1", ":", "raise", "ValueError", "(", "'Param \"sentences\" must be greater than 0.'", ")", "sentences_list", "=", "[", "]", "while", "sentences", ":", "num_rand_words", "=", "random", ".", "randint", "(", "self", ".", "MIN_WORDS", ",", "self", ".", "MAX_WORDS", ")", "random_sentence", "=", "self", ".", "make_sentence", "(", "random", ".", "sample", "(", "self", ".", "words", ",", "num_rand_words", ")", ")", "sentences_list", ".", "append", "(", "random_sentence", ")", "sentences", "-=", "1", "return", "sentences_list" ]
Return sentences in list.

:param int sentences: how many sentences
:returns: list of strings with sentence
:rtype: list
[ "Return", "sentences", "in", "list", "." ]
python
train
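A usage sketch for get_sentences_list above, assuming the containing class is the LoremIpsum generator that the random_words package exposes (as the lorem_ipsum.py path suggests).
from random_words import LoremIpsum  # assumed public entry point

li = LoremIpsum()
# Three pseudo-random lorem-ipsum style sentences, each built from a
# MIN_WORDS..MAX_WORDS sample of the word list.
for sentence in li.get_sentences_list(sentences=3):
    print(sentence)
# sentences < 1 raises ValueError('Param "sentences" must be greater than 0.')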
ibis-project/ibis
ibis/bigquery/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/bigquery/client.py#L101-L104
def columns(self):
    """Return the columns of the result set."""
    result = self.query.result()
    return [field.name for field in result.schema]
[ "def", "columns", "(", "self", ")", ":", "result", "=", "self", ".", "query", ".", "result", "(", ")", "return", "[", "field", ".", "name", "for", "field", "in", "result", ".", "schema", "]" ]
Return the columns of the result set.
[ "Return", "the", "columns", "of", "the", "result", "set", "." ]
python
train
benedictpaten/sonLib
tree.py
https://github.com/benedictpaten/sonLib/blob/1decb75bb439b70721ec776f685ce98e25217d26/tree.py#L180-L185
def normaliseWV(wV, normFac=1.0):
    """ make char probs divisible by one
    """
    f = sum(wV) / normFac
    return [ i/f for i in wV ]
[ "def", "normaliseWV", "(", "wV", ",", "normFac", "=", "1.0", ")", ":", "f", "=", "sum", "(", "wV", ")", "/", "normFac", "return", "[", "i", "/", "f", "for", "i", "in", "wV", "]" ]
make char probs divisible by one
[ "make", "char", "probs", "divisible", "by", "one" ]
python
train
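A worked example for normaliseWV above; the weight vector is arbitrary, and the import path (sonLib's top-level tree.py) is an assumption about how the module is reached.
from sonLib.tree import normaliseWV  # assumed import path for tree.py

# sum([2.0, 4.0, 6.0]) is 12.0, so with the default normFac=1.0 every entry
# is divided by 12.0 and the result sums to exactly 1.0.
print(normaliseWV([2.0, 4.0, 6.0]))                    # [0.1666..., 0.3333..., 0.5]

# normFac rescales the target total: here the normalised values sum to 2.0.
print(sum(normaliseWV([2.0, 4.0, 6.0], normFac=2.0)))  # 2.0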
sdss/sdss_access
python/sdss_access/sync/http.py
https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/sync/http.py#L34-L56
def remote(self, remote_base=None, username=None, password=None):
    """ Configures remote access

    Parameters
    ----------
    remote_base : str
        base URL path for remote repository

    username : str
        user name for remote repository

    password : str
        password for local repository
    """
    if remote_base is not None:
        self.remote_base = remote_base
    self._remote = True
    self.set_auth(username=username, password=password)
    if self.auth.ready():
        passman = HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, self.remote_base, self.auth.username, self.auth.password)
        authhandler = HTTPBasicAuthHandler(passman)
        opener = build_opener(authhandler)
        install_opener(opener)
[ "def", "remote", "(", "self", ",", "remote_base", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "remote_base", "is", "not", "None", ":", "self", ".", "remote_base", "=", "remote_base", "self", ".", "_remote", "=", "True", "self", ".", "set_auth", "(", "username", "=", "username", ",", "password", "=", "password", ")", "if", "self", ".", "auth", ".", "ready", "(", ")", ":", "passman", "=", "HTTPPasswordMgrWithDefaultRealm", "(", ")", "passman", ".", "add_password", "(", "None", ",", "self", ".", "remote_base", ",", "self", ".", "auth", ".", "username", ",", "self", ".", "auth", ".", "password", ")", "authhandler", "=", "HTTPBasicAuthHandler", "(", "passman", ")", "opener", "=", "build_opener", "(", "authhandler", ")", "install_opener", "(", "opener", ")" ]
Configures remote access

Parameters
----------
remote_base : str
    base URL path for remote repository

username : str
    user name for remote repository

password : str
    password for local repository
[ "Configures", "remote", "access" ]
python
train
OnroerendErfgoed/crabpy_pyramid
crabpy_pyramid/renderers/capakey.py
https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/renderers/capakey.py#L97-L114
def item_sectie_adapter(obj, request):
    """
    Adapter for rendering an object of
    :class: `crabpy.gateway.capakey.Sectie` to json.
    """
    return {
        'id': obj.id,
        'afdeling': {
            'id': obj.afdeling.id,
            'naam': obj.afdeling.naam,
            'gemeente': {
                'id': obj.afdeling.gemeente.id,
                'naam': obj.afdeling.gemeente.naam
            },
        },
        'centroid': obj.centroid,
        'bounding_box': obj.bounding_box
    }
[ "def", "item_sectie_adapter", "(", "obj", ",", "request", ")", ":", "return", "{", "'id'", ":", "obj", ".", "id", ",", "'afdeling'", ":", "{", "'id'", ":", "obj", ".", "afdeling", ".", "id", ",", "'naam'", ":", "obj", ".", "afdeling", ".", "naam", ",", "'gemeente'", ":", "{", "'id'", ":", "obj", ".", "afdeling", ".", "gemeente", ".", "id", ",", "'naam'", ":", "obj", ".", "afdeling", ".", "gemeente", ".", "naam", "}", ",", "}", ",", "'centroid'", ":", "obj", ".", "centroid", ",", "'bounding_box'", ":", "obj", ".", "bounding_box", "}" ]
Adapter for rendering an object of :class: `crabpy.gateway.capakey.Sectie` to json.
[ "Adapter", "for", "rendering", "an", "object", "of", ":", "class", ":", "crabpy", ".", "gateway", ".", "capakey", ".", "Sectie", "to", "json", "." ]
python
train
fedora-infra/fedora-messaging
fedora_messaging/_session.py
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/_session.py#L429-L443
def call_later(self, delay, callback):
    """Schedule a one-shot timeout given delay seconds.

    This method is only useful for compatibility with older
    versions of pika.

    Args:
        delay (float): Non-negative number of seconds from now until
            expiration
        callback (method): The callback method, having the signature
            `callback()`
    """
    if hasattr(self._connection.ioloop, "call_later"):
        self._connection.ioloop.call_later(delay, callback)
    else:
        self._connection.ioloop.add_timeout(delay, callback)
[ "def", "call_later", "(", "self", ",", "delay", ",", "callback", ")", ":", "if", "hasattr", "(", "self", ".", "_connection", ".", "ioloop", ",", "\"call_later\"", ")", ":", "self", ".", "_connection", ".", "ioloop", ".", "call_later", "(", "delay", ",", "callback", ")", "else", ":", "self", ".", "_connection", ".", "ioloop", ".", "add_timeout", "(", "delay", ",", "callback", ")" ]
Schedule a one-shot timeout given delay seconds.

This method is only useful for compatibility with older
versions of pika.

Args:
    delay (float): Non-negative number of seconds from now until
        expiration
    callback (method): The callback method, having the signature
        `callback()`
[ "Schedule", "a", "one", "-", "shot", "timeout", "given", "delay", "seconds", "." ]
python
train
onnx/onnxmltools
onnxmltools/convert/coreml/operator_converters/neural_network/Pool.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/coreml/operator_converters/neural_network/Pool.py#L49-L80
def create_legacy_pad(scope, input_name, output_name, H_in, W_in, k_h, k_w, s_h, s_w, p_h, p_w, padded_value, container):
    '''
    This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of
    the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of
    CoreML' pooling under IncludeLastPixel padding mode.

    :param scope:
    :param input_name:
    :param output_name:
    :param H_in: input dimension along H-axis
    :param W_in: input dimension along W-axis
    :param k_h: kernel's H-axis dimension
    :param k_w: kernel's W-axis dimension
    :param s_h: stride along H-axis
    :param s_w: stride along W-axis
    :param p_h: padding amount at the beginning and the end of H-axis
    :param p_w: padding amount at the beginning and the end of W-axis
    :param padded_value: value used to fill padded area
    :param container: Container object
    '''
    # Add a Pad operator to pre-process 4-D tensor
    pad_t, pad_b = calculate_legacy_pad_amount(H_in, p_h, k_h, s_h)
    pad_l, pad_r = calculate_legacy_pad_amount(W_in, p_w, k_w, s_w)

    # CoreML pooling operator pads only their H- and W-axes. Here we assume the shape of the tensor to be padded
    # is [N, C, H, W], so we have 8 padding amounts
    #     pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index,
    #             N_end_index, C_end_index, H_end_index, W_end_index]
    # Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros.
    pads = [0, 0, pad_t, pad_l, 0, 0, pad_b, pad_r]
    apply_pad(scope, input_name, output_name, container, pads=pads, value=padded_value)
[ "def", "create_legacy_pad", "(", "scope", ",", "input_name", ",", "output_name", ",", "H_in", ",", "W_in", ",", "k_h", ",", "k_w", ",", "s_h", ",", "s_w", ",", "p_h", ",", "p_w", ",", "padded_value", ",", "container", ")", ":", "# Add a Pad operator to pre-process 4-D tensor", "pad_t", ",", "pad_b", "=", "calculate_legacy_pad_amount", "(", "H_in", ",", "p_h", ",", "k_h", ",", "s_h", ")", "pad_l", ",", "pad_r", "=", "calculate_legacy_pad_amount", "(", "W_in", ",", "p_w", ",", "k_w", ",", "s_w", ")", "# CoreML pooling operator pads only their H- and W-axes. Here we assume the shape of the tensor to be padded", "# is [N, C, H, W], so we have 8 padding amounts", "# pads = [N_begin_index, C_begin_index, H_begin_index, W_begin_index,", "# N_end_index, C_end_index, H_end_index, W_end_index]", "# Because only H- and W-axes are padded in CoreML, we leave padding amounts of N- and C-axes zeros.", "pads", "=", "[", "0", ",", "0", ",", "pad_t", ",", "pad_l", ",", "0", ",", "0", ",", "pad_b", ",", "pad_r", "]", "apply_pad", "(", "scope", ",", "input_name", ",", "output_name", ",", "container", ",", "pads", "=", "pads", ",", "value", "=", "padded_value", ")" ]
This function adds one Pad operator into its last argument, which is a Container object. By feeding the output of
the created Pad operator into Pool operator under valid padding mode, we can achieve the same functionality of
CoreML' pooling under IncludeLastPixel padding mode.

:param scope:
:param input_name:
:param output_name:
:param H_in: input dimension along H-axis
:param W_in: input dimension along W-axis
:param k_h: kernel's H-axis dimension
:param k_w: kernel's W-axis dimension
:param s_h: stride along H-axis
:param s_w: stride along W-axis
:param p_h: padding amount at the beginning and the end of H-axis
:param p_w: padding amount at the beginning and the end of W-axis
:param padded_value: value used to fill padded area
:param container: Container object
[ "This", "function", "adds", "one", "Pad", "operator", "into", "its", "last", "argument", "which", "is", "a", "Container", "object", ".", "By", "feeding", "the", "output", "of", "the", "created", "Pad", "operator", "into", "Pool", "operator", "under", "valid", "padding", "mode", "we", "can", "achieve", "the", "same", "functionality", "of", "CoreML", "pooling", "under", "IncludeLastPixel", "padding", "mode", "." ]
python
train
twilio/twilio-python
twilio/rest/autopilot/v1/assistant/dialogue.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/autopilot/v1/assistant/dialogue.py#L89-L98
def get_instance(self, payload):
    """
    Build an instance of DialogueInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance
    :rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance
    """
    return DialogueInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "DialogueInstance", "(", "self", ".", "_version", ",", "payload", ",", "assistant_sid", "=", "self", ".", "_solution", "[", "'assistant_sid'", "]", ",", ")" ]
Build an instance of DialogueInstance

:param dict payload: Payload response from the API

:returns: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance
:rtype: twilio.rest.autopilot.v1.assistant.dialogue.DialogueInstance
[ "Build", "an", "instance", "of", "DialogueInstance" ]
python
train
sarugaku/virtenv
virtenv.py
https://github.com/sarugaku/virtenv/blob/fc42a9d8dc9f1821d3893899df78e08a081f6ca3/virtenv.py#L150-L163
def create(python, env_dir, system, prompt, bare, virtualenv_py=None):
    """Main entry point to use this as a module.
    """
    if not python or python == sys.executable:
        _create_with_this(
            env_dir=env_dir, system=system, prompt=prompt,
            bare=bare, virtualenv_py=virtualenv_py,
        )
    else:
        _create_with_python(
            python=python, env_dir=env_dir, system=system, prompt=prompt,
            bare=bare, virtualenv_py=virtualenv_py,
        )
[ "def", "create", "(", "python", ",", "env_dir", ",", "system", ",", "prompt", ",", "bare", ",", "virtualenv_py", "=", "None", ")", ":", "if", "not", "python", "or", "python", "==", "sys", ".", "executable", ":", "_create_with_this", "(", "env_dir", "=", "env_dir", ",", "system", "=", "system", ",", "prompt", "=", "prompt", ",", "bare", "=", "bare", ",", "virtualenv_py", "=", "virtualenv_py", ",", ")", "else", ":", "_create_with_python", "(", "python", "=", "python", ",", "env_dir", "=", "env_dir", ",", "system", "=", "system", ",", "prompt", "=", "prompt", ",", "bare", "=", "bare", ",", "virtualenv_py", "=", "virtualenv_py", ",", ")" ]
Main entry point to use this as a module.
[ "Main", "entry", "point", "to", "use", "this", "as", "a", "module", "." ]
python
train
numenta/nupic
src/nupic/algorithms/spatial_pooler.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L978-L993
def _updateMinDutyCyclesLocal(self):
    """
    Updates the minimum duty cycles. The minimum duty cycles are determined
    locally. Each column's minimum duty cycles are set to be a percent of the
    maximum duty cycles in the column's neighborhood. Unlike
    _updateMinDutyCyclesGlobal, here the values can be quite different for
    different columns.
    """
    for column in xrange(self._numColumns):
        neighborhood = self._getColumnNeighborhood(column)

        maxActiveDuty = self._activeDutyCycles[neighborhood].max()
        maxOverlapDuty = self._overlapDutyCycles[neighborhood].max()

        self._minOverlapDutyCycles[column] = (maxOverlapDuty *
                                              self._minPctOverlapDutyCycles)
[ "def", "_updateMinDutyCyclesLocal", "(", "self", ")", ":", "for", "column", "in", "xrange", "(", "self", ".", "_numColumns", ")", ":", "neighborhood", "=", "self", ".", "_getColumnNeighborhood", "(", "column", ")", "maxActiveDuty", "=", "self", ".", "_activeDutyCycles", "[", "neighborhood", "]", ".", "max", "(", ")", "maxOverlapDuty", "=", "self", ".", "_overlapDutyCycles", "[", "neighborhood", "]", ".", "max", "(", ")", "self", ".", "_minOverlapDutyCycles", "[", "column", "]", "=", "(", "maxOverlapDuty", "*", "self", ".", "_minPctOverlapDutyCycles", ")" ]
Updates the minimum duty cycles. The minimum duty cycles are determined locally. Each column's minimum duty cycles are set to be a percent of the maximum duty cycles in the column's neighborhood. Unlike _updateMinDutyCyclesGlobal, here the values can be quite different for different columns.
[ "Updates", "the", "minimum", "duty", "cycles", ".", "The", "minimum", "duty", "cycles", "are", "determined", "locally", ".", "Each", "column", "s", "minimum", "duty", "cycles", "are", "set", "to", "be", "a", "percent", "of", "the", "maximum", "duty", "cycles", "in", "the", "column", "s", "neighborhood", ".", "Unlike", "_updateMinDutyCyclesGlobal", "here", "the", "values", "can", "be", "quite", "different", "for", "different", "columns", "." ]
python
valid
calmjs/calmjs
src/calmjs/registry.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/registry.py#L66-L96
def _init(self):
    """
    Turn the records into actual usable keys.
    """
    self._entry_points = {}
    for entry_point in self.raw_entry_points:
        if entry_point.dist.project_name != self.reserved.get(
                entry_point.name, entry_point.dist.project_name):
            logger.error(
                "registry '%s' for '%s' is reserved for package '%s'",
                entry_point.name, self.registry_name,
                self.reserved[entry_point.name],
            )
            continue

        if self.get_record(entry_point.name):
            logger.warning(
                "registry '%s' for '%s' is already registered.",
                entry_point.name, self.registry_name,
            )
            existing = self._entry_points[entry_point.name]
            logger.debug(
                "registered '%s' from '%s'", existing, existing.dist)
            logger.debug(
                "discarded '%s' from '%s'", entry_point, entry_point.dist)
            continue

        logger.debug(
            "recording '%s' from '%s'", entry_point, entry_point.dist)
        self._entry_points[entry_point.name] = entry_point
[ "def", "_init", "(", "self", ")", ":", "self", ".", "_entry_points", "=", "{", "}", "for", "entry_point", "in", "self", ".", "raw_entry_points", ":", "if", "entry_point", ".", "dist", ".", "project_name", "!=", "self", ".", "reserved", ".", "get", "(", "entry_point", ".", "name", ",", "entry_point", ".", "dist", ".", "project_name", ")", ":", "logger", ".", "error", "(", "\"registry '%s' for '%s' is reserved for package '%s'\"", ",", "entry_point", ".", "name", ",", "self", ".", "registry_name", ",", "self", ".", "reserved", "[", "entry_point", ".", "name", "]", ",", ")", "continue", "if", "self", ".", "get_record", "(", "entry_point", ".", "name", ")", ":", "logger", ".", "warning", "(", "\"registry '%s' for '%s' is already registered.\"", ",", "entry_point", ".", "name", ",", "self", ".", "registry_name", ",", ")", "existing", "=", "self", ".", "_entry_points", "[", "entry_point", ".", "name", "]", "logger", ".", "debug", "(", "\"registered '%s' from '%s'\"", ",", "existing", ",", "existing", ".", "dist", ")", "logger", ".", "debug", "(", "\"discarded '%s' from '%s'\"", ",", "entry_point", ",", "entry_point", ".", "dist", ")", "continue", "logger", ".", "debug", "(", "\"recording '%s' from '%s'\"", ",", "entry_point", ",", "entry_point", ".", "dist", ")", "self", ".", "_entry_points", "[", "entry_point", ".", "name", "]", "=", "entry_point" ]
Turn the records into actual usable keys.
[ "Turn", "the", "records", "into", "actual", "usable", "keys", "." ]
python
train
astropy/photutils
photutils/segmentation/properties.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L1061-L1078
def eccentricity(self):
    """
    The eccentricity of the 2D Gaussian function that has the same
    second-order moments as the source.

    The eccentricity is the fraction of the distance along the
    semimajor axis at which the focus lies.

    .. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}}

    where :math:`a` and :math:`b` are the lengths of the semimajor and
    semiminor axes, respectively.
    """
    l1, l2 = self.covariance_eigvals
    if l1 == 0:
        return 0.    # pragma: no cover
    return np.sqrt(1. - (l2 / l1))
[ "def", "eccentricity", "(", "self", ")", ":", "l1", ",", "l2", "=", "self", ".", "covariance_eigvals", "if", "l1", "==", "0", ":", "return", "0.", "# pragma: no cover", "return", "np", ".", "sqrt", "(", "1.", "-", "(", "l2", "/", "l1", ")", ")" ]
The eccentricity of the 2D Gaussian function that has the same
second-order moments as the source.

The eccentricity is the fraction of the distance along the
semimajor axis at which the focus lies.

.. math:: e = \\sqrt{1 - \\frac{b^2}{a^2}}

where :math:`a` and :math:`b` are the lengths of the semimajor and
semiminor axes, respectively.
[ "The", "eccentricity", "of", "the", "2D", "Gaussian", "function", "that", "has", "the", "same", "second", "-", "order", "moments", "as", "the", "source", "." ]
python
train
soldag/python-pwmled
pwmled/led/rgb.py
https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/led/rgb.py#L140-L150
def _assert_is_color(value):
    """
    Assert that the given value is a valid brightness.

    :param value: The value to check.
    """
    if not isinstance(value, tuple) or len(value) != 3:
        raise ValueError("Color must be a RGB tuple.")

    if not all(0 <= x <= 255 for x in value):
        raise ValueError("RGB values of color must be between 0 and 255.")
[ "def", "_assert_is_color", "(", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "tuple", ")", "or", "len", "(", "value", ")", "!=", "3", ":", "raise", "ValueError", "(", "\"Color must be a RGB tuple.\"", ")", "if", "not", "all", "(", "0", "<=", "x", "<=", "255", "for", "x", "in", "value", ")", ":", "raise", "ValueError", "(", "\"RGB values of color must be between 0 and 255.\"", ")" ]
Assert that the given value is a valid brightness.

:param value: The value to check.
[ "Assert", "that", "the", "given", "value", "is", "a", "valid", "brightness", "." ]
python
train
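A sketch of the validation contract of _assert_is_color above. The class name RgbLed and its import from the record's pwmled/led/rgb.py module are assumptions; the calls only restate what the shown code accepts and rejects.
from pwmled.led.rgb import RgbLed  # assumed class name and location

RgbLed._assert_is_color((255, 128, 0))   # valid: 3-tuple with values in 0..255
RgbLed._assert_is_color((255, 128))      # ValueError: Color must be a RGB tuple.
RgbLed._assert_is_color((300, 0, 0))     # ValueError: values must be between 0 and 255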
marcelcaraciolo/foursquare
examples/django/example/djfoursquare/views.py
https://github.com/marcelcaraciolo/foursquare/blob/a8bda33cc2d61e25aa8df72011246269fd98aa13/examples/django/example/djfoursquare/views.py#L73-L85
def check_key(request):
    """
    Check to see if we already have an access_key stored,
    if we do then we have already gone through
    OAuth. If not then we haven't and we probably need to.
    """
    try:
        access_key = request.session.get('oauth_token', None)
        if not access_key:
            return False
    except KeyError:
        return False
    return True
[ "def", "check_key", "(", "request", ")", ":", "try", ":", "access_key", "=", "request", ".", "session", ".", "get", "(", "'oauth_token'", ",", "None", ")", "if", "not", "access_key", ":", "return", "False", "except", "KeyError", ":", "return", "False", "return", "True" ]
Check to see if we already have an access_key stored, if we do then we have already gone through OAuth. If not then we haven't and we probably need to.
[ "Check", "to", "see", "if", "we", "already", "have", "an", "access_key", "stored", "if", "we", "do", "then", "we", "have", "already", "gone", "through", "OAuth", ".", "If", "not", "then", "we", "haven", "t", "and", "we", "probably", "need", "to", "." ]
python
train
spyder-ide/conda-manager
conda_manager/api/conda_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L863-L882
def environment_exists(self, name=None, prefix=None, abspath=True, log=True):
    """Check if an environment exists by 'name' or by 'prefix'.

    If query is by 'name' only the default conda environments directory is
    searched.
    """
    if log:
        logger.debug(str((name, prefix)))

    if name and prefix:
        raise TypeError("Exactly one of 'name' or 'prefix' is required.")

    if name:
        prefix = self.get_prefix_envname(name, log=log)

    if prefix is None:
        prefix = self.ROOT_PREFIX

    return os.path.isdir(os.path.join(prefix, 'conda-meta'))
[ "def", "environment_exists", "(", "self", ",", "name", "=", "None", ",", "prefix", "=", "None", ",", "abspath", "=", "True", ",", "log", "=", "True", ")", ":", "if", "log", ":", "logger", ".", "debug", "(", "str", "(", "(", "name", ",", "prefix", ")", ")", ")", "if", "name", "and", "prefix", ":", "raise", "TypeError", "(", "\"Exactly one of 'name' or 'prefix' is required.\"", ")", "if", "name", ":", "prefix", "=", "self", ".", "get_prefix_envname", "(", "name", ",", "log", "=", "log", ")", "if", "prefix", "is", "None", ":", "prefix", "=", "self", ".", "ROOT_PREFIX", "return", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "join", "(", "prefix", ",", "'conda-meta'", ")", ")" ]
Check if an environment exists by 'name' or by 'prefix'. If query is by 'name' only the default conda environments directory is searched.
[ "Check", "if", "an", "environment", "exists", "by", "name", "or", "by", "prefix", "." ]
python
train