id: int32 (0 to 252k)
repo: stringlengths (7 to 55)
path: stringlengths (4 to 127)
func_name: stringlengths (1 to 88)
original_string: stringlengths (75 to 19.8k)
language: stringclasses (1 value)
code: stringlengths (75 to 19.8k)
code_tokens: sequence
docstring: stringlengths (3 to 17.3k)
docstring_tokens: sequence
sha: stringlengths (40 to 40)
url: stringlengths (87 to 242)
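The fields above correspond one-to-one to the per-function records that follow; each record lists id, repo, path, func_name, original_string, language, code, code_tokens, docstring, docstring_tokens, sha, and url in that order. As a minimal sketch of how such records might be consumed (assuming the corpus is available through the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not taken from this page):

```python
# Minimal sketch, not the canonical loader: assumes a Hugging Face `datasets`-style
# corpus with the columns listed above. "DATASET_NAME" is a placeholder.
from datasets import load_dataset

ds = load_dataset("DATASET_NAME", split="train")

for row in ds.select(range(3)):
    # Each record pairs a Python function with its docstring and source location.
    print(row["id"], row["repo"], row["path"], row["func_name"])
    print(row["docstring"][:80])           # free-text docstring (3 to 17.3k chars)
    print(len(row["code_tokens"]), "code tokens")
    print(row["url"])                      # GitHub permalink pinned to the sha and line range
```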
250,400
abe-winter/pg13-py
pg13/pg.py
Row.updatewhere
def updatewhere(clas,pool_or_cursor,where_keys,**update_keys): "this doesn't allow raw_keys for now" # if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField? if not where_keys or not update_keys: raise ValueError setclause=','.join(k+'=%s' for k in update_keys) whereclause=' and '.join(eqexpr(k,v) for k,v in where_keys.items()) q='update %s set %s where %s'%(clas.TABLE,setclause,whereclause) vals = tuple(update_keys.values()+where_keys.values()) commit_or_execute(pool_or_cursor,q,vals)
python
def updatewhere(clas,pool_or_cursor,where_keys,**update_keys): "this doesn't allow raw_keys for now" # if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField? if not where_keys or not update_keys: raise ValueError setclause=','.join(k+'=%s' for k in update_keys) whereclause=' and '.join(eqexpr(k,v) for k,v in where_keys.items()) q='update %s set %s where %s'%(clas.TABLE,setclause,whereclause) vals = tuple(update_keys.values()+where_keys.values()) commit_or_execute(pool_or_cursor,q,vals)
[ "def", "updatewhere", "(", "clas", ",", "pool_or_cursor", ",", "where_keys", ",", "*", "*", "update_keys", ")", ":", "# if clas.JSONFIELDS: raise NotImplementedError # todo(awinter): do I need to make the same change for SpecialField?\r", "if", "not", "where_keys", "or", "not", "update_keys", ":", "raise", "ValueError", "setclause", "=", "','", ".", "join", "(", "k", "+", "'=%s'", "for", "k", "in", "update_keys", ")", "whereclause", "=", "' and '", ".", "join", "(", "eqexpr", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "where_keys", ".", "items", "(", ")", ")", "q", "=", "'update %s set %s where %s'", "%", "(", "clas", ".", "TABLE", ",", "setclause", ",", "whereclause", ")", "vals", "=", "tuple", "(", "update_keys", ".", "values", "(", ")", "+", "where_keys", ".", "values", "(", ")", ")", "commit_or_execute", "(", "pool_or_cursor", ",", "q", ",", "vals", ")" ]
this doesn't allow raw_keys for now
[ "this", "doesn", "t", "allow", "raw_keys", "for", "now" ]
c78806f99f35541a8756987e86edca3438aa97f5
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/pg.py#L277-L285
250,401
ulf1/oxyba
oxyba/clean_to_decimal.py
clean_to_decimal
def clean_to_decimal(x, prec=28): """Convert an string, int or float to Decimal object Parameters ---------- x : str, list, tuple, numpy.ndarray, pandas.DataFrame A string, int or float number, or a list, array or dataframe of these. digits : int (Default prec=None) Set the getcontext precision Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame Decimal object or array of Decimal objects Example ------- clean_to_decimal('12.345') Decimal('12.345') clean_to_decimal('12.345', prec=2) Decimal('12') clean_to_decimal(12.345) Decimal('12.34500000000000063948846218') clean_to_decimal(12.345, prec=5) Decimal('12.345') """ import numpy as np import pandas as pd import decimal def proc_elem(e): try: return decimal.Decimal(e) + decimal.Decimal('0.0') except Exception as e: print(e) return None def proc_list(x): return [proc_elem(e) for e in x] def proc_ndarray(x): tmp = proc_list(list(x.reshape((x.size,)))) return np.array(tmp).reshape(x.shape) # set precision if prec: decimal.getcontext().prec = prec # transform string, list/tuple, numpy array, pandas dataframe if isinstance(x, (str, int, float)): return proc_elem(x) elif isinstance(x, (list, tuple)): return proc_list(x) elif isinstance(x, np.ndarray): return proc_ndarray(x) elif isinstance(x, pd.DataFrame): return pd.DataFrame(proc_ndarray(x.values), columns=x.columns, index=x.index) else: return None
python
def clean_to_decimal(x, prec=28): """Convert an string, int or float to Decimal object Parameters ---------- x : str, list, tuple, numpy.ndarray, pandas.DataFrame A string, int or float number, or a list, array or dataframe of these. digits : int (Default prec=None) Set the getcontext precision Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame Decimal object or array of Decimal objects Example ------- clean_to_decimal('12.345') Decimal('12.345') clean_to_decimal('12.345', prec=2) Decimal('12') clean_to_decimal(12.345) Decimal('12.34500000000000063948846218') clean_to_decimal(12.345, prec=5) Decimal('12.345') """ import numpy as np import pandas as pd import decimal def proc_elem(e): try: return decimal.Decimal(e) + decimal.Decimal('0.0') except Exception as e: print(e) return None def proc_list(x): return [proc_elem(e) for e in x] def proc_ndarray(x): tmp = proc_list(list(x.reshape((x.size,)))) return np.array(tmp).reshape(x.shape) # set precision if prec: decimal.getcontext().prec = prec # transform string, list/tuple, numpy array, pandas dataframe if isinstance(x, (str, int, float)): return proc_elem(x) elif isinstance(x, (list, tuple)): return proc_list(x) elif isinstance(x, np.ndarray): return proc_ndarray(x) elif isinstance(x, pd.DataFrame): return pd.DataFrame(proc_ndarray(x.values), columns=x.columns, index=x.index) else: return None
[ "def", "clean_to_decimal", "(", "x", ",", "prec", "=", "28", ")", ":", "import", "numpy", "as", "np", "import", "pandas", "as", "pd", "import", "decimal", "def", "proc_elem", "(", "e", ")", ":", "try", ":", "return", "decimal", ".", "Decimal", "(", "e", ")", "+", "decimal", ".", "Decimal", "(", "'0.0'", ")", "except", "Exception", "as", "e", ":", "print", "(", "e", ")", "return", "None", "def", "proc_list", "(", "x", ")", ":", "return", "[", "proc_elem", "(", "e", ")", "for", "e", "in", "x", "]", "def", "proc_ndarray", "(", "x", ")", ":", "tmp", "=", "proc_list", "(", "list", "(", "x", ".", "reshape", "(", "(", "x", ".", "size", ",", ")", ")", ")", ")", "return", "np", ".", "array", "(", "tmp", ")", ".", "reshape", "(", "x", ".", "shape", ")", "# set precision", "if", "prec", ":", "decimal", ".", "getcontext", "(", ")", ".", "prec", "=", "prec", "# transform string, list/tuple, numpy array, pandas dataframe", "if", "isinstance", "(", "x", ",", "(", "str", ",", "int", ",", "float", ")", ")", ":", "return", "proc_elem", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "proc_list", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "return", "proc_ndarray", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "pd", ".", "DataFrame", ")", ":", "return", "pd", ".", "DataFrame", "(", "proc_ndarray", "(", "x", ".", "values", ")", ",", "columns", "=", "x", ".", "columns", ",", "index", "=", "x", ".", "index", ")", "else", ":", "return", "None" ]
Convert an string, int or float to Decimal object Parameters ---------- x : str, list, tuple, numpy.ndarray, pandas.DataFrame A string, int or float number, or a list, array or dataframe of these. digits : int (Default prec=None) Set the getcontext precision Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame Decimal object or array of Decimal objects Example ------- clean_to_decimal('12.345') Decimal('12.345') clean_to_decimal('12.345', prec=2) Decimal('12') clean_to_decimal(12.345) Decimal('12.34500000000000063948846218') clean_to_decimal(12.345, prec=5) Decimal('12.345')
[ "Convert", "an", "string", "int", "or", "float", "to", "Decimal", "object" ]
b3043116050de275124365cb11e7df91fb40169d
https://github.com/ulf1/oxyba/blob/b3043116050de275124365cb11e7df91fb40169d/oxyba/clean_to_decimal.py#L2-L69
250,402
minhhoit/yacms
yacms/generic/templatetags/rating_tags.py
rating_for
def rating_for(context, obj): """ Provides a generic context variable name for the object that ratings are being rendered for, and the rating form. """ context["rating_object"] = context["rating_obj"] = obj context["rating_form"] = RatingForm(context["request"], obj) ratings = context["request"].COOKIES.get("yacms-rating", "") rating_string = "%s.%s" % (obj._meta, obj.pk) context["rated"] = (rating_string in ratings) rating_name = obj.get_ratingfield_name() for f in ("average", "count", "sum"): context["rating_" + f] = getattr(obj, "%s_%s" % (rating_name, f)) return context
python
def rating_for(context, obj): """ Provides a generic context variable name for the object that ratings are being rendered for, and the rating form. """ context["rating_object"] = context["rating_obj"] = obj context["rating_form"] = RatingForm(context["request"], obj) ratings = context["request"].COOKIES.get("yacms-rating", "") rating_string = "%s.%s" % (obj._meta, obj.pk) context["rated"] = (rating_string in ratings) rating_name = obj.get_ratingfield_name() for f in ("average", "count", "sum"): context["rating_" + f] = getattr(obj, "%s_%s" % (rating_name, f)) return context
[ "def", "rating_for", "(", "context", ",", "obj", ")", ":", "context", "[", "\"rating_object\"", "]", "=", "context", "[", "\"rating_obj\"", "]", "=", "obj", "context", "[", "\"rating_form\"", "]", "=", "RatingForm", "(", "context", "[", "\"request\"", "]", ",", "obj", ")", "ratings", "=", "context", "[", "\"request\"", "]", ".", "COOKIES", ".", "get", "(", "\"yacms-rating\"", ",", "\"\"", ")", "rating_string", "=", "\"%s.%s\"", "%", "(", "obj", ".", "_meta", ",", "obj", ".", "pk", ")", "context", "[", "\"rated\"", "]", "=", "(", "rating_string", "in", "ratings", ")", "rating_name", "=", "obj", ".", "get_ratingfield_name", "(", ")", "for", "f", "in", "(", "\"average\"", ",", "\"count\"", ",", "\"sum\"", ")", ":", "context", "[", "\"rating_\"", "+", "f", "]", "=", "getattr", "(", "obj", ",", "\"%s_%s\"", "%", "(", "rating_name", ",", "f", ")", ")", "return", "context" ]
Provides a generic context variable name for the object that ratings are being rendered for, and the rating form.
[ "Provides", "a", "generic", "context", "variable", "name", "for", "the", "object", "that", "ratings", "are", "being", "rendered", "for", "and", "the", "rating", "form", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/templatetags/rating_tags.py#L11-L24
250,403
jldantas/libmft
libmft/attribute.py
get_attr_info
def get_attr_info(binary_view): '''Gets basic information from a binary stream to allow correct processing of the attribute header. This function allows the interpretation of the Attribute type, attribute length and if the attribute is non resident. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: An tuple with the attribute type, the attribute length, in bytes, and if the attribute is resident or not. ''' global _ATTR_BASIC attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9]) return (AttrTypes(attr_type), attr_len, bool(non_resident))
python
def get_attr_info(binary_view): '''Gets basic information from a binary stream to allow correct processing of the attribute header. This function allows the interpretation of the Attribute type, attribute length and if the attribute is non resident. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: An tuple with the attribute type, the attribute length, in bytes, and if the attribute is resident or not. ''' global _ATTR_BASIC attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9]) return (AttrTypes(attr_type), attr_len, bool(non_resident))
[ "def", "get_attr_info", "(", "binary_view", ")", ":", "global", "_ATTR_BASIC", "attr_type", ",", "attr_len", ",", "non_resident", "=", "_ATTR_BASIC", ".", "unpack", "(", "binary_view", "[", ":", "9", "]", ")", "return", "(", "AttrTypes", "(", "attr_type", ")", ",", "attr_len", ",", "bool", "(", "non_resident", ")", ")" ]
Gets basic information from a binary stream to allow correct processing of the attribute header. This function allows the interpretation of the Attribute type, attribute length and if the attribute is non resident. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: An tuple with the attribute type, the attribute length, in bytes, and if the attribute is resident or not.
[ "Gets", "basic", "information", "from", "a", "binary", "stream", "to", "allow", "correct", "processing", "of", "the", "attribute", "header", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L59-L78
250,404
jldantas/libmft
libmft/attribute.py
_create_attrcontent_class
def _create_attrcontent_class(name, fields, inheritance=(object,), data_structure=None, extra_functions=None, docstring=""): '''Helper function that creates a class for attribute contents. This function creates is a boilerplate to create all the expected methods of an attributes. The basic methods work in the same way for all classes. Once it executes it defines a dynamic class with the methods "__init__", "__repr__" and "__eq__" based on the fields passed in the ``fields`` parameter. If the ``data_structure`` parameter is present, the classmethod ``get_representation_size`` and the class variable ``_REPR`` will also be present. It is also possible to define the inheritance using this method by passing a list of classes in the ``inheritance`` parameter. If the ``extra_functions`` argument is present, they will be added to the class. Note: If the ``extra_functions`` has defined any of dinamically created methods, they will *replace* the ones created. Args: name (str): Name of the class that will be created. fields (tuple(str)): The attributes that will be added to the class. inherited (tuple(object)): List of objects that will be inherited by the new class extra_functions (dict(str : function)): A dictionary where the key will be the name of the function in the class and the content of the key is a function that will be bound to the class doctring (str): Class' docstring Returns: A new class with the ``name`` as it's name. ''' def create_func_from_str(f_name, args, content, docstring=""): '''Helper function to create functions from strings. To improve performance, the standard functions are created at runtime based on the string derived from the content. This way the function, from the interpreter point of view, looks like statically defined. Note: This function should be used only for methods that will receive ``self`` (instace methods). The ``self`` argument is added automatically. Args: f_name (str): Function name args (list(str)): List of extra arguments that the function will receive content (str): Content of the function docstring (str): Function's docstring Returns: A new function object that can be inserted in the class. 
''' exec_namespace = {"__name__" : f"{f_name}"} new_args = ", ".join(["self"] + args) func_str = f"def {f_name}({new_args}): {content}" exec(func_str, exec_namespace) func = exec_namespace[f_name] func.__doc__ = docstring return func #creates the functions necessary for the new class slots = fields init_content = ", ".join([f"self.{field}" for field in fields]) + " = content" __init__ = create_func_from_str("__init__", [f"content=(None,)*{len(fields)}"], init_content) temp = ", ".join([f"{field}={{self.{field}}}" for field in fields]) repr = "return " + f"f\'{{self.__class__.__name__}}({temp})\'" __repr__ = create_func_from_str("__repr__", [], repr) temp = " and ".join([f"self.{field} == other.{field}" for field in fields]) eq = f"return {temp} if isinstance(other, {name}) else False" __eq__ = create_func_from_str("__eq__", ["other"], eq) @classmethod def get_representation_size(cls): return cls._REPR.size #adapted from namedtuple code # Modify function metadata to help with introspection and debugging for method in (__init__, get_representation_size.__func__, __eq__, __repr__): method.__qualname__ = f'{name}.{method.__name__}' #map class namespace for the class creation namespace = {"__slots__" : slots, "__init__" : __init__, "__repr__" : __repr__, "__eq__" : __eq__ } if data_structure is not None: namespace["_REPR"] = struct.Struct(data_structure) namespace["get_representation_size"] = get_representation_size if docstring: namespace["__doc__"] = docstring #some new mappings can be set or overload the ones defined if extra_functions is not None: for method in extra_functions.values(): try: method.__qualname__ = f'{name}.{method.__name__}' except AttributeError: try: method.__func__.__qualname__ = f'{name}.{method.__func__.__name__}' except AttributeError: #if we got here, it is not a method or classmethod, must be an attribute #TODO feels like a hack, change it #TODO design a test for this pass namespace = {**namespace, **extra_functions} #TODO check if docstring was provided, issue a warning new_class = type(name, inheritance, namespace) # adapted from namedtuple code # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. try: new_class.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass return new_class
python
def _create_attrcontent_class(name, fields, inheritance=(object,), data_structure=None, extra_functions=None, docstring=""): '''Helper function that creates a class for attribute contents. This function creates is a boilerplate to create all the expected methods of an attributes. The basic methods work in the same way for all classes. Once it executes it defines a dynamic class with the methods "__init__", "__repr__" and "__eq__" based on the fields passed in the ``fields`` parameter. If the ``data_structure`` parameter is present, the classmethod ``get_representation_size`` and the class variable ``_REPR`` will also be present. It is also possible to define the inheritance using this method by passing a list of classes in the ``inheritance`` parameter. If the ``extra_functions`` argument is present, they will be added to the class. Note: If the ``extra_functions`` has defined any of dinamically created methods, they will *replace* the ones created. Args: name (str): Name of the class that will be created. fields (tuple(str)): The attributes that will be added to the class. inherited (tuple(object)): List of objects that will be inherited by the new class extra_functions (dict(str : function)): A dictionary where the key will be the name of the function in the class and the content of the key is a function that will be bound to the class doctring (str): Class' docstring Returns: A new class with the ``name`` as it's name. ''' def create_func_from_str(f_name, args, content, docstring=""): '''Helper function to create functions from strings. To improve performance, the standard functions are created at runtime based on the string derived from the content. This way the function, from the interpreter point of view, looks like statically defined. Note: This function should be used only for methods that will receive ``self`` (instace methods). The ``self`` argument is added automatically. Args: f_name (str): Function name args (list(str)): List of extra arguments that the function will receive content (str): Content of the function docstring (str): Function's docstring Returns: A new function object that can be inserted in the class. 
''' exec_namespace = {"__name__" : f"{f_name}"} new_args = ", ".join(["self"] + args) func_str = f"def {f_name}({new_args}): {content}" exec(func_str, exec_namespace) func = exec_namespace[f_name] func.__doc__ = docstring return func #creates the functions necessary for the new class slots = fields init_content = ", ".join([f"self.{field}" for field in fields]) + " = content" __init__ = create_func_from_str("__init__", [f"content=(None,)*{len(fields)}"], init_content) temp = ", ".join([f"{field}={{self.{field}}}" for field in fields]) repr = "return " + f"f\'{{self.__class__.__name__}}({temp})\'" __repr__ = create_func_from_str("__repr__", [], repr) temp = " and ".join([f"self.{field} == other.{field}" for field in fields]) eq = f"return {temp} if isinstance(other, {name}) else False" __eq__ = create_func_from_str("__eq__", ["other"], eq) @classmethod def get_representation_size(cls): return cls._REPR.size #adapted from namedtuple code # Modify function metadata to help with introspection and debugging for method in (__init__, get_representation_size.__func__, __eq__, __repr__): method.__qualname__ = f'{name}.{method.__name__}' #map class namespace for the class creation namespace = {"__slots__" : slots, "__init__" : __init__, "__repr__" : __repr__, "__eq__" : __eq__ } if data_structure is not None: namespace["_REPR"] = struct.Struct(data_structure) namespace["get_representation_size"] = get_representation_size if docstring: namespace["__doc__"] = docstring #some new mappings can be set or overload the ones defined if extra_functions is not None: for method in extra_functions.values(): try: method.__qualname__ = f'{name}.{method.__name__}' except AttributeError: try: method.__func__.__qualname__ = f'{name}.{method.__func__.__name__}' except AttributeError: #if we got here, it is not a method or classmethod, must be an attribute #TODO feels like a hack, change it #TODO design a test for this pass namespace = {**namespace, **extra_functions} #TODO check if docstring was provided, issue a warning new_class = type(name, inheritance, namespace) # adapted from namedtuple code # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. try: new_class.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass return new_class
[ "def", "_create_attrcontent_class", "(", "name", ",", "fields", ",", "inheritance", "=", "(", "object", ",", ")", ",", "data_structure", "=", "None", ",", "extra_functions", "=", "None", ",", "docstring", "=", "\"\"", ")", ":", "def", "create_func_from_str", "(", "f_name", ",", "args", ",", "content", ",", "docstring", "=", "\"\"", ")", ":", "'''Helper function to create functions from strings.\n\n To improve performance, the standard functions are created at runtime\n based on the string derived from the content. This way the function, from\n the interpreter point of view, looks like statically defined.\n\n Note:\n This function should be used only for methods that will receive\n ``self`` (instace methods). The ``self`` argument is added automatically.\n\n Args:\n f_name (str): Function name\n args (list(str)): List of extra arguments that the function will receive\n content (str): Content of the function\n docstring (str): Function's docstring\n\n Returns:\n A new function object that can be inserted in the class.\n '''", "exec_namespace", "=", "{", "\"__name__\"", ":", "f\"{f_name}\"", "}", "new_args", "=", "\", \"", ".", "join", "(", "[", "\"self\"", "]", "+", "args", ")", "func_str", "=", "f\"def {f_name}({new_args}): {content}\"", "exec", "(", "func_str", ",", "exec_namespace", ")", "func", "=", "exec_namespace", "[", "f_name", "]", "func", ".", "__doc__", "=", "docstring", "return", "func", "#creates the functions necessary for the new class", "slots", "=", "fields", "init_content", "=", "\", \"", ".", "join", "(", "[", "f\"self.{field}\"", "for", "field", "in", "fields", "]", ")", "+", "\" = content\"", "__init__", "=", "create_func_from_str", "(", "\"__init__\"", ",", "[", "f\"content=(None,)*{len(fields)}\"", "]", ",", "init_content", ")", "temp", "=", "\", \"", ".", "join", "(", "[", "f\"{field}={{self.{field}}}\"", "for", "field", "in", "fields", "]", ")", "repr", "=", "\"return \"", "+", "f\"f\\'{{self.__class__.__name__}}({temp})\\'\"", "__repr__", "=", "create_func_from_str", "(", "\"__repr__\"", ",", "[", "]", ",", "repr", ")", "temp", "=", "\" and \"", ".", "join", "(", "[", "f\"self.{field} == other.{field}\"", "for", "field", "in", "fields", "]", ")", "eq", "=", "f\"return {temp} if isinstance(other, {name}) else False\"", "__eq__", "=", "create_func_from_str", "(", "\"__eq__\"", ",", "[", "\"other\"", "]", ",", "eq", ")", "@", "classmethod", "def", "get_representation_size", "(", "cls", ")", ":", "return", "cls", ".", "_REPR", ".", "size", "#adapted from namedtuple code", "# Modify function metadata to help with introspection and debugging", "for", "method", "in", "(", "__init__", ",", "get_representation_size", ".", "__func__", ",", "__eq__", ",", "__repr__", ")", ":", "method", ".", "__qualname__", "=", "f'{name}.{method.__name__}'", "#map class namespace for the class creation", "namespace", "=", "{", "\"__slots__\"", ":", "slots", ",", "\"__init__\"", ":", "__init__", ",", "\"__repr__\"", ":", "__repr__", ",", "\"__eq__\"", ":", "__eq__", "}", "if", "data_structure", "is", "not", "None", ":", "namespace", "[", "\"_REPR\"", "]", "=", "struct", ".", "Struct", "(", "data_structure", ")", "namespace", "[", "\"get_representation_size\"", "]", "=", "get_representation_size", "if", "docstring", ":", "namespace", "[", "\"__doc__\"", "]", "=", "docstring", "#some new mappings can be set or overload the ones defined", "if", "extra_functions", "is", "not", "None", ":", "for", "method", "in", "extra_functions", ".", "values", "(", ")", ":", "try", ":", "method", ".", 
"__qualname__", "=", "f'{name}.{method.__name__}'", "except", "AttributeError", ":", "try", ":", "method", ".", "__func__", ".", "__qualname__", "=", "f'{name}.{method.__func__.__name__}'", "except", "AttributeError", ":", "#if we got here, it is not a method or classmethod, must be an attribute", "#TODO feels like a hack, change it", "#TODO design a test for this", "pass", "namespace", "=", "{", "*", "*", "namespace", ",", "*", "*", "extra_functions", "}", "#TODO check if docstring was provided, issue a warning", "new_class", "=", "type", "(", "name", ",", "inheritance", ",", "namespace", ")", "# adapted from namedtuple code", "# For pickling to work, the __module__ variable needs to be set to the frame", "# where the named tuple is created. Bypass this step in environments where", "# sys._getframe is not defined (Jython for example) or sys._getframe is not", "# defined for arguments greater than 0 (IronPython), or where the user has", "# specified a particular module.", "try", ":", "new_class", ".", "__module__", "=", "_sys", ".", "_getframe", "(", "1", ")", ".", "f_globals", ".", "get", "(", "'__name__'", ",", "'__main__'", ")", "except", "(", "AttributeError", ",", "ValueError", ")", ":", "pass", "return", "new_class" ]
Helper function that creates a class for attribute contents. This function creates is a boilerplate to create all the expected methods of an attributes. The basic methods work in the same way for all classes. Once it executes it defines a dynamic class with the methods "__init__", "__repr__" and "__eq__" based on the fields passed in the ``fields`` parameter. If the ``data_structure`` parameter is present, the classmethod ``get_representation_size`` and the class variable ``_REPR`` will also be present. It is also possible to define the inheritance using this method by passing a list of classes in the ``inheritance`` parameter. If the ``extra_functions`` argument is present, they will be added to the class. Note: If the ``extra_functions`` has defined any of dinamically created methods, they will *replace* the ones created. Args: name (str): Name of the class that will be created. fields (tuple(str)): The attributes that will be added to the class. inherited (tuple(object)): List of objects that will be inherited by the new class extra_functions (dict(str : function)): A dictionary where the key will be the name of the function in the class and the content of the key is a function that will be bound to the class doctring (str): Class' docstring Returns: A new class with the ``name`` as it's name.
[ "Helper", "function", "that", "creates", "a", "class", "for", "attribute", "contents", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L80-L210
250,405
jldantas/libmft
libmft/attribute.py
_astimezone_ts
def _astimezone_ts(self, timezone): """Changes the time zones of all timestamps. Receives a new timezone and applies to all timestamps, if necessary. Args: timezone (:obj:`tzinfo`): Time zone to be applied Returns: A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``. """ if self.created.tzinfo is timezone: return self else: nw_obj = Timestamps((None,)*4) nw_obj.created = self.created.astimezone(timezone) nw_obj.changed = self.changed.astimezone(timezone) nw_obj.mft_changed = self.mft_changed.astimezone(timezone) nw_obj.accessed = self.accessed.astimezone(timezone) return nw_obj
python
def _astimezone_ts(self, timezone): """Changes the time zones of all timestamps. Receives a new timezone and applies to all timestamps, if necessary. Args: timezone (:obj:`tzinfo`): Time zone to be applied Returns: A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``. """ if self.created.tzinfo is timezone: return self else: nw_obj = Timestamps((None,)*4) nw_obj.created = self.created.astimezone(timezone) nw_obj.changed = self.changed.astimezone(timezone) nw_obj.mft_changed = self.mft_changed.astimezone(timezone) nw_obj.accessed = self.accessed.astimezone(timezone) return nw_obj
[ "def", "_astimezone_ts", "(", "self", ",", "timezone", ")", ":", "if", "self", ".", "created", ".", "tzinfo", "is", "timezone", ":", "return", "self", "else", ":", "nw_obj", "=", "Timestamps", "(", "(", "None", ",", ")", "*", "4", ")", "nw_obj", ".", "created", "=", "self", ".", "created", ".", "astimezone", "(", "timezone", ")", "nw_obj", ".", "changed", "=", "self", ".", "changed", ".", "astimezone", "(", "timezone", ")", "nw_obj", ".", "mft_changed", "=", "self", ".", "mft_changed", ".", "astimezone", "(", "timezone", ")", "nw_obj", ".", "accessed", "=", "self", ".", "accessed", ".", "astimezone", "(", "timezone", ")", "return", "nw_obj" ]
Changes the time zones of all timestamps. Receives a new timezone and applies to all timestamps, if necessary. Args: timezone (:obj:`tzinfo`): Time zone to be applied Returns: A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.
[ "Changes", "the", "time", "zones", "of", "all", "timestamps", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L695-L715
250,406
jldantas/libmft
libmft/attribute.py
_len_objid
def _len_objid(self): '''Get the actual size of the content, as some attributes have variable sizes''' try: return self._size except AttributeError: temp = (self.object_id, self.birth_vol_id, self.birth_object_id, self.birth_domain_id) self._size = sum([ObjectID._UUID_SIZE for data in temp if data is not None]) return self._size
python
def _len_objid(self): '''Get the actual size of the content, as some attributes have variable sizes''' try: return self._size except AttributeError: temp = (self.object_id, self.birth_vol_id, self.birth_object_id, self.birth_domain_id) self._size = sum([ObjectID._UUID_SIZE for data in temp if data is not None]) return self._size
[ "def", "_len_objid", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_size", "except", "AttributeError", ":", "temp", "=", "(", "self", ".", "object_id", ",", "self", ".", "birth_vol_id", ",", "self", ".", "birth_object_id", ",", "self", ".", "birth_domain_id", ")", "self", ".", "_size", "=", "sum", "(", "[", "ObjectID", ".", "_UUID_SIZE", "for", "data", "in", "temp", "if", "data", "is", "not", "None", "]", ")", "return", "self", ".", "_size" ]
Get the actual size of the content, as some attributes have variable sizes
[ "Get", "the", "actual", "size", "of", "the", "content", "as", "some", "attributes", "have", "variable", "sizes" ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L978-L985
250,407
jldantas/libmft
libmft/attribute.py
_allocated_entries_bitmap
def _allocated_entries_bitmap(self): '''Creates a generator that returns all allocated entries in the bitmap. Yields: int: The bit index of the allocated entries. ''' for entry_number in range(len(self._bitmap) * 8): if self.entry_allocated(entry_number): yield entry_number
python
def _allocated_entries_bitmap(self): '''Creates a generator that returns all allocated entries in the bitmap. Yields: int: The bit index of the allocated entries. ''' for entry_number in range(len(self._bitmap) * 8): if self.entry_allocated(entry_number): yield entry_number
[ "def", "_allocated_entries_bitmap", "(", "self", ")", ":", "for", "entry_number", "in", "range", "(", "len", "(", "self", ".", "_bitmap", ")", "*", "8", ")", ":", "if", "self", ".", "entry_allocated", "(", "entry_number", ")", ":", "yield", "entry_number" ]
Creates a generator that returns all allocated entries in the bitmap. Yields: int: The bit index of the allocated entries.
[ "Creates", "a", "generator", "that", "returns", "all", "allocated", "entries", "in", "the", "bitmap", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1427-L1437
250,408
jldantas/libmft
libmft/attribute.py
_entry_allocated_bitmap
def _entry_allocated_bitmap(self, entry_number): """Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise. """ index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
python
def _entry_allocated_bitmap(self, entry_number): """Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise. """ index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
[ "def", "_entry_allocated_bitmap", "(", "self", ",", "entry_number", ")", ":", "index", ",", "offset", "=", "divmod", "(", "entry_number", ",", "8", ")", "return", "bool", "(", "self", ".", "_bitmap", "[", "index", "]", "&", "(", "1", "<<", "offset", ")", ")" ]
Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise.
[ "Checks", "if", "a", "particular", "index", "is", "allocated", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1439-L1449
250,409
jldantas/libmft
libmft/attribute.py
_get_next_empty_bitmap
def _get_next_empty_bitmap(self): """Returns the next empty entry. Returns: int: The value of the empty entry """ #TODO probably not the best way, redo for i, byte in enumerate(self._bitmap): if byte != 255: for offset in range(8): if not byte & (1 << offset): return (i * 8) + offset
python
def _get_next_empty_bitmap(self): """Returns the next empty entry. Returns: int: The value of the empty entry """ #TODO probably not the best way, redo for i, byte in enumerate(self._bitmap): if byte != 255: for offset in range(8): if not byte & (1 << offset): return (i * 8) + offset
[ "def", "_get_next_empty_bitmap", "(", "self", ")", ":", "#TODO probably not the best way, redo", "for", "i", ",", "byte", "in", "enumerate", "(", "self", ".", "_bitmap", ")", ":", "if", "byte", "!=", "255", ":", "for", "offset", "in", "range", "(", "8", ")", ":", "if", "not", "byte", "&", "(", "1", "<<", "offset", ")", ":", "return", "(", "i", "*", "8", ")", "+", "offset" ]
Returns the next empty entry. Returns: int: The value of the empty entry
[ "Returns", "the", "next", "empty", "entry", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1451-L1462
250,410
jldantas/libmft
libmft/attribute.py
_len_ea_entry
def _len_ea_entry(self): '''Returns the size of the entry''' return EaEntry._REPR.size + len(self.name.encode("ascii")) + self.value_len
python
def _len_ea_entry(self): '''Returns the size of the entry''' return EaEntry._REPR.size + len(self.name.encode("ascii")) + self.value_len
[ "def", "_len_ea_entry", "(", "self", ")", ":", "return", "EaEntry", ".", "_REPR", ".", "size", "+", "len", "(", "self", ".", "name", ".", "encode", "(", "\"ascii\"", ")", ")", "+", "self", ".", "value_len" ]
Returns the size of the entry
[ "Returns", "the", "size", "of", "the", "entry" ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1750-L1752
250,411
jldantas/libmft
libmft/attribute.py
_str_sid
def _str_sid(self): 'Return a nicely formatted representation string' sub_auths = "-".join([str(sub) for sub in self.sub_authorities]) return f'S-{self.revision_number}-{self.authority}-{sub_auths}'
python
def _str_sid(self): 'Return a nicely formatted representation string' sub_auths = "-".join([str(sub) for sub in self.sub_authorities]) return f'S-{self.revision_number}-{self.authority}-{sub_auths}'
[ "def", "_str_sid", "(", "self", ")", ":", "sub_auths", "=", "\"-\"", ".", "join", "(", "[", "str", "(", "sub", ")", "for", "sub", "in", "self", ".", "sub_authorities", "]", ")", "return", "f'S-{self.revision_number}-{self.authority}-{sub_auths}'" ]
Return a nicely formatted representation string
[ "Return", "a", "nicely", "formatted", "representation", "string" ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1982-L1985
250,412
jldantas/libmft
libmft/attribute.py
_len_sec_desc
def _len_sec_desc(self): '''Returns the logical size of the file''' return len(self.header) + len(self.owner_sid) + len(self.group_sid) + len(self.sacl) + len(self.dacl)
python
def _len_sec_desc(self): '''Returns the logical size of the file''' return len(self.header) + len(self.owner_sid) + len(self.group_sid) + len(self.sacl) + len(self.dacl)
[ "def", "_len_sec_desc", "(", "self", ")", ":", "return", "len", "(", "self", ".", "header", ")", "+", "len", "(", "self", ".", "owner_sid", ")", "+", "len", "(", "self", ".", "group_sid", ")", "+", "len", "(", "self", ".", "sacl", ")", "+", "len", "(", "self", ".", "dacl", ")" ]
Returns the logical size of the file
[ "Returns", "the", "logical", "size", "of", "the", "file" ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L2269-L2271
250,413
jldantas/libmft
libmft/attribute.py
DataRuns.create_from_binary
def create_from_binary(cls, binary_view): '''Creates a new object DataRuns from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: DataRuns: New object using hte binary stream as source ''' nw_obj = cls() offset = 0 previous_dr_offset = 0 header_size = cls._INFO.size #"header" of a data run is always a byte while binary_view[offset] != 0: #the runlist ends with an 0 as the "header" header = cls._INFO.unpack(binary_view[offset:offset+header_size])[0] length_len = header & 0x0F length_offset = (header & 0xF0) >> 4 temp_len = offset+header_size+length_len #helper variable just to make things simpler dr_length = int.from_bytes(binary_view[offset+header_size:temp_len], "little", signed=False) if length_offset: #the offset is relative to the previous data run dr_offset = int.from_bytes(binary_view[temp_len:temp_len+length_offset], "little", signed=True) + previous_dr_offset previous_dr_offset = dr_offset else: #if it is sparse, requires a a different approach dr_offset = None offset += header_size + length_len + length_offset nw_obj.data_runs.append((dr_length, dr_offset)) #nw_obj.data_runs.append(DataRun(dr_length, dr_offset)) _MOD_LOGGER.debug("DataRuns object created successfully") return nw_obj
python
def create_from_binary(cls, binary_view): '''Creates a new object DataRuns from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: DataRuns: New object using hte binary stream as source ''' nw_obj = cls() offset = 0 previous_dr_offset = 0 header_size = cls._INFO.size #"header" of a data run is always a byte while binary_view[offset] != 0: #the runlist ends with an 0 as the "header" header = cls._INFO.unpack(binary_view[offset:offset+header_size])[0] length_len = header & 0x0F length_offset = (header & 0xF0) >> 4 temp_len = offset+header_size+length_len #helper variable just to make things simpler dr_length = int.from_bytes(binary_view[offset+header_size:temp_len], "little", signed=False) if length_offset: #the offset is relative to the previous data run dr_offset = int.from_bytes(binary_view[temp_len:temp_len+length_offset], "little", signed=True) + previous_dr_offset previous_dr_offset = dr_offset else: #if it is sparse, requires a a different approach dr_offset = None offset += header_size + length_len + length_offset nw_obj.data_runs.append((dr_length, dr_offset)) #nw_obj.data_runs.append(DataRun(dr_length, dr_offset)) _MOD_LOGGER.debug("DataRuns object created successfully") return nw_obj
[ "def", "create_from_binary", "(", "cls", ",", "binary_view", ")", ":", "nw_obj", "=", "cls", "(", ")", "offset", "=", "0", "previous_dr_offset", "=", "0", "header_size", "=", "cls", ".", "_INFO", ".", "size", "#\"header\" of a data run is always a byte", "while", "binary_view", "[", "offset", "]", "!=", "0", ":", "#the runlist ends with an 0 as the \"header\"", "header", "=", "cls", ".", "_INFO", ".", "unpack", "(", "binary_view", "[", "offset", ":", "offset", "+", "header_size", "]", ")", "[", "0", "]", "length_len", "=", "header", "&", "0x0F", "length_offset", "=", "(", "header", "&", "0xF0", ")", ">>", "4", "temp_len", "=", "offset", "+", "header_size", "+", "length_len", "#helper variable just to make things simpler", "dr_length", "=", "int", ".", "from_bytes", "(", "binary_view", "[", "offset", "+", "header_size", ":", "temp_len", "]", ",", "\"little\"", ",", "signed", "=", "False", ")", "if", "length_offset", ":", "#the offset is relative to the previous data run", "dr_offset", "=", "int", ".", "from_bytes", "(", "binary_view", "[", "temp_len", ":", "temp_len", "+", "length_offset", "]", ",", "\"little\"", ",", "signed", "=", "True", ")", "+", "previous_dr_offset", "previous_dr_offset", "=", "dr_offset", "else", ":", "#if it is sparse, requires a a different approach", "dr_offset", "=", "None", "offset", "+=", "header_size", "+", "length_len", "+", "length_offset", "nw_obj", ".", "data_runs", ".", "append", "(", "(", "dr_length", ",", "dr_offset", ")", ")", "#nw_obj.data_runs.append(DataRun(dr_length, dr_offset))", "_MOD_LOGGER", ".", "debug", "(", "\"DataRuns object created successfully\"", ")", "return", "nw_obj" ]
Creates a new object DataRuns from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: DataRuns: New object using hte binary stream as source
[ "Creates", "a", "new", "object", "DataRuns", "from", "a", "binary", "stream", ".", "The", "binary", "stream", "can", "be", "represented", "by", "a", "byte", "string", "bytearray", "or", "a", "memoryview", "of", "the", "bytearray", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L276-L311
250,414
jldantas/libmft
libmft/attribute.py
ResidentAttrHeader.create_from_binary
def create_from_binary(cls, binary_view): '''Creates a new object AttributeHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: AttributeHeader: New object using hte binary stream as source ''' attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \ content_len, content_offset, indexed_flag = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le") else: name = None nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (content_len, content_offset, indexed_flag)) return nw_obj
python
def create_from_binary(cls, binary_view): '''Creates a new object AttributeHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: AttributeHeader: New object using hte binary stream as source ''' attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \ content_len, content_offset, indexed_flag = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le") else: name = None nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (content_len, content_offset, indexed_flag)) return nw_obj
[ "def", "create_from_binary", "(", "cls", ",", "binary_view", ")", ":", "attr_type", ",", "attr_len", ",", "non_resident", ",", "name_len", ",", "name_offset", ",", "flags", ",", "attr_id", ",", "content_len", ",", "content_offset", ",", "indexed_flag", "=", "cls", ".", "_REPR", ".", "unpack", "(", "binary_view", "[", ":", "cls", ".", "_REPR", ".", "size", "]", ")", "if", "name_len", ":", "name", "=", "binary_view", "[", "name_offset", ":", "name_offset", "+", "(", "2", "*", "name_len", ")", "]", ".", "tobytes", "(", ")", ".", "decode", "(", "\"utf_16_le\"", ")", "else", ":", "name", "=", "None", "nw_obj", "=", "cls", "(", "(", "AttrTypes", "(", "attr_type", ")", ",", "attr_len", ",", "bool", "(", "non_resident", ")", ",", "AttrFlags", "(", "flags", ")", ",", "attr_id", ",", "name", ")", ",", "(", "content_len", ",", "content_offset", ",", "indexed_flag", ")", ")", "return", "nw_obj" ]
Creates a new object AttributeHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: binary_view (memoryview of bytearray) - A binary stream with the information of the attribute Returns: AttributeHeader: New object using hte binary stream as source
[ "Creates", "a", "new", "object", "AttributeHeader", "from", "a", "binary", "stream", ".", "The", "binary", "stream", "can", "be", "represented", "by", "a", "byte", "string", "bytearray", "or", "a", "memoryview", "of", "the", "bytearray", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L470-L493
250,415
jldantas/libmft
libmft/attribute.py
NonResidentAttrHeader.create_from_binary
def create_from_binary(cls, load_dataruns, binary_view): '''Creates a new object NonResidentAttrHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: load_dataruns (bool) - Indicates if the dataruns are to be loaded binary_view (memoryview of bytearray) - A binary stream with the information of the attribute non_resident_offset (int) - The offset where the non resident header begins Returns: NonResidentAttrHeader: New object using hte binary stream as source ''' attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \ start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, \ init_sstream = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le") else: name = None #content = cls._REPR.unpack(binary_view[non_resident_offset:non_resident_offset+cls._REPR.size]) nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, init_sstream)) if load_dataruns: nw_obj.data_runs = DataRuns.create_from_binary(binary_view[nw_obj.rl_offset:]) _MOD_LOGGER.debug("NonResidentAttrHeader object created successfully") return nw_obj
python
def create_from_binary(cls, load_dataruns, binary_view): '''Creates a new object NonResidentAttrHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: load_dataruns (bool) - Indicates if the dataruns are to be loaded binary_view (memoryview of bytearray) - A binary stream with the information of the attribute non_resident_offset (int) - The offset where the non resident header begins Returns: NonResidentAttrHeader: New object using hte binary stream as source ''' attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \ start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, \ init_sstream = cls._REPR.unpack(binary_view[:cls._REPR.size]) if name_len: name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le") else: name = None #content = cls._REPR.unpack(binary_view[non_resident_offset:non_resident_offset+cls._REPR.size]) nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name), (start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, init_sstream)) if load_dataruns: nw_obj.data_runs = DataRuns.create_from_binary(binary_view[nw_obj.rl_offset:]) _MOD_LOGGER.debug("NonResidentAttrHeader object created successfully") return nw_obj
[ "def", "create_from_binary", "(", "cls", ",", "load_dataruns", ",", "binary_view", ")", ":", "attr_type", ",", "attr_len", ",", "non_resident", ",", "name_len", ",", "name_offset", ",", "flags", ",", "attr_id", ",", "start_vcn", ",", "end_vcn", ",", "rl_offset", ",", "compress_usize", ",", "alloc_sstream", ",", "curr_sstream", ",", "init_sstream", "=", "cls", ".", "_REPR", ".", "unpack", "(", "binary_view", "[", ":", "cls", ".", "_REPR", ".", "size", "]", ")", "if", "name_len", ":", "name", "=", "binary_view", "[", "name_offset", ":", "name_offset", "+", "(", "2", "*", "name_len", ")", "]", ".", "tobytes", "(", ")", ".", "decode", "(", "\"utf_16_le\"", ")", "else", ":", "name", "=", "None", "#content = cls._REPR.unpack(binary_view[non_resident_offset:non_resident_offset+cls._REPR.size])", "nw_obj", "=", "cls", "(", "(", "AttrTypes", "(", "attr_type", ")", ",", "attr_len", ",", "bool", "(", "non_resident", ")", ",", "AttrFlags", "(", "flags", ")", ",", "attr_id", ",", "name", ")", ",", "(", "start_vcn", ",", "end_vcn", ",", "rl_offset", ",", "compress_usize", ",", "alloc_sstream", ",", "curr_sstream", ",", "init_sstream", ")", ")", "if", "load_dataruns", ":", "nw_obj", ".", "data_runs", "=", "DataRuns", ".", "create_from_binary", "(", "binary_view", "[", "nw_obj", ".", "rl_offset", ":", "]", ")", "_MOD_LOGGER", ".", "debug", "(", "\"NonResidentAttrHeader object created successfully\"", ")", "return", "nw_obj" ]
Creates a new object NonResidentAttrHeader from a binary stream. The binary stream can be represented by a byte string, bytearray or a memoryview of the bytearray. Args: load_dataruns (bool) - Indicates if the dataruns are to be loaded binary_view (memoryview of bytearray) - A binary stream with the information of the attribute non_resident_offset (int) - The offset where the non resident header begins Returns: NonResidentAttrHeader: New object using hte binary stream as source
[ "Creates", "a", "new", "object", "NonResidentAttrHeader", "from", "a", "binary", "stream", ".", "The", "binary", "stream", "can", "be", "represented", "by", "a", "byte", "string", "bytearray", "or", "a", "memoryview", "of", "the", "bytearray", "." ]
65a988605fe7663b788bd81dcb52c0a4eaad1549
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L561-L593
250,416
openbermuda/ripl
ripl/slide2png.py
Slide2png.write_slide_list
def write_slide_list(self, logname, slides): """ Write list of slides to logfile """ # Write slides.txt with list of slides with open('%s/%s' % (self.cache, logname), 'w') as logfile: for slide in slides: heading = slide['heading']['text'] filename = self.get_image_name(heading) print('%s,%d' % (filename, slide.get('time', 0)), file=logfile)
python
def write_slide_list(self, logname, slides): """ Write list of slides to logfile """ # Write slides.txt with list of slides with open('%s/%s' % (self.cache, logname), 'w') as logfile: for slide in slides: heading = slide['heading']['text'] filename = self.get_image_name(heading) print('%s,%d' % (filename, slide.get('time', 0)), file=logfile)
[ "def", "write_slide_list", "(", "self", ",", "logname", ",", "slides", ")", ":", "# Write slides.txt with list of slides", "with", "open", "(", "'%s/%s'", "%", "(", "self", ".", "cache", ",", "logname", ")", ",", "'w'", ")", "as", "logfile", ":", "for", "slide", "in", "slides", ":", "heading", "=", "slide", "[", "'heading'", "]", "[", "'text'", "]", "filename", "=", "self", ".", "get_image_name", "(", "heading", ")", "print", "(", "'%s,%d'", "%", "(", "filename", ",", "slide", ".", "get", "(", "'time'", ",", "0", ")", ")", ",", "file", "=", "logfile", ")" ]
Write list of slides to logfile
[ "Write", "list", "of", "slides", "to", "logfile" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/slide2png.py#L58-L67
250,417
openbermuda/ripl
ripl/slide2png.py
Slide2png.rotate
def rotate(self, img): """ Rotate image if exif says it needs it """ try: exif = image2exif.get_exif(img) except AttributeError: # image format doesn't support exif return img orientation = exif.get('Orientation', 1) landscape = img.height < img.width if orientation == 6 and landscape: print("ROTATING") return img.rotate(-90) return img
python
def rotate(self, img): """ Rotate image if exif says it needs it """ try: exif = image2exif.get_exif(img) except AttributeError: # image format doesn't support exif return img orientation = exif.get('Orientation', 1) landscape = img.height < img.width if orientation == 6 and landscape: print("ROTATING") return img.rotate(-90) return img
[ "def", "rotate", "(", "self", ",", "img", ")", ":", "try", ":", "exif", "=", "image2exif", ".", "get_exif", "(", "img", ")", "except", "AttributeError", ":", "# image format doesn't support exif", "return", "img", "orientation", "=", "exif", ".", "get", "(", "'Orientation'", ",", "1", ")", "landscape", "=", "img", ".", "height", "<", "img", ".", "width", "if", "orientation", "==", "6", "and", "landscape", ":", "print", "(", "\"ROTATING\"", ")", "return", "img", ".", "rotate", "(", "-", "90", ")", "return", "img" ]
Rotate image if exif says it needs it
[ "Rotate", "image", "if", "exif", "says", "it", "needs", "it" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/slide2png.py#L147-L163
250,418
openbermuda/ripl
ripl/slide2png.py
Slide2png.draw_image
def draw_image(self, image, item, source): """ Add an image to the image """ top, left = item['top'], item['left'] width, height = item['width'], item['height'] image_file = item['image'] img = Image.open(source) img = self.rotate(img) iwidth, iheight = img.size wratio = width / iwidth hratio = height / iheight ratio = min(wratio, hratio) img = img.resize((int(iwidth * ratio), int(iheight * ratio)), Image.ANTIALIAS) # get updated image size iwidth, iheight = img.size # Adjust top, left for actual size of image so centre # is in the same place as it would have been top += (height - iheight) // 2 left += (width - iwidth) // 2 # now paste the image image.paste(img, (left, top))
python
def draw_image(self, image, item, source): """ Add an image to the image """ top, left = item['top'], item['left'] width, height = item['width'], item['height'] image_file = item['image'] img = Image.open(source) img = self.rotate(img) iwidth, iheight = img.size wratio = width / iwidth hratio = height / iheight ratio = min(wratio, hratio) img = img.resize((int(iwidth * ratio), int(iheight * ratio)), Image.ANTIALIAS) # get updated image size iwidth, iheight = img.size # Adjust top, left for actual size of image so centre # is in the same place as it would have been top += (height - iheight) // 2 left += (width - iwidth) // 2 # now paste the image image.paste(img, (left, top))
[ "def", "draw_image", "(", "self", ",", "image", ",", "item", ",", "source", ")", ":", "top", ",", "left", "=", "item", "[", "'top'", "]", ",", "item", "[", "'left'", "]", "width", ",", "height", "=", "item", "[", "'width'", "]", ",", "item", "[", "'height'", "]", "image_file", "=", "item", "[", "'image'", "]", "img", "=", "Image", ".", "open", "(", "source", ")", "img", "=", "self", ".", "rotate", "(", "img", ")", "iwidth", ",", "iheight", "=", "img", ".", "size", "wratio", "=", "width", "/", "iwidth", "hratio", "=", "height", "/", "iheight", "ratio", "=", "min", "(", "wratio", ",", "hratio", ")", "img", "=", "img", ".", "resize", "(", "(", "int", "(", "iwidth", "*", "ratio", ")", ",", "int", "(", "iheight", "*", "ratio", ")", ")", ",", "Image", ".", "ANTIALIAS", ")", "# get updated image size", "iwidth", ",", "iheight", "=", "img", ".", "size", "# Adjust top, left for actual size of image so centre", "# is in the same place as it would have been", "top", "+=", "(", "height", "-", "iheight", ")", "//", "2", "left", "+=", "(", "width", "-", "iwidth", ")", "//", "2", "# now paste the image", "image", ".", "paste", "(", "img", ",", "(", "left", ",", "top", ")", ")" ]
Add an image to the image
[ "Add", "an", "image", "to", "the", "image" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/slide2png.py#L166-L198
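The resize-and-centre arithmetic in draw_image above, worked through with concrete (made-up) numbers: the scaling factor is the smaller of the two ratios so the picture fits inside the box, and the leftover space is split evenly to centre it.

# Fit a 2048x1536 picture into a 512x512 box, as draw_image does.
width, height = 512, 512            # box taken from the layout item
iwidth, iheight = 2048, 1536        # size of the opened picture
ratio = min(width / iwidth, height / iheight)              # min(0.25, 0.333...) == 0.25
new_w, new_h = int(iwidth * ratio), int(iheight * ratio)   # 512, 384
top_offset = (height - new_h) // 2                         # 64 -> vertical centring
left_offset = (width - new_w) // 2                         # 0
print(new_w, new_h, top_offset, left_offset)               # -> 512 384 64 0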
250,419
openbermuda/ripl
ripl/slide2png.py
Slide2png.slugify
def slugify(self, name): """ Turn name into a slug suitable for an image file name """ slug = '' last = '' for char in name.replace('#', '').lower().strip(): if not char.isalnum(): char = '_' if last == '_' and char == '_': continue slug += char last = char return slug
python
def slugify(self, name): """ Turn name into a slug suitable for an image file name """ slug = '' last = '' for char in name.replace('#', '').lower().strip(): if not char.isalnum(): char = '_' if last == '_' and char == '_': continue slug += char last = char return slug
[ "def", "slugify", "(", "self", ",", "name", ")", ":", "slug", "=", "''", "last", "=", "''", "for", "char", "in", "name", ".", "replace", "(", "'#'", ",", "''", ")", ".", "lower", "(", ")", ".", "strip", "(", ")", ":", "if", "not", "char", ".", "isalnum", "(", ")", ":", "char", "=", "'_'", "if", "last", "==", "'_'", "and", "char", "==", "'_'", ":", "continue", "slug", "+=", "char", "last", "=", "char", "return", "slug" ]
Turn name into a slug suitable for an image file name
[ "Turn", "name", "into", "a", "slug", "suitable", "for", "an", "image", "file", "name" ]
4886b1a697e4b81c2202db9cb977609e034f8e70
https://github.com/openbermuda/ripl/blob/4886b1a697e4b81c2202db9cb977609e034f8e70/ripl/slide2png.py#L201-L215
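A quick check of the underscore-collapsing behaviour, using a local copy of the loop above; the heading strings are invented for the example.

def slugify(name):
    # Local copy of the loop in the record above, kept only for this example.
    slug, last = '', ''
    for char in name.replace('#', '').lower().strip():
        if not char.isalnum():
            char = '_'
        if last == '_' and char == '_':
            continue
        slug += char
        last = char
    return slug

assert slugify("# Why RIPL?") == "why_ripl_"
assert slugify("Intro -- part 1") == "intro_part_1"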
250,420
hapylestat/apputils
apputils/utils/storages/in_memory.py
InMemoryKeyStore.list_keys
def list_keys(self): """ Returns list of the available keys :return: List of the keys available in the storage :rtype list """ return [k for k, el in self._keystore.items() if not el.is_expired]
python
def list_keys(self): """ Returns list of the available keys :return: List of the keys available in the storage :rtype list """ return [k for k, el in self._keystore.items() if not el.is_expired]
[ "def", "list_keys", "(", "self", ")", ":", "return", "[", "k", "for", "k", ",", "el", "in", "self", ".", "_keystore", ".", "items", "(", ")", "if", "not", "el", ".", "is_expired", "]" ]
Returns list of the available keys :return: List of the keys available in the storage :rtype list
[ "Returns", "list", "of", "the", "available", "keys" ]
5d185616feda27e6e21273307161471ef11a3518
https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/utils/storages/in_memory.py#L59-L66
250,421
hapylestat/apputils
apputils/utils/storages/in_memory.py
InMemoryKeyStore.set
def set(self, key, value, expire_in=None): """ Function to set or change particular property in the storage :param key: key name :param value: value to set :param expire_in: seconds to expire key :type key str :type expire_in int """ if key not in self._keystore: self._keystore[key] = InMemoryItemValue(expire_in=expire_in) k = self._keystore[key] """:type k InMemoryItemValue""" k.update_expire_time(expire_in) k.value = value
python
def set(self, key, value, expire_in=None): """ Function to set or change particular property in the storage :param key: key name :param value: value to set :param expire_in: seconds to expire key :type key str :type expire_in int """ if key not in self._keystore: self._keystore[key] = InMemoryItemValue(expire_in=expire_in) k = self._keystore[key] """:type k InMemoryItemValue""" k.update_expire_time(expire_in) k.value = value
[ "def", "set", "(", "self", ",", "key", ",", "value", ",", "expire_in", "=", "None", ")", ":", "if", "key", "not", "in", "self", ".", "_keystore", ":", "self", ".", "_keystore", "[", "key", "]", "=", "InMemoryItemValue", "(", "expire_in", "=", "expire_in", ")", "k", "=", "self", ".", "_keystore", "[", "key", "]", "\"\"\":type k InMemoryItemValue\"\"\"", "k", ".", "update_expire_time", "(", "expire_in", ")", "k", ".", "value", "=", "value" ]
Function to set or change particular property in the storage :param key: key name :param value: value to set :param expire_in: seconds to expire key :type key str :type expire_in int
[ "Function", "to", "set", "or", "change", "particular", "property", "in", "the", "storage" ]
5d185616feda27e6e21273307161471ef11a3518
https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/utils/storages/in_memory.py#L68-L84
250,422
hapylestat/apputils
apputils/utils/storages/in_memory.py
InMemoryKeyStore.get
def get(self, key): """ Retrieves previously stored key from the storage :return value, stored in the storage """ if key not in self._keystore: return None rec = self._keystore[key] """:type rec InMemoryItemValue""" if rec.is_expired: self.delete(key) return None return rec.value
python
def get(self, key): """ Retrieves previously stored key from the storage :return value, stored in the storage """ if key not in self._keystore: return None rec = self._keystore[key] """:type rec InMemoryItemValue""" if rec.is_expired: self.delete(key) return None return rec.value
[ "def", "get", "(", "self", ",", "key", ")", ":", "if", "key", "not", "in", "self", ".", "_keystore", ":", "return", "None", "rec", "=", "self", ".", "_keystore", "[", "key", "]", "\"\"\":type rec InMemoryItemValue\"\"\"", "if", "rec", ".", "is_expired", ":", "self", ".", "delete", "(", "key", ")", "return", "None", "return", "rec", ".", "value" ]
Retrieves previously stored key from the storage :return value, stored in the storage
[ "Retrieves", "previously", "stored", "key", "from", "the", "storage" ]
5d185616feda27e6e21273307161471ef11a3518
https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/utils/storages/in_memory.py#L86-L102
250,423
hapylestat/apputils
apputils/utils/storages/in_memory.py
InMemoryKeyStore.exists
def exists(self, key): """ Check if the particular key exists in the storage :param key: name of the key which existence need to be checked :return: :type key str :rtype bool """ if key in self._keystore and not self._keystore[key].is_expired: return True elif key in self._keystore and self._keystore[key].is_expired: self.delete(key) return False return False
python
def exists(self, key): """ Check if the particular key exists in the storage :param key: name of the key which existence need to be checked :return: :type key str :rtype bool """ if key in self._keystore and not self._keystore[key].is_expired: return True elif key in self._keystore and self._keystore[key].is_expired: self.delete(key) return False return False
[ "def", "exists", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "_keystore", "and", "not", "self", ".", "_keystore", "[", "key", "]", ".", "is_expired", ":", "return", "True", "elif", "key", "in", "self", ".", "_keystore", "and", "self", ".", "_keystore", "[", "key", "]", ".", "is_expired", ":", "self", ".", "delete", "(", "key", ")", "return", "False", "return", "False" ]
Check if the particular key exists in the storage :param key: name of the key which existence need to be checked :return: :type key str :rtype bool
[ "Check", "if", "the", "particular", "key", "exists", "in", "the", "storage" ]
5d185616feda27e6e21273307161471ef11a3518
https://github.com/hapylestat/apputils/blob/5d185616feda27e6e21273307161471ef11a3518/apputils/utils/storages/in_memory.py#L104-L120
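A hedged usage sketch tying the four InMemoryKeyStore records above together. The import path follows the path field of the records; the no-argument constructor is an assumption, since __init__ is not shown.

from apputils.utils.storages.in_memory import InMemoryKeyStore

store = InMemoryKeyStore()                    # assumed no-arg constructor
store.set("token", "abc123", expire_in=60)    # key lives for 60 seconds
assert store.exists("token")
assert store.get("token") == "abc123"
assert "token" in store.list_keys()
# Once the 60 seconds pass, exists()/get() silently delete the key and
# return False/None, and list_keys() stops reporting it.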
250,424
calvinku96/labreporthelper
labreporthelper/dataset.py
DataSets.plot_2d_single
def plot_2d_single(x, y, pdffilename, **kwargs): """ Do make_2d_single_plot and pass all arguments args: x: array_like xdata y: array_like ydata filepath: string filepath of pdf to save **kwargs: figure_options: passed to matplotlib.pyplot.figure xlabel_options: dict kwargs passed in set_xlabel ylabel_options: dict kwargs passed in set_ylabel suptitle_options: dict kwargs passed in figure.suptitle title_options: dict kwargs passed in set_title scilimits: tuple if number outside this limits, will use scientific notation errors: dictionary, array_like, scalar dictionary: {"xerr": xerr, "yerr": yerr} array_like, scalar: yerr fmt: string, default="k." line format bestfitfmt: string, default="k-" bestfit line format bestfit: BestFit child class eg. bestfit.polyfit.PolyFit, bestfit.logfit.LogFit bestfitlim: tuple, default=None xlim for bestfit line suptitle: string, default=xlim suptitle of pdf plot, formatted with outputdict suptitle_fontsize: int, default=15 font size of suptitle title: string, default=None title of the pdf plot title_fontsize: int, default=12 font size of title, formatted with outputdict xlabel: string, default=None label of string xlabel, formatted with outputdict ylabel: string, default=None label of string ylabel, formatted with outputdict xlim: tuple, default=None xlim ylim: tuple, default=None ylim outputdict: dictionary, default=None pass keys and arguments for formatting and to output """ pdffilepath = DataSets.get_pdffilepath(pdffilename) plotsingle2d = PlotSingle2D(x, y, pdffilepath, **kwargs) return plotsingle2d.plot()
python
def plot_2d_single(x, y, pdffilename, **kwargs): """ Do make_2d_single_plot and pass all arguments args: x: array_like xdata y: array_like ydata filepath: string filepath of pdf to save **kwargs: figure_options: passed to matplotlib.pyplot.figure xlabel_options: dict kwargs passed in set_xlabel ylabel_options: dict kwargs passed in set_ylabel suptitle_options: dict kwargs passed in figure.suptitle title_options: dict kwargs passed in set_title scilimits: tuple if number outside this limits, will use scientific notation errors: dictionary, array_like, scalar dictionary: {"xerr": xerr, "yerr": yerr} array_like, scalar: yerr fmt: string, default="k." line format bestfitfmt: string, default="k-" bestfit line format bestfit: BestFit child class eg. bestfit.polyfit.PolyFit, bestfit.logfit.LogFit bestfitlim: tuple, default=None xlim for bestfit line suptitle: string, default=xlim suptitle of pdf plot, formatted with outputdict suptitle_fontsize: int, default=15 font size of suptitle title: string, default=None title of the pdf plot title_fontsize: int, default=12 font size of title, formatted with outputdict xlabel: string, default=None label of string xlabel, formatted with outputdict ylabel: string, default=None label of string ylabel, formatted with outputdict xlim: tuple, default=None xlim ylim: tuple, default=None ylim outputdict: dictionary, default=None pass keys and arguments for formatting and to output """ pdffilepath = DataSets.get_pdffilepath(pdffilename) plotsingle2d = PlotSingle2D(x, y, pdffilepath, **kwargs) return plotsingle2d.plot()
[ "def", "plot_2d_single", "(", "x", ",", "y", ",", "pdffilename", ",", "*", "*", "kwargs", ")", ":", "pdffilepath", "=", "DataSets", ".", "get_pdffilepath", "(", "pdffilename", ")", "plotsingle2d", "=", "PlotSingle2D", "(", "x", ",", "y", ",", "pdffilepath", ",", "*", "*", "kwargs", ")", "return", "plotsingle2d", ".", "plot", "(", ")" ]
Do make_2d_single_plot and pass all arguments args: x: array_like xdata y: array_like ydata filepath: string filepath of pdf to save **kwargs: figure_options: passed to matplotlib.pyplot.figure xlabel_options: dict kwargs passed in set_xlabel ylabel_options: dict kwargs passed in set_ylabel suptitle_options: dict kwargs passed in figure.suptitle title_options: dict kwargs passed in set_title scilimits: tuple if number outside this limits, will use scientific notation errors: dictionary, array_like, scalar dictionary: {"xerr": xerr, "yerr": yerr} array_like, scalar: yerr fmt: string, default="k." line format bestfitfmt: string, default="k-" bestfit line format bestfit: BestFit child class eg. bestfit.polyfit.PolyFit, bestfit.logfit.LogFit bestfitlim: tuple, default=None xlim for bestfit line suptitle: string, default=xlim suptitle of pdf plot, formatted with outputdict suptitle_fontsize: int, default=15 font size of suptitle title: string, default=None title of the pdf plot title_fontsize: int, default=12 font size of title, formatted with outputdict xlabel: string, default=None label of string xlabel, formatted with outputdict ylabel: string, default=None label of string ylabel, formatted with outputdict xlim: tuple, default=None xlim ylim: tuple, default=None ylim outputdict: dictionary, default=None pass keys and arguments for formatting and to output
[ "Do", "make_2d_single_plot", "and", "pass", "all", "arguments" ]
4d436241f389c02eb188c313190df62ab28c3763
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/dataset.py#L73-L129
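A possible call of plot_2d_single, written as if it were a staticmethod and assuming the project's ROOT_DIR/PURPOSE settings (which drive get_pdffilepath below) are already configured; the data, labels and the "linear_fit" file name are invented.

from labreporthelper.dataset import DataSets   # module path taken from the record

x = [1, 2, 3, 4]
y = [2.1, 3.9, 6.2, 8.0]
# Saves <plots folder>/linear_fit.pdf somewhere under the configured ROOT_DIR.
DataSets.plot_2d_single(
    x, y, "linear_fit",
    xlabel="x (s)", ylabel="y (m)",
    suptitle="Example plot",
    fmt="k.",
)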
250,425
calvinku96/labreporthelper
labreporthelper/dataset.py
DataSets.get_pdffilepath
def get_pdffilepath(pdffilename): """ Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf """ return FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=pdffilename, folder=PURPOSE.get("plots").get("folder", "plots"), ext=PURPOSE.get("plots").get("extension", "pdf") )
python
def get_pdffilepath(pdffilename): """ Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf """ return FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=pdffilename, folder=PURPOSE.get("plots").get("folder", "plots"), ext=PURPOSE.get("plots").get("extension", "pdf") )
[ "def", "get_pdffilepath", "(", "pdffilename", ")", ":", "return", "FILEPATHSTR", ".", "format", "(", "root_dir", "=", "ROOT_DIR", ",", "os_sep", "=", "os", ".", "sep", ",", "os_extsep", "=", "os", ".", "extsep", ",", "name", "=", "pdffilename", ",", "folder", "=", "PURPOSE", ".", "get", "(", "\"plots\"", ")", ".", "get", "(", "\"folder\"", ",", "\"plots\"", ")", ",", "ext", "=", "PURPOSE", ".", "get", "(", "\"plots\"", ")", ".", "get", "(", "\"extension\"", ",", "\"pdf\"", ")", ")" ]
Returns the path for the pdf file args: pdffilename: string returns path for the plots folder / pdffilename.pdf
[ "Returns", "the", "path", "for", "the", "pdf", "file" ]
4d436241f389c02eb188c313190df62ab28c3763
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/dataset.py#L132-L145
250,426
calvinku96/labreporthelper
labreporthelper/dataset.py
DataSets.make_tex_table
def make_tex_table(inputlist, outputfilename, fmt=None, **kwargs): """ Do make_tex_table and pass all arguments args: inputlist: list outputfilename: string fmt: dictionary key: integer column index starting with 0 values: string format string. eg "{:g}" **kwargs: nonestring: string string when objecttype is None """ outputfilepath = FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=outputfilename, folder=PURPOSE.get("tables").get("folder", "tables"), ext=PURPOSE.get("tables").get("extension", "tex") ) table.make_tex_table(inputlist, open(outputfilepath, 'wb'), fmt=fmt, close=kwargs.get("close", True), **kwargs)
python
def make_tex_table(inputlist, outputfilename, fmt=None, **kwargs): """ Do make_tex_table and pass all arguments args: inputlist: list outputfilename: string fmt: dictionary key: integer column index starting with 0 values: string format string. eg "{:g}" **kwargs: nonestring: string string when objecttype is None """ outputfilepath = FILEPATHSTR.format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=outputfilename, folder=PURPOSE.get("tables").get("folder", "tables"), ext=PURPOSE.get("tables").get("extension", "tex") ) table.make_tex_table(inputlist, open(outputfilepath, 'wb'), fmt=fmt, close=kwargs.get("close", True), **kwargs)
[ "def", "make_tex_table", "(", "inputlist", ",", "outputfilename", ",", "fmt", "=", "None", ",", "*", "*", "kwargs", ")", ":", "outputfilepath", "=", "FILEPATHSTR", ".", "format", "(", "root_dir", "=", "ROOT_DIR", ",", "os_sep", "=", "os", ".", "sep", ",", "os_extsep", "=", "os", ".", "extsep", ",", "name", "=", "outputfilename", ",", "folder", "=", "PURPOSE", ".", "get", "(", "\"tables\"", ")", ".", "get", "(", "\"folder\"", ",", "\"tables\"", ")", ",", "ext", "=", "PURPOSE", ".", "get", "(", "\"tables\"", ")", ".", "get", "(", "\"extension\"", ",", "\"tex\"", ")", ")", "table", ".", "make_tex_table", "(", "inputlist", ",", "open", "(", "outputfilepath", ",", "'wb'", ")", ",", "fmt", "=", "fmt", ",", "close", "=", "kwargs", ".", "get", "(", "\"close\"", ",", "True", ")", ",", "*", "*", "kwargs", ")" ]
Do make_tex_table and pass all arguments args: inputlist: list outputfilename: string fmt: dictionary key: integer column index starting with 0 values: string format string. eg "{:g}" **kwargs: nonestring: string string when objecttype is None
[ "Do", "make_tex_table", "and", "pass", "all", "arguments" ]
4d436241f389c02eb188c313190df62ab28c3763
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/dataset.py#L148-L171
250,427
calvinku96/labreporthelper
labreporthelper/dataset.py
DataSets.make_compute_file
def make_compute_file(self): """ Make the compute file from the self.vardict and self.vardictformat """ string = "" try: vardict_items = self.vardict.iteritems() except AttributeError: vardict_items = self.vardict.items() for key, val in vardict_items: # get default default_format = get_default_format(val) string_format = "\\newcommand{{\\{}}}{{" + self.vardictformat.get( key, default_format) + "}}\n" string += string_format.format(key, val).replace("+", "") # get settings compute_file = open( "{root_dir}{os_sep}{name}{os_extsep}{ext}".format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=SETTINGS["COMPUTE"]["name"], ext=SETTINGS["COMPUTE"]["extension"] ), "wb") compute_file.write(string) compute_file.close()
python
def make_compute_file(self): """ Make the compute file from the self.vardict and self.vardictformat """ string = "" try: vardict_items = self.vardict.iteritems() except AttributeError: vardict_items = self.vardict.items() for key, val in vardict_items: # get default default_format = get_default_format(val) string_format = "\\newcommand{{\\{}}}{{" + self.vardictformat.get( key, default_format) + "}}\n" string += string_format.format(key, val).replace("+", "") # get settings compute_file = open( "{root_dir}{os_sep}{name}{os_extsep}{ext}".format( root_dir=ROOT_DIR, os_sep=os.sep, os_extsep=os.extsep, name=SETTINGS["COMPUTE"]["name"], ext=SETTINGS["COMPUTE"]["extension"] ), "wb") compute_file.write(string) compute_file.close()
[ "def", "make_compute_file", "(", "self", ")", ":", "string", "=", "\"\"", "try", ":", "vardict_items", "=", "self", ".", "vardict", ".", "iteritems", "(", ")", "except", "AttributeError", ":", "vardict_items", "=", "self", ".", "vardict", ".", "items", "(", ")", "for", "key", ",", "val", "in", "vardict_items", ":", "# get default", "default_format", "=", "get_default_format", "(", "val", ")", "string_format", "=", "\"\\\\newcommand{{\\\\{}}}{{\"", "+", "self", ".", "vardictformat", ".", "get", "(", "key", ",", "default_format", ")", "+", "\"}}\\n\"", "string", "+=", "string_format", ".", "format", "(", "key", ",", "val", ")", ".", "replace", "(", "\"+\"", ",", "\"\"", ")", "# get settings", "compute_file", "=", "open", "(", "\"{root_dir}{os_sep}{name}{os_extsep}{ext}\"", ".", "format", "(", "root_dir", "=", "ROOT_DIR", ",", "os_sep", "=", "os", ".", "sep", ",", "os_extsep", "=", "os", ".", "extsep", ",", "name", "=", "SETTINGS", "[", "\"COMPUTE\"", "]", "[", "\"name\"", "]", ",", "ext", "=", "SETTINGS", "[", "\"COMPUTE\"", "]", "[", "\"extension\"", "]", ")", ",", "\"wb\"", ")", "compute_file", ".", "write", "(", "string", ")", "compute_file", ".", "close", "(", ")" ]
Make the compute file from the self.vardict and self.vardictformat
[ "Make", "the", "compute", "file", "from", "the", "self", ".", "vardict", "and", "self", ".", "vardictformat" ]
4d436241f389c02eb188c313190df62ab28c3763
https://github.com/calvinku96/labreporthelper/blob/4d436241f389c02eb188c313190df62ab28c3763/labreporthelper/dataset.py#L173-L196
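How one vardict entry turns into a LaTeX macro under the string building above, with illustrative values (the key, value and format string are made up):

key, val, fmt = "gravity", 9.81, "{:.2f}"
string_format = "\\newcommand{{\\{}}}{{" + fmt + "}}\n"
line = string_format.format(key, val).replace("+", "")
print(line, end="")     # -> \newcommand{\gravity}{9.81}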
250,428
KnowledgeLinks/rdfframework
rdfframework/search/esmappings.py
EsMappings.get_es_mappings
def get_es_mappings(self): """ Returns the mapping defitions presetn in elasticsearh """ es_mappings = json.loads(requests.get(self.mapping_url).text) es_mappings = {"_".join(key.split("_")[:-1]): value['mappings'] \ for key, value in es_mappings.items()} return es_mappings
python
def get_es_mappings(self): """ Returns the mapping defitions presetn in elasticsearh """ es_mappings = json.loads(requests.get(self.mapping_url).text) es_mappings = {"_".join(key.split("_")[:-1]): value['mappings'] \ for key, value in es_mappings.items()} return es_mappings
[ "def", "get_es_mappings", "(", "self", ")", ":", "es_mappings", "=", "json", ".", "loads", "(", "requests", ".", "get", "(", "self", ".", "mapping_url", ")", ".", "text", ")", "es_mappings", "=", "{", "\"_\"", ".", "join", "(", "key", ".", "split", "(", "\"_\"", ")", "[", ":", "-", "1", "]", ")", ":", "value", "[", "'mappings'", "]", "for", "key", ",", "value", "in", "es_mappings", ".", "items", "(", ")", "}", "return", "es_mappings" ]
Returns the mapping definitions present in Elasticsearch
[ "Returns", "the", "mapping", "definitions", "present", "in", "Elasticsearch" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/esmappings.py#L225-L233
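The dict comprehension above strips a trailing "_<suffix>" from each index name returned by Elasticsearch; for example (index names invented):

for key in ("articles_v2", "news_items_20190101"):
    print("_".join(key.split("_")[:-1]))
# -> articles
# -> news_items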
250,429
shaunduncan/helga-reminders
helga_reminders.py
readable_time_delta
def readable_time_delta(seconds): """ Convert a number of seconds into readable days, hours, and minutes """ days = seconds // 86400 seconds -= days * 86400 hours = seconds // 3600 seconds -= hours * 3600 minutes = seconds // 60 m_suffix = 's' if minutes != 1 else '' h_suffix = 's' if hours != 1 else '' d_suffix = 's' if days != 1 else '' retval = u'{0} minute{1}'.format(minutes, m_suffix) if hours != 0: retval = u'{0} hour{1} and {2}'.format(hours, h_suffix, retval) if days != 0: retval = u'{0} day{1}, {2}'.format(days, d_suffix, retval) return retval
python
def readable_time_delta(seconds): """ Convert a number of seconds into readable days, hours, and minutes """ days = seconds // 86400 seconds -= days * 86400 hours = seconds // 3600 seconds -= hours * 3600 minutes = seconds // 60 m_suffix = 's' if minutes != 1 else '' h_suffix = 's' if hours != 1 else '' d_suffix = 's' if days != 1 else '' retval = u'{0} minute{1}'.format(minutes, m_suffix) if hours != 0: retval = u'{0} hour{1} and {2}'.format(hours, h_suffix, retval) if days != 0: retval = u'{0} day{1}, {2}'.format(days, d_suffix, retval) return retval
[ "def", "readable_time_delta", "(", "seconds", ")", ":", "days", "=", "seconds", "//", "86400", "seconds", "-=", "days", "*", "86400", "hours", "=", "seconds", "//", "3600", "seconds", "-=", "hours", "*", "3600", "minutes", "=", "seconds", "//", "60", "m_suffix", "=", "'s'", "if", "minutes", "!=", "1", "else", "''", "h_suffix", "=", "'s'", "if", "hours", "!=", "1", "else", "''", "d_suffix", "=", "'s'", "if", "days", "!=", "1", "else", "''", "retval", "=", "u'{0} minute{1}'", ".", "format", "(", "minutes", ",", "m_suffix", ")", "if", "hours", "!=", "0", ":", "retval", "=", "u'{0} hour{1} and {2}'", ".", "format", "(", "hours", ",", "h_suffix", ",", "retval", ")", "if", "days", "!=", "0", ":", "retval", "=", "u'{0} day{1}, {2}'", ".", "format", "(", "days", ",", "d_suffix", ",", "retval", ")", "return", "retval" ]
Convert a number of seconds into readable days, hours, and minutes
[ "Convert", "a", "number", "of", "seconds", "into", "readable", "days", "hours", "and", "minutes" ]
e2b88cb65eade270ed175ceb6a4d6339554b893b
https://github.com/shaunduncan/helga-reminders/blob/e2b88cb65eade270ed175ceb6a4d6339554b893b/helga_reminders.py#L82-L104
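A couple of worked values for the helper above (leftover seconds are simply dropped); the import path follows the record's path field.

from helga_reminders import readable_time_delta

print(readable_time_delta(90061))   # 1 day + 1 hour + 1 minute + 1 s -> '1 day, 1 hour and 1 minute'
print(readable_time_delta(300))     # -> '5 minutes'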
250,430
shaunduncan/helga-reminders
helga_reminders.py
next_occurrence
def next_occurrence(reminder): """ Calculate the next occurrence of a repeatable reminder """ now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) now_dow = now.weekday() # Start/end dow starting from tomorrow start_dow = now_dow + 1 end_dow = start_dow + 7 # Modded range from tomorrow until 1 week from now. Normalizes # wraparound values that span into next week dow_iter = imap(lambda x: x % 7, xrange(start_dow, end_dow)) # Filter out any days that aren't in the schedule dow_iter = ifilter(lambda x: x in reminder['repeat'], dow_iter) # Get the first one. That's the next day of week try: next_dow = next(dow_iter) except StopIteration: # How? logger.exception("Somehow, we didn't get a next day of week?") _scheduled.discard(reminder['_id']) return # Get the real day delta. Take the next day of the week. if that day of # the week is before the current day of the week, add a week. Normalize # this value by subtracting the starting point. Example: # Now = 3, Next = 1, Delta = (1 + 7) - 3 = 5 day_delta = next_dow if next_dow <= now_dow: day_delta += 7 day_delta -= now_dow # Update the record return reminder['when'] + datetime.timedelta(days=day_delta), day_delta
python
def next_occurrence(reminder): """ Calculate the next occurrence of a repeatable reminder """ now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) now_dow = now.weekday() # Start/end dow starting from tomorrow start_dow = now_dow + 1 end_dow = start_dow + 7 # Modded range from tomorrow until 1 week from now. Normalizes # wraparound values that span into next week dow_iter = imap(lambda x: x % 7, xrange(start_dow, end_dow)) # Filter out any days that aren't in the schedule dow_iter = ifilter(lambda x: x in reminder['repeat'], dow_iter) # Get the first one. That's the next day of week try: next_dow = next(dow_iter) except StopIteration: # How? logger.exception("Somehow, we didn't get a next day of week?") _scheduled.discard(reminder['_id']) return # Get the real day delta. Take the next day of the week. if that day of # the week is before the current day of the week, add a week. Normalize # this value by subtracting the starting point. Example: # Now = 3, Next = 1, Delta = (1 + 7) - 3 = 5 day_delta = next_dow if next_dow <= now_dow: day_delta += 7 day_delta -= now_dow # Update the record return reminder['when'] + datetime.timedelta(days=day_delta), day_delta
[ "def", "next_occurrence", "(", "reminder", ")", ":", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "UTC", ")", "now_dow", "=", "now", ".", "weekday", "(", ")", "# Start/end dow starting from tomorrow", "start_dow", "=", "now_dow", "+", "1", "end_dow", "=", "start_dow", "+", "7", "# Modded range from tomorrow until 1 week from now. Normalizes", "# wraparound values that span into next week", "dow_iter", "=", "imap", "(", "lambda", "x", ":", "x", "%", "7", ",", "xrange", "(", "start_dow", ",", "end_dow", ")", ")", "# Filter out any days that aren't in the schedule", "dow_iter", "=", "ifilter", "(", "lambda", "x", ":", "x", "in", "reminder", "[", "'repeat'", "]", ",", "dow_iter", ")", "# Get the first one. That's the next day of week", "try", ":", "next_dow", "=", "next", "(", "dow_iter", ")", "except", "StopIteration", ":", "# How?", "logger", ".", "exception", "(", "\"Somehow, we didn't get a next day of week?\"", ")", "_scheduled", ".", "discard", "(", "reminder", "[", "'_id'", "]", ")", "return", "# Get the real day delta. Take the next day of the week. if that day of", "# the week is before the current day of the week, add a week. Normalize", "# this value by subtracting the starting point. Example:", "# Now = 3, Next = 1, Delta = (1 + 7) - 3 = 5", "day_delta", "=", "next_dow", "if", "next_dow", "<=", "now_dow", ":", "day_delta", "+=", "7", "day_delta", "-=", "now_dow", "# Update the record", "return", "reminder", "[", "'when'", "]", "+", "datetime", ".", "timedelta", "(", "days", "=", "day_delta", ")", ",", "day_delta" ]
Calculate the next occurrence of a repeatable reminder
[ "Calculate", "the", "next", "occurrence", "of", "a", "repeatable", "reminder" ]
e2b88cb65eade270ed175ceb6a4d6339554b893b
https://github.com/shaunduncan/helga-reminders/blob/e2b88cb65eade270ed175ceb6a4d6339554b893b/helga_reminders.py#L107-L143
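The day-of-week arithmetic from next_occurrence, worked by hand with invented values (Monday == 0, as in datetime.weekday()):

now_dow = 3                                   # Thursday
repeat = [1, 4]                               # reminder repeats Tuesday and Friday
candidates = [d % 7 for d in range(now_dow + 1, now_dow + 8)]   # [4, 5, 6, 0, 1, 2, 3]
next_dow = next(d for d in candidates if d in repeat)           # 4 -> Friday
day_delta = next_dow + (7 if next_dow <= now_dow else 0) - now_dow
print(next_dow, day_delta)                    # -> 4 1  (the reminder fires tomorrow)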
250,431
shaunduncan/helga-reminders
helga_reminders.py
at_reminder
def at_reminder(client, channel, nick, args): """ Schedule a reminder to occur at a specific time. The given time can optionally be specified to occur at a specific timezone, but will default to the value of settings.TIMEZONE if none is specified. Times should be on a 24-hour clock. These types of reminders are repeatable, should the last two words of the message be of the form "repeat <days_of_week>" where days_of_week is a single string consisting of any of the following days: M, Tu, W, Th, F, Sa, Su. For example, 'repeat MWF' will repeat a reminder at the same time every Monday, Wednesday, and Friday. A full example of how one would use this: <sduncan> helga at 13:00 EST standup time repeat MTuWThF This will create a reminder "standup time" to occur at 1:00PM Eastern every weekday. Optionally, a specific channel can be specified to receive the reminder message. This is useful if creating several reminders via a private message. To use this, specify "on <channel>" between the time amount and the message: <sduncan> helga at 13:00 EST on #bots standup time repeat MTuWThF <sduncan> helga at 13:00 EST on bots standup time repeat MTuWThF Note that the '#' char for specifying the channel is entirely optional. """ global _scheduled now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) # Parse the time it should go off, and the minute offset of the day hh, mm = map(int, args[0].split(':')) # Strip time from args args = args[1:] # Default timezone timezone = pytz.timezone(getattr(settings, 'TIMEZONE', 'US/Eastern')) try: # If there was a timezone passed in timezone = pytz.timezone(args[0]) except pytz.UnknownTimeZoneError: pass else: # If so, remove it from args args = args[1:] local_now = now.astimezone(timezone) local_next = local_now.replace(hour=hh, minute=mm) if local_next <= local_now: local_next += datetime.timedelta(days=1) reminder = { 'when': local_next.astimezone(pytz.UTC), 'channel': channel, 'message': ' '.join(args), 'creator': nick, } # Check for 'repeat' arg try: repeat = args[-2] == 'repeat' except IndexError: repeat = False if repeat: # If repeating, strip off the last two for the message sched = args[-1] reminder['message'] = ' '.join(args[:-2]) repeat_days = sorted([v for k, v in days_of_week.iteritems() if k in sched]) if not repeat_days: return u"I didn't understand '{0}'. You must use any of M,Tu,W,Th,F,Sa,Su. Ex: MWF".format(sched) reminder['repeat'] = repeat_days for attempt in xrange(7): if reminder['when'].weekday() in repeat_days: break reminder['when'] += datetime.timedelta(days=1) # Handle ability to specify the channel if reminder['message'].startswith('on'): parts = reminder['message'].split(' ') chan = parts[1] reminder['message'] = ' '.join(parts[2:]) # Make sure channel is formatted correctly if not chan.startswith('#'): chan = '#{0}'.format(chan) reminder['channel'] = chan id = db.reminders.insert(reminder) diff = reminder['when'] - now delay = (diff.days * 24 * 3600) + diff.seconds _scheduled.add(id) reactor.callLater(delay, _do_reminder, id, client) return u'Reminder set for {0} from now'.format(readable_time_delta(delay))
python
def at_reminder(client, channel, nick, args): """ Schedule a reminder to occur at a specific time. The given time can optionally be specified to occur at a specific timezone, but will default to the value of settings.TIMEZONE if none is specified. Times should be on a 24-hour clock. These types of reminders are repeatable, should the last two words of the message be of the form "repeat <days_of_week>" where days_of_week is a single string consisting of any of the following days: M, Tu, W, Th, F, Sa, Su. For example, 'repeat MWF' will repeat a reminder at the same time every Monday, Wednesday, and Friday. A full example of how one would use this: <sduncan> helga at 13:00 EST standup time repeat MTuWThF This will create a reminder "standup time" to occur at 1:00PM Eastern every weekday. Optionally, a specific channel can be specified to receive the reminder message. This is useful if creating several reminders via a private message. To use this, specify "on <channel>" between the time amount and the message: <sduncan> helga at 13:00 EST on #bots standup time repeat MTuWThF <sduncan> helga at 13:00 EST on bots standup time repeat MTuWThF Note that the '#' char for specifying the channel is entirely optional. """ global _scheduled now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) # Parse the time it should go off, and the minute offset of the day hh, mm = map(int, args[0].split(':')) # Strip time from args args = args[1:] # Default timezone timezone = pytz.timezone(getattr(settings, 'TIMEZONE', 'US/Eastern')) try: # If there was a timezone passed in timezone = pytz.timezone(args[0]) except pytz.UnknownTimeZoneError: pass else: # If so, remove it from args args = args[1:] local_now = now.astimezone(timezone) local_next = local_now.replace(hour=hh, minute=mm) if local_next <= local_now: local_next += datetime.timedelta(days=1) reminder = { 'when': local_next.astimezone(pytz.UTC), 'channel': channel, 'message': ' '.join(args), 'creator': nick, } # Check for 'repeat' arg try: repeat = args[-2] == 'repeat' except IndexError: repeat = False if repeat: # If repeating, strip off the last two for the message sched = args[-1] reminder['message'] = ' '.join(args[:-2]) repeat_days = sorted([v for k, v in days_of_week.iteritems() if k in sched]) if not repeat_days: return u"I didn't understand '{0}'. You must use any of M,Tu,W,Th,F,Sa,Su. Ex: MWF".format(sched) reminder['repeat'] = repeat_days for attempt in xrange(7): if reminder['when'].weekday() in repeat_days: break reminder['when'] += datetime.timedelta(days=1) # Handle ability to specify the channel if reminder['message'].startswith('on'): parts = reminder['message'].split(' ') chan = parts[1] reminder['message'] = ' '.join(parts[2:]) # Make sure channel is formatted correctly if not chan.startswith('#'): chan = '#{0}'.format(chan) reminder['channel'] = chan id = db.reminders.insert(reminder) diff = reminder['when'] - now delay = (diff.days * 24 * 3600) + diff.seconds _scheduled.add(id) reactor.callLater(delay, _do_reminder, id, client) return u'Reminder set for {0} from now'.format(readable_time_delta(delay))
[ "def", "at_reminder", "(", "client", ",", "channel", ",", "nick", ",", "args", ")", ":", "global", "_scheduled", "now", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "UTC", ")", "# Parse the time it should go off, and the minute offset of the day", "hh", ",", "mm", "=", "map", "(", "int", ",", "args", "[", "0", "]", ".", "split", "(", "':'", ")", ")", "# Strip time from args", "args", "=", "args", "[", "1", ":", "]", "# Default timezone", "timezone", "=", "pytz", ".", "timezone", "(", "getattr", "(", "settings", ",", "'TIMEZONE'", ",", "'US/Eastern'", ")", ")", "try", ":", "# If there was a timezone passed in", "timezone", "=", "pytz", ".", "timezone", "(", "args", "[", "0", "]", ")", "except", "pytz", ".", "UnknownTimeZoneError", ":", "pass", "else", ":", "# If so, remove it from args", "args", "=", "args", "[", "1", ":", "]", "local_now", "=", "now", ".", "astimezone", "(", "timezone", ")", "local_next", "=", "local_now", ".", "replace", "(", "hour", "=", "hh", ",", "minute", "=", "mm", ")", "if", "local_next", "<=", "local_now", ":", "local_next", "+=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "reminder", "=", "{", "'when'", ":", "local_next", ".", "astimezone", "(", "pytz", ".", "UTC", ")", ",", "'channel'", ":", "channel", ",", "'message'", ":", "' '", ".", "join", "(", "args", ")", ",", "'creator'", ":", "nick", ",", "}", "# Check for 'repeat' arg", "try", ":", "repeat", "=", "args", "[", "-", "2", "]", "==", "'repeat'", "except", "IndexError", ":", "repeat", "=", "False", "if", "repeat", ":", "# If repeating, strip off the last two for the message", "sched", "=", "args", "[", "-", "1", "]", "reminder", "[", "'message'", "]", "=", "' '", ".", "join", "(", "args", "[", ":", "-", "2", "]", ")", "repeat_days", "=", "sorted", "(", "[", "v", "for", "k", ",", "v", "in", "days_of_week", ".", "iteritems", "(", ")", "if", "k", "in", "sched", "]", ")", "if", "not", "repeat_days", ":", "return", "u\"I didn't understand '{0}'. You must use any of M,Tu,W,Th,F,Sa,Su. Ex: MWF\"", ".", "format", "(", "sched", ")", "reminder", "[", "'repeat'", "]", "=", "repeat_days", "for", "attempt", "in", "xrange", "(", "7", ")", ":", "if", "reminder", "[", "'when'", "]", ".", "weekday", "(", ")", "in", "repeat_days", ":", "break", "reminder", "[", "'when'", "]", "+=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "# Handle ability to specify the channel", "if", "reminder", "[", "'message'", "]", ".", "startswith", "(", "'on'", ")", ":", "parts", "=", "reminder", "[", "'message'", "]", ".", "split", "(", "' '", ")", "chan", "=", "parts", "[", "1", "]", "reminder", "[", "'message'", "]", "=", "' '", ".", "join", "(", "parts", "[", "2", ":", "]", ")", "# Make sure channel is formatted correctly", "if", "not", "chan", ".", "startswith", "(", "'#'", ")", ":", "chan", "=", "'#{0}'", ".", "format", "(", "chan", ")", "reminder", "[", "'channel'", "]", "=", "chan", "id", "=", "db", ".", "reminders", ".", "insert", "(", "reminder", ")", "diff", "=", "reminder", "[", "'when'", "]", "-", "now", "delay", "=", "(", "diff", ".", "days", "*", "24", "*", "3600", ")", "+", "diff", ".", "seconds", "_scheduled", ".", "add", "(", "id", ")", "reactor", ".", "callLater", "(", "delay", ",", "_do_reminder", ",", "id", ",", "client", ")", "return", "u'Reminder set for {0} from now'", ".", "format", "(", "readable_time_delta", "(", "delay", ")", ")" ]
Schedule a reminder to occur at a specific time. The given time can optionally be specified to occur at a specific timezone, but will default to the value of settings.TIMEZONE if none is specified. Times should be on a 24-hour clock. These types of reminders are repeatable, should the last two words of the message be of the form "repeat <days_of_week>" where days_of_week is a single string consisting of any of the following days: M, Tu, W, Th, F, Sa, Su. For example, 'repeat MWF' will repeat a reminder at the same time every Monday, Wednesday, and Friday. A full example of how one would use this: <sduncan> helga at 13:00 EST standup time repeat MTuWThF This will create a reminder "standup time" to occur at 1:00PM Eastern every weekday. Optionally, a specific channel can be specified to receive the reminder message. This is useful if creating several reminders via a private message. To use this, specify "on <channel>" between the time amount and the message: <sduncan> helga at 13:00 EST on #bots standup time repeat MTuWThF <sduncan> helga at 13:00 EST on bots standup time repeat MTuWThF Note that the '#' char for specifying the channel is entirely optional.
[ "Schedule", "a", "reminder", "to", "occur", "at", "a", "specific", "time", ".", "The", "given", "time", "can", "optionally", "be", "specified", "to", "occur", "at", "a", "specific", "timezone", "but", "will", "default", "to", "the", "value", "of", "settings", ".", "TIMEZONE", "if", "none", "is", "specified", ".", "Times", "should", "be", "on", "a", "24", "-", "hour", "clock", "." ]
e2b88cb65eade270ed175ceb6a4d6339554b893b
https://github.com/shaunduncan/helga-reminders/blob/e2b88cb65eade270ed175ceb6a4d6339554b893b/helga_reminders.py#L223-L324
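The "on <channel>" handling in at_reminder above, traced with a made-up message to show how the optional '#' is normalised:

message = "on bots standup time"
parts = message.split(' ')
chan = parts[1]
if not chan.startswith('#'):
    chan = '#{0}'.format(chan)
print(chan, ' '.join(parts[2:]))   # -> #bots standup time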
250,432
minhhoit/yacms
yacms/bin/management/commands/yacms_project.py
Command.handle_template
def handle_template(self, template, subdir): """ Use yacms's project template by default. The method of picking the default directory is copied from Django's TemplateCommand. """ if template is None: return six.text_type(os.path.join(yacms.__path__[0], subdir)) return super(Command, self).handle_template(template, subdir)
python
def handle_template(self, template, subdir): """ Use yacms's project template by default. The method of picking the default directory is copied from Django's TemplateCommand. """ if template is None: return six.text_type(os.path.join(yacms.__path__[0], subdir)) return super(Command, self).handle_template(template, subdir)
[ "def", "handle_template", "(", "self", ",", "template", ",", "subdir", ")", ":", "if", "template", "is", "None", ":", "return", "six", ".", "text_type", "(", "os", ".", "path", ".", "join", "(", "yacms", ".", "__path__", "[", "0", "]", ",", "subdir", ")", ")", "return", "super", "(", "Command", ",", "self", ")", ".", "handle_template", "(", "template", ",", "subdir", ")" ]
Use yacms's project template by default. The method of picking the default directory is copied from Django's TemplateCommand.
[ "Use", "yacms", "s", "project", "template", "by", "default", ".", "The", "method", "of", "picking", "the", "default", "directory", "is", "copied", "from", "Django", "s", "TemplateCommand", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/bin/management/commands/yacms_project.py#L80-L88
250,433
praekelt/jmbo-gallery
gallery/models.py
VideoEmbed.save
def save(self, *args, **kwargs): """Automatically set image""" if not self.image: # Fetch image url = "http://img.youtube.com/vi/%s/0.jpg" % self.youtube_id response = None try: response = requests.get(url) except requests.exceptions.RequestException: # Nothing we can really do in this case pass if response is not None: # Jump through filesystem hoop to please photologue filename = self.youtube_id + '.jpg' filepath = os.path.join(mkdtemp(), filename) fp = open(filepath, 'wb') try: fp.write(response.content) finally: fp.close() # Check for a valid image image = None try: image = Image.open(filepath) except IOError: os.remove(filepath) if image is not None: try: # Overlay a play button if possible video_play_image = \ preferences.GalleryPreferences.video_play_image if video_play_image: overlay = Image.open(video_play_image) # Downsize image_overlay if it is larger than image w1, h1 = image.size w2, h2 = overlay.size if w2 > w1 or h2 > h1: ratio1 = w1 / float(h1) ratio2 = w2 / float(h2) if ratio1 > ratio2: resize_fract = h1 / float(h2) else: resize_fract = w1 / float(w2) overlay.resize( w2 * resize_fract, h2 * resize_fract, Image.ANTIALIAS ) image.paste( overlay, (int((w1 - w2) / 2.0), int((h1 - h2) / 2.0)), mask=overlay ) image.save(filepath) # Finally set image image = File(open(filepath, 'rb')) image.name = filename self.image = image finally: os.remove(filepath) super(VideoEmbed, self).save(*args, **kwargs)
python
def save(self, *args, **kwargs): """Automatically set image""" if not self.image: # Fetch image url = "http://img.youtube.com/vi/%s/0.jpg" % self.youtube_id response = None try: response = requests.get(url) except requests.exceptions.RequestException: # Nothing we can really do in this case pass if response is not None: # Jump through filesystem hoop to please photologue filename = self.youtube_id + '.jpg' filepath = os.path.join(mkdtemp(), filename) fp = open(filepath, 'wb') try: fp.write(response.content) finally: fp.close() # Check for a valid image image = None try: image = Image.open(filepath) except IOError: os.remove(filepath) if image is not None: try: # Overlay a play button if possible video_play_image = \ preferences.GalleryPreferences.video_play_image if video_play_image: overlay = Image.open(video_play_image) # Downsize image_overlay if it is larger than image w1, h1 = image.size w2, h2 = overlay.size if w2 > w1 or h2 > h1: ratio1 = w1 / float(h1) ratio2 = w2 / float(h2) if ratio1 > ratio2: resize_fract = h1 / float(h2) else: resize_fract = w1 / float(w2) overlay.resize( w2 * resize_fract, h2 * resize_fract, Image.ANTIALIAS ) image.paste( overlay, (int((w1 - w2) / 2.0), int((h1 - h2) / 2.0)), mask=overlay ) image.save(filepath) # Finally set image image = File(open(filepath, 'rb')) image.name = filename self.image = image finally: os.remove(filepath) super(VideoEmbed, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "image", ":", "# Fetch image", "url", "=", "\"http://img.youtube.com/vi/%s/0.jpg\"", "%", "self", ".", "youtube_id", "response", "=", "None", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ")", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "# Nothing we can really do in this case", "pass", "if", "response", "is", "not", "None", ":", "# Jump through filesystem hoop to please photologue", "filename", "=", "self", ".", "youtube_id", "+", "'.jpg'", "filepath", "=", "os", ".", "path", ".", "join", "(", "mkdtemp", "(", ")", ",", "filename", ")", "fp", "=", "open", "(", "filepath", ",", "'wb'", ")", "try", ":", "fp", ".", "write", "(", "response", ".", "content", ")", "finally", ":", "fp", ".", "close", "(", ")", "# Check for a valid image", "image", "=", "None", "try", ":", "image", "=", "Image", ".", "open", "(", "filepath", ")", "except", "IOError", ":", "os", ".", "remove", "(", "filepath", ")", "if", "image", "is", "not", "None", ":", "try", ":", "# Overlay a play button if possible", "video_play_image", "=", "preferences", ".", "GalleryPreferences", ".", "video_play_image", "if", "video_play_image", ":", "overlay", "=", "Image", ".", "open", "(", "video_play_image", ")", "# Downsize image_overlay if it is larger than image", "w1", ",", "h1", "=", "image", ".", "size", "w2", ",", "h2", "=", "overlay", ".", "size", "if", "w2", ">", "w1", "or", "h2", ">", "h1", ":", "ratio1", "=", "w1", "/", "float", "(", "h1", ")", "ratio2", "=", "w2", "/", "float", "(", "h2", ")", "if", "ratio1", ">", "ratio2", ":", "resize_fract", "=", "h1", "/", "float", "(", "h2", ")", "else", ":", "resize_fract", "=", "w1", "/", "float", "(", "w2", ")", "overlay", ".", "resize", "(", "w2", "*", "resize_fract", ",", "h2", "*", "resize_fract", ",", "Image", ".", "ANTIALIAS", ")", "image", ".", "paste", "(", "overlay", ",", "(", "int", "(", "(", "w1", "-", "w2", ")", "/", "2.0", ")", ",", "int", "(", "(", "h1", "-", "h2", ")", "/", "2.0", ")", ")", ",", "mask", "=", "overlay", ")", "image", ".", "save", "(", "filepath", ")", "# Finally set image", "image", "=", "File", "(", "open", "(", "filepath", ",", "'rb'", ")", ")", "image", ".", "name", "=", "filename", "self", ".", "image", "=", "image", "finally", ":", "os", ".", "remove", "(", "filepath", ")", "super", "(", "VideoEmbed", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Automatically set image
[ "Automatically", "set", "image" ]
064e005913d79e456ba014b50205c7916df4714a
https://github.com/praekelt/jmbo-gallery/blob/064e005913d79e456ba014b50205c7916df4714a/gallery/models.py#L71-L140
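A small sketch of the thumbnail fetch that save() starts with, using the same img.youtube.com URL pattern; the video id and output filename are made up, and error handling is omitted.

import requests

youtube_id = "dQw4w9WgXcQ"                      # hypothetical video id
url = "http://img.youtube.com/vi/%s/0.jpg" % youtube_id
response = requests.get(url)
with open(youtube_id + ".jpg", "wb") as fp:
    fp.write(response.content)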
250,434
kodexlab/reliure
reliure/engine.py
PlayMeta.name
def name(self): """ Compute a name according to sub meta results names >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_plus) >>> gres.append(res_moins) >>> gres.name 'operation:[plus, moins]' """ return "%s:[%s]" % (self._name, ", ".join(meta.name for meta in self._metas))
python
def name(self): """ Compute a name according to sub meta results names >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_plus) >>> gres.append(res_moins) >>> gres.name 'operation:[plus, moins]' """ return "%s:[%s]" % (self._name, ", ".join(meta.name for meta in self._metas))
[ "def", "name", "(", "self", ")", ":", "return", "\"%s:[%s]\"", "%", "(", "self", ".", "_name", ",", "\", \"", ".", "join", "(", "meta", ".", "name", "for", "meta", "in", "self", ".", "_metas", ")", ")" ]
Compute a name according to sub meta results names >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_plus) >>> gres.append(res_moins) >>> gres.name 'operation:[plus, moins]'
[ "Compute", "a", "name", "according", "to", "sub", "meta", "results", "names" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L163-L174
250,435
kodexlab/reliure
reliure/engine.py
PlayMeta.errors
def errors(self): """ get all the errors >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> gres.append(res_plus) >>> res_plus.add_error(ValueError("invalid data")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_moins) >>> res_plus.add_error(RuntimeError("server not anwsering")) >>> gres.errors [ValueError('invalid data',), RuntimeError('server not anwsering',)] """ errors = [] for meta in self: errors.extend(meta.errors) return errors
python
def errors(self): """ get all the errors >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> gres.append(res_plus) >>> res_plus.add_error(ValueError("invalid data")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_moins) >>> res_plus.add_error(RuntimeError("server not anwsering")) >>> gres.errors [ValueError('invalid data',), RuntimeError('server not anwsering',)] """ errors = [] for meta in self: errors.extend(meta.errors) return errors
[ "def", "errors", "(", "self", ")", ":", "errors", "=", "[", "]", "for", "meta", "in", "self", ":", "errors", ".", "extend", "(", "meta", ".", "errors", ")", "return", "errors" ]
get all the errors >>> gres = PlayMeta("operation") >>> res_plus = BasicPlayMeta(Composable(name="plus")) >>> gres.append(res_plus) >>> res_plus.add_error(ValueError("invalid data")) >>> res_moins = BasicPlayMeta(Composable(name="moins")) >>> gres.append(res_moins) >>> res_plus.add_error(RuntimeError("server not anwsering")) >>> gres.errors [ValueError('invalid data',), RuntimeError('server not anwsering',)]
[ "get", "all", "the", "errors" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L193-L209
250,436
kodexlab/reliure
reliure/engine.py
Block.defaults
def defaults(self): """ component default component .. Note:: default components is just an indication for user and the views, except if the Block is required. If required then default is selected if nothing explisitely selected. """ default = self._defaults # if require and no default, the first component as default if not len(default) and self.required and len(self._components): default = [six.next(six.itervalues(self._components)).name] return default
python
def defaults(self): """ component default component .. Note:: default components is just an indication for user and the views, except if the Block is required. If required then default is selected if nothing explisitely selected. """ default = self._defaults # if require and no default, the first component as default if not len(default) and self.required and len(self._components): default = [six.next(six.itervalues(self._components)).name] return default
[ "def", "defaults", "(", "self", ")", ":", "default", "=", "self", ".", "_defaults", "# if require and no default, the first component as default", "if", "not", "len", "(", "default", ")", "and", "self", ".", "required", "and", "len", "(", "self", ".", "_components", ")", ":", "default", "=", "[", "six", ".", "next", "(", "six", ".", "itervalues", "(", "self", ".", "_components", ")", ")", ".", "name", "]", "return", "default" ]
default component of the block .. Note:: default components are just an indication for the user and the views, except if the Block is required. If required, then the default is selected if nothing is explicitly selected.
[ "default", "component", "of", "the", "block" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L340-L351
250,437
kodexlab/reliure
reliure/engine.py
Block.selected
def selected(self): """ returns the list of selected component names. if no component selected return the one marked as default. If the block is required and no component where indicated as default, then the first component is selected. """ selected = self._selected if len(self._selected) == 0 and self.required: # nothing has been selected yet BUT the component is required selected = self.defaults return selected
python
def selected(self): """ returns the list of selected component names. if no component selected return the one marked as default. If the block is required and no component where indicated as default, then the first component is selected. """ selected = self._selected if len(self._selected) == 0 and self.required: # nothing has been selected yet BUT the component is required selected = self.defaults return selected
[ "def", "selected", "(", "self", ")", ":", "selected", "=", "self", ".", "_selected", "if", "len", "(", "self", ".", "_selected", ")", "==", "0", "and", "self", ".", "required", ":", "# nothing has been selected yet BUT the component is required", "selected", "=", "self", ".", "defaults", "return", "selected" ]
returns the list of selected component names. If no component is selected, return the one marked as default. If the block is required and no component was indicated as default, then the first component is selected.
[ "returns", "the", "list", "of", "selected", "component", "names", "." ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L362-L373
250,438
kodexlab/reliure
reliure/engine.py
Block.as_dict
def as_dict(self): """ returns a dictionary representation of the block and of all component options """ #TODO/FIXME: add selected information if self.hidden: rdict = {} else: def_selected = self.selected() comps = [ { 'name': comp.name, 'default': comp.name in self.defaults, 'options': comp.get_ordered_options() if isinstance(comp, Optionable) else None } for comp in self ] rdict = { 'name': self.name, 'required': self.required, 'multiple': self.multiple, 'args': self.in_name, 'returns': self.out_name, 'components': comps } return rdict
python
def as_dict(self): """ returns a dictionary representation of the block and of all component options """ #TODO/FIXME: add selected information if self.hidden: rdict = {} else: def_selected = self.selected() comps = [ { 'name': comp.name, 'default': comp.name in self.defaults, 'options': comp.get_ordered_options() if isinstance(comp, Optionable) else None } for comp in self ] rdict = { 'name': self.name, 'required': self.required, 'multiple': self.multiple, 'args': self.in_name, 'returns': self.out_name, 'components': comps } return rdict
[ "def", "as_dict", "(", "self", ")", ":", "#TODO/FIXME: add selected information", "if", "self", ".", "hidden", ":", "rdict", "=", "{", "}", "else", ":", "def_selected", "=", "self", ".", "selected", "(", ")", "comps", "=", "[", "{", "'name'", ":", "comp", ".", "name", ",", "'default'", ":", "comp", ".", "name", "in", "self", ".", "defaults", ",", "'options'", ":", "comp", ".", "get_ordered_options", "(", ")", "if", "isinstance", "(", "comp", ",", "Optionable", ")", "else", "None", "}", "for", "comp", "in", "self", "]", "rdict", "=", "{", "'name'", ":", "self", ".", "name", ",", "'required'", ":", "self", ".", "required", ",", "'multiple'", ":", "self", ".", "multiple", ",", "'args'", ":", "self", ".", "in_name", ",", "'returns'", ":", "self", ".", "out_name", ",", "'components'", ":", "comps", "}", "return", "rdict" ]
returns a dictionary representation of the block and of all component options
[ "returns", "a", "dictionary", "representation", "of", "the", "block", "and", "of", "all", "component", "options" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L375-L400
250,439
kodexlab/reliure
reliure/engine.py
Block.reset
def reset(self): """ Removes all the components of the block """ self._components = OrderedDict() self.clear_selections() self._logger.info("<block: %s> reset component list" % (self.name))
python
def reset(self): """ Removes all the components of the block """ self._components = OrderedDict() self.clear_selections() self._logger.info("<block: %s> reset component list" % (self.name))
[ "def", "reset", "(", "self", ")", ":", "self", ".", "_components", "=", "OrderedDict", "(", ")", "self", ".", "clear_selections", "(", ")", "self", ".", "_logger", ".", "info", "(", "\"<block: %s> reset component list\"", "%", "(", "self", ".", "name", ")", ")" ]
Removes all the components of the block
[ "Removes", "all", "the", "components", "of", "the", "block" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L402-L407
250,440
kodexlab/reliure
reliure/engine.py
Block.setup
def setup(self, in_name=None, out_name=None, required=None, hidden=None, multiple=None, defaults=None): """ Set the options of the block. Only the not None given options are set .. note:: a block may have multiple inputs but have only one output :param in_name: name(s) of the block input data :type in_name: str or list of str :param out_name: name of the block output data :type out_name: str :param required: whether the block will be required or not :type required: bool :param hidden: whether the block will be hidden to the user or not :type hidden: bool :param multiple: if True more than one component may be selected/ run) :type multiple: bool :param defaults: names of the selected components :type defaults: list of str, or str """ if in_name is not None: self.in_name = in_name if isinstance(in_name, list) else [in_name] if out_name is not None: self.out_name = out_name if required is not None: self.required = required if hidden is not None: self.hidden = hidden if multiple is not None: self.multiple = multiple if defaults is not None: #if default is just a 'str' it is managed in setter self.defaults = defaults
python
def setup(self, in_name=None, out_name=None, required=None, hidden=None, multiple=None, defaults=None): """ Set the options of the block. Only the not None given options are set .. note:: a block may have multiple inputs but have only one output :param in_name: name(s) of the block input data :type in_name: str or list of str :param out_name: name of the block output data :type out_name: str :param required: whether the block will be required or not :type required: bool :param hidden: whether the block will be hidden to the user or not :type hidden: bool :param multiple: if True more than one component may be selected/ run) :type multiple: bool :param defaults: names of the selected components :type defaults: list of str, or str """ if in_name is not None: self.in_name = in_name if isinstance(in_name, list) else [in_name] if out_name is not None: self.out_name = out_name if required is not None: self.required = required if hidden is not None: self.hidden = hidden if multiple is not None: self.multiple = multiple if defaults is not None: #if default is just a 'str' it is managed in setter self.defaults = defaults
[ "def", "setup", "(", "self", ",", "in_name", "=", "None", ",", "out_name", "=", "None", ",", "required", "=", "None", ",", "hidden", "=", "None", ",", "multiple", "=", "None", ",", "defaults", "=", "None", ")", ":", "if", "in_name", "is", "not", "None", ":", "self", ".", "in_name", "=", "in_name", "if", "isinstance", "(", "in_name", ",", "list", ")", "else", "[", "in_name", "]", "if", "out_name", "is", "not", "None", ":", "self", ".", "out_name", "=", "out_name", "if", "required", "is", "not", "None", ":", "self", ".", "required", "=", "required", "if", "hidden", "is", "not", "None", ":", "self", ".", "hidden", "=", "hidden", "if", "multiple", "is", "not", "None", ":", "self", ".", "multiple", "=", "multiple", "if", "defaults", "is", "not", "None", ":", "#if default is just a 'str' it is managed in setter", "self", ".", "defaults", "=", "defaults" ]
Set the options of the block. Only the options that are not None are set.

.. note:: a block may have multiple inputs but only one output

:param in_name: name(s) of the block input data
:type in_name: str or list of str
:param out_name: name of the block output data
:type out_name: str
:param required: whether the block is required or not
:type required: bool
:param hidden: whether the block is hidden from the user or not
:type hidden: bool
:param multiple: if True, more than one component may be selected/run
:type multiple: bool
:param defaults: names of the components selected by default
:type defaults: list of str, or str
[ "Set", "the", "options", "of", "the", "block", ".", "Only", "the", "not", "None", "given", "options", "are", "set" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L422-L454
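A hypothetical configuration sketch for setup(), combining the keyword arguments documented in this record with the Engine API shown in the needed_inputs doctests further down; block names, component bodies and the expected output are illustrative assumptions.

from reliure.engine import Engine

engine = Engine("tokenize", "index")

# only the keyword arguments that are not None are applied
engine.tokenize.setup(in_name="text", out_name="tokens", required=True)
engine.index.setup(in_name=["tokens", "collection"], out_name="done",
                   multiple=False, defaults='<lambda>')

engine.tokenize.append(lambda text: text.split())
engine.index.append(lambda tokens, collection: list(collection) + tokens)

print(engine.needed_inputs())   # expected: {'text', 'collection'}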
250,441
kodexlab/reliure
reliure/engine.py
Block.validate
def validate(self): """ check that the block can be run """ if self.required and len(self.selected()) == 0: raise ReliureError("No component selected for block '%s'" % self.name)
python
def validate(self): """ check that the block can be run """ if self.required and len(self.selected()) == 0: raise ReliureError("No component selected for block '%s'" % self.name)
[ "def", "validate", "(", "self", ")", ":", "if", "self", ".", "required", "and", "len", "(", "self", ".", "selected", "(", ")", ")", "==", "0", ":", "raise", "ReliureError", "(", "\"No component selected for block '%s'\"", "%", "self", ".", "name", ")" ]
check that the block can be run
[ "check", "that", "the", "block", "can", "be", "run" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L599-L603
250,442
kodexlab/reliure
reliure/engine.py
Engine.requires
def requires(self, *names): """ Declare what block will be used in this engine. It should be call before adding or setting any component. Blocks order will be preserved for runnning task. """ if len(names) == 0: raise ValueError("You should give at least one block name") if self._blocks is not None and len(self._blocks) > 0: raise ReliureError("Method 'requires' should be called only once before adding any composant") for name in names: if name in self._blocks: raise ValueError("Duplicate block name %s" % name) self._blocks[name] = Block(name) self._logger.info(" ** requires ** %s", names)
python
def requires(self, *names): """ Declare what block will be used in this engine. It should be call before adding or setting any component. Blocks order will be preserved for runnning task. """ if len(names) == 0: raise ValueError("You should give at least one block name") if self._blocks is not None and len(self._blocks) > 0: raise ReliureError("Method 'requires' should be called only once before adding any composant") for name in names: if name in self._blocks: raise ValueError("Duplicate block name %s" % name) self._blocks[name] = Block(name) self._logger.info(" ** requires ** %s", names)
[ "def", "requires", "(", "self", ",", "*", "names", ")", ":", "if", "len", "(", "names", ")", "==", "0", ":", "raise", "ValueError", "(", "\"You should give at least one block name\"", ")", "if", "self", ".", "_blocks", "is", "not", "None", "and", "len", "(", "self", ".", "_blocks", ")", ">", "0", ":", "raise", "ReliureError", "(", "\"Method 'requires' should be called only once before adding any composant\"", ")", "for", "name", "in", "names", ":", "if", "name", "in", "self", ".", "_blocks", ":", "raise", "ValueError", "(", "\"Duplicate block name %s\"", "%", "name", ")", "self", ".", "_blocks", "[", "name", "]", "=", "Block", "(", "name", ")", "self", ".", "_logger", ".", "info", "(", "\" ** requires ** %s\"", ",", "names", ")" ]
Declare which blocks will be used in this engine. It should be called before adding or setting any component. Block order will be preserved when running the task.
[ "Declare", "what", "block", "will", "be", "used", "in", "this", "engine", "." ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L713-L728
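requires() is the explicit form of the block declaration that the doctests below do through the Engine constructor. A minimal hypothetical sketch; it assumes an Engine can be created empty and its blocks declared afterwards, and the block names are made up.

from reliure.engine import Engine

engine = Engine()
engine.requires("clean", "analyse", "export")    # declare the blocks once, in run order

# components may only be attached after the blocks have been declared
engine.clean.append(lambda data: data.strip())
engine.analyse.append(lambda data: data.upper())
engine.export.append(lambda data: {"result": data})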
250,443
kodexlab/reliure
reliure/engine.py
Engine.needed_inputs
def needed_inputs(self): """ List all the needed inputs of a configured engine >>> engine = Engine("op1", "op2") >>> engine.op1.setup(in_name="in", out_name="middle", required=False) >>> engine.op2.setup(in_name="middle", out_name="out") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x:x*2) >>> engine.op1.select('<lambda>') >>> list(engine.needed_inputs()) ['in'] But now if we unactivate the first component: >>> engine.op1.clear_selections() >>> list(engine.needed_inputs()) ['middle'] More complex example: >>> engine = Engine("op1", "op2") >>> engine.op1.setup(in_name="in", out_name="middle") >>> engine.op2.setup(in_name=["middle", "in2"], out_name="out") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x, y:x*y) >>> engine.needed_inputs() == {'in', 'in2'} True Note that by default the needed input is 'input': >>> engine = Engine("op1", "op2") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x:x*2) >>> list(engine.needed_inputs()) ['input'] """ needed = set() available = set() # set of available data for bnum, block in enumerate(self): if not block.selected(): # if the block will not be used continue if block.in_name is not None: for in_name in block.in_name: if not in_name in available: needed.add(in_name) elif bnum == 0: # if the first block needed.add(Engine.DEFAULT_IN_NAME) # register the output available.add(block.out_name) return needed
python
def needed_inputs(self): """ List all the needed inputs of a configured engine >>> engine = Engine("op1", "op2") >>> engine.op1.setup(in_name="in", out_name="middle", required=False) >>> engine.op2.setup(in_name="middle", out_name="out") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x:x*2) >>> engine.op1.select('<lambda>') >>> list(engine.needed_inputs()) ['in'] But now if we unactivate the first component: >>> engine.op1.clear_selections() >>> list(engine.needed_inputs()) ['middle'] More complex example: >>> engine = Engine("op1", "op2") >>> engine.op1.setup(in_name="in", out_name="middle") >>> engine.op2.setup(in_name=["middle", "in2"], out_name="out") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x, y:x*y) >>> engine.needed_inputs() == {'in', 'in2'} True Note that by default the needed input is 'input': >>> engine = Engine("op1", "op2") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x:x*2) >>> list(engine.needed_inputs()) ['input'] """ needed = set() available = set() # set of available data for bnum, block in enumerate(self): if not block.selected(): # if the block will not be used continue if block.in_name is not None: for in_name in block.in_name: if not in_name in available: needed.add(in_name) elif bnum == 0: # if the first block needed.add(Engine.DEFAULT_IN_NAME) # register the output available.add(block.out_name) return needed
[ "def", "needed_inputs", "(", "self", ")", ":", "needed", "=", "set", "(", ")", "available", "=", "set", "(", ")", "# set of available data", "for", "bnum", ",", "block", "in", "enumerate", "(", "self", ")", ":", "if", "not", "block", ".", "selected", "(", ")", ":", "# if the block will not be used", "continue", "if", "block", ".", "in_name", "is", "not", "None", ":", "for", "in_name", "in", "block", ".", "in_name", ":", "if", "not", "in_name", "in", "available", ":", "needed", ".", "add", "(", "in_name", ")", "elif", "bnum", "==", "0", ":", "# if the first block", "needed", ".", "add", "(", "Engine", ".", "DEFAULT_IN_NAME", ")", "# register the output", "available", ".", "add", "(", "block", ".", "out_name", ")", "return", "needed" ]
List all the needed inputs of a configured engine >>> engine = Engine("op1", "op2") >>> engine.op1.setup(in_name="in", out_name="middle", required=False) >>> engine.op2.setup(in_name="middle", out_name="out") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x:x*2) >>> engine.op1.select('<lambda>') >>> list(engine.needed_inputs()) ['in'] But now if we unactivate the first component: >>> engine.op1.clear_selections() >>> list(engine.needed_inputs()) ['middle'] More complex example: >>> engine = Engine("op1", "op2") >>> engine.op1.setup(in_name="in", out_name="middle") >>> engine.op2.setup(in_name=["middle", "in2"], out_name="out") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x, y:x*y) >>> engine.needed_inputs() == {'in', 'in2'} True Note that by default the needed input is 'input': >>> engine = Engine("op1", "op2") >>> engine.op1.append(lambda x:x+2) >>> engine.op2.append(lambda x:x*2) >>> list(engine.needed_inputs()) ['input']
[ "List", "all", "the", "needed", "inputs", "of", "a", "configured", "engine" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L892-L942
250,444
kodexlab/reliure
reliure/engine.py
Engine.as_dict
def as_dict(self): """ dict repr of the components """ drepr = { 'blocks': [ block.as_dict() for block in self if block.hidden == False ], 'args': list(self.needed_inputs()) } return drepr
python
def as_dict(self): """ dict repr of the components """ drepr = { 'blocks': [ block.as_dict() for block in self if block.hidden == False ], 'args': list(self.needed_inputs()) } return drepr
[ "def", "as_dict", "(", "self", ")", ":", "drepr", "=", "{", "'blocks'", ":", "[", "block", ".", "as_dict", "(", ")", "for", "block", "in", "self", "if", "block", ".", "hidden", "==", "False", "]", ",", "'args'", ":", "list", "(", "self", ".", "needed_inputs", "(", ")", ")", "}", "return", "drepr" ]
dict repr of the components
[ "dict", "repr", "of", "the", "components" ]
0450c7a9254c5c003162738458bbe0c49e777ba5
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L1033-L1041
250,445
messense/extender
extender/manager.py
InstanceManager.all
def all(self): """ Returns a list of cached instances. """ class_list = list(self.get_class_list()) if not class_list: self.cache = [] return [] if self.cache is not None: return self.cache results = [] for cls_path in class_list: module_name, class_name = cls_path.rsplit('.', 1) try: module = __import__(module_name, {}, {}, class_name) cls = getattr(module, class_name) if self.instances: results.append(cls()) else: results.append(cls) except Exception: logger.exception('Unable to import {cls}'.format(cls=cls_path)) continue self.cache = results return results
python
def all(self): """ Returns a list of cached instances. """ class_list = list(self.get_class_list()) if not class_list: self.cache = [] return [] if self.cache is not None: return self.cache results = [] for cls_path in class_list: module_name, class_name = cls_path.rsplit('.', 1) try: module = __import__(module_name, {}, {}, class_name) cls = getattr(module, class_name) if self.instances: results.append(cls()) else: results.append(cls) except Exception: logger.exception('Unable to import {cls}'.format(cls=cls_path)) continue self.cache = results return results
[ "def", "all", "(", "self", ")", ":", "class_list", "=", "list", "(", "self", ".", "get_class_list", "(", ")", ")", "if", "not", "class_list", ":", "self", ".", "cache", "=", "[", "]", "return", "[", "]", "if", "self", ".", "cache", "is", "not", "None", ":", "return", "self", ".", "cache", "results", "=", "[", "]", "for", "cls_path", "in", "class_list", ":", "module_name", ",", "class_name", "=", "cls_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "try", ":", "module", "=", "__import__", "(", "module_name", ",", "{", "}", ",", "{", "}", ",", "class_name", ")", "cls", "=", "getattr", "(", "module", ",", "class_name", ")", "if", "self", ".", "instances", ":", "results", ".", "append", "(", "cls", "(", ")", ")", "else", ":", "results", ".", "append", "(", "cls", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "'Unable to import {cls}'", ".", "format", "(", "cls", "=", "cls_path", ")", ")", "continue", "self", ".", "cache", "=", "results", "return", "results" ]
Returns a list of cached instances.
[ "Returns", "a", "list", "of", "cached", "instances", "." ]
511a1712494d65133a0dbabca3d849e2f8c500c2
https://github.com/messense/extender/blob/511a1712494d65133a0dbabca3d849e2f8c500c2/extender/manager.py#L38-L65
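The import step in all() is the usual dotted-path convention: split off the class name, import the module, then fetch the attribute. A standalone, runnable sketch of the same pattern using importlib instead of the raw __import__ call used in the record:

import importlib

def load_class(dotted_path):
    """Resolve 'package.module.ClassName' to the class object."""
    module_name, class_name = dotted_path.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# example: resolve and instantiate a standard-library class
OrderedDict = load_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))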
250,446
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig._download_initial_config
def _download_initial_config(self): """Loads the initial config.""" _initial_config = self._download_running_config() # this is a bit slow! self._last_working_config = _initial_config self._config_history.append(_initial_config) self._config_history.append(_initial_config)
python
def _download_initial_config(self): """Loads the initial config.""" _initial_config = self._download_running_config() # this is a bit slow! self._last_working_config = _initial_config self._config_history.append(_initial_config) self._config_history.append(_initial_config)
[ "def", "_download_initial_config", "(", "self", ")", ":", "_initial_config", "=", "self", ".", "_download_running_config", "(", ")", "# this is a bit slow!", "self", ".", "_last_working_config", "=", "_initial_config", "self", ".", "_config_history", ".", "append", "(", "_initial_config", ")", "self", ".", "_config_history", ".", "append", "(", "_initial_config", ")" ]
Loads the initial config.
[ "Loads", "the", "initial", "config", "." ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L52-L57
250,447
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig._upload_config_content
def _upload_config_content(self, configuration, rollbacked=False): """Will try to upload a specific configuration on the device.""" try: for configuration_line in configuration.splitlines(): self._device.cli(configuration_line) self._config_changed = True # configuration was changed self._committed = False # and not committed yet except (pyPluribus.exceptions.CommandExecutionError, pyPluribus.exceptions.TimeoutError) as clierr: if not rollbacked: # rollack errors will just trow # to avoid loops self.discard() raise pyPluribus.exceptions.ConfigLoadError("Unable to upload config on the device: {err}.\ Configuration will be discarded.".format(err=clierr.message)) return True
python
def _upload_config_content(self, configuration, rollbacked=False): """Will try to upload a specific configuration on the device.""" try: for configuration_line in configuration.splitlines(): self._device.cli(configuration_line) self._config_changed = True # configuration was changed self._committed = False # and not committed yet except (pyPluribus.exceptions.CommandExecutionError, pyPluribus.exceptions.TimeoutError) as clierr: if not rollbacked: # rollack errors will just trow # to avoid loops self.discard() raise pyPluribus.exceptions.ConfigLoadError("Unable to upload config on the device: {err}.\ Configuration will be discarded.".format(err=clierr.message)) return True
[ "def", "_upload_config_content", "(", "self", ",", "configuration", ",", "rollbacked", "=", "False", ")", ":", "try", ":", "for", "configuration_line", "in", "configuration", ".", "splitlines", "(", ")", ":", "self", ".", "_device", ".", "cli", "(", "configuration_line", ")", "self", ".", "_config_changed", "=", "True", "# configuration was changed", "self", ".", "_committed", "=", "False", "# and not committed yet", "except", "(", "pyPluribus", ".", "exceptions", ".", "CommandExecutionError", ",", "pyPluribus", ".", "exceptions", ".", "TimeoutError", ")", "as", "clierr", ":", "if", "not", "rollbacked", ":", "# rollack errors will just trow", "# to avoid loops", "self", ".", "discard", "(", ")", "raise", "pyPluribus", ".", "exceptions", ".", "ConfigLoadError", "(", "\"Unable to upload config on the device: {err}.\\\n Configuration will be discarded.\"", ".", "format", "(", "err", "=", "clierr", ".", "message", ")", ")", "return", "True" ]
Will try to upload a specific configuration on the device.
[ "Will", "try", "to", "upload", "a", "specific", "configuration", "on", "the", "device", "." ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L63-L78
250,448
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig.load_candidate
def load_candidate(self, filename=None, config=None): """ Loads a candidate configuration on the device. In case the load fails at any point, will automatically rollback to last working configuration. :param filename: Specifies the name of the file with the configuration content. :param config: New configuration to be uploaded on the device. :raise pyPluribus.exceptions.ConfigLoadError: When the configuration could not be uploaded to the device. """ configuration = '' if filename is None: configuration = config else: with open(filename) as config_file: configuration = config_file.read() return self._upload_config_content(configuration)
python
def load_candidate(self, filename=None, config=None): """ Loads a candidate configuration on the device. In case the load fails at any point, will automatically rollback to last working configuration. :param filename: Specifies the name of the file with the configuration content. :param config: New configuration to be uploaded on the device. :raise pyPluribus.exceptions.ConfigLoadError: When the configuration could not be uploaded to the device. """ configuration = '' if filename is None: configuration = config else: with open(filename) as config_file: configuration = config_file.read() return self._upload_config_content(configuration)
[ "def", "load_candidate", "(", "self", ",", "filename", "=", "None", ",", "config", "=", "None", ")", ":", "configuration", "=", "''", "if", "filename", "is", "None", ":", "configuration", "=", "config", "else", ":", "with", "open", "(", "filename", ")", "as", "config_file", ":", "configuration", "=", "config_file", ".", "read", "(", ")", "return", "self", ".", "_upload_config_content", "(", "configuration", ")" ]
Loads a candidate configuration on the device. In case the load fails at any point, will automatically rollback to last working configuration. :param filename: Specifies the name of the file with the configuration content. :param config: New configuration to be uploaded on the device. :raise pyPluribus.exceptions.ConfigLoadError: When the configuration could not be uploaded to the device.
[ "Loads", "a", "candidate", "configuration", "on", "the", "device", ".", "In", "case", "the", "load", "fails", "at", "any", "point", "will", "automatically", "rollback", "to", "last", "working", "configuration", "." ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L88-L106
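A sketch of the intended load → compare → commit/discard cycle built from the PluribusConfig methods in these records. How a PluribusConfig instance is obtained is an assumption: the device argument and its .config attribute are hypothetical, and only the method names come from the records above and below.

def apply_candidate(device, candidate_config):
    """Push a candidate config and keep it only if it actually changes something."""
    config = device.config                      # assumed: the driver exposes a PluribusConfig
    config.load_candidate(config=candidate_config)
    diff = config.compare()
    if diff:
        config.commit()
    else:
        config.discard()
    return diff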
250,449
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig.discard
def discard(self): # pylint: disable=no-self-use """ Clears uncommited changes. :raise pyPluribus.exceptions.ConfigurationDiscardError: If the configuration applied cannot be discarded. """ try: self.rollback(0) except pyPluribus.exceptions.RollbackError as rbackerr: raise pyPluribus.exceptions.ConfigurationDiscardError("Cannot discard configuration: {err}.\ ".format(err=rbackerr))
python
def discard(self): # pylint: disable=no-self-use """ Clears uncommited changes. :raise pyPluribus.exceptions.ConfigurationDiscardError: If the configuration applied cannot be discarded. """ try: self.rollback(0) except pyPluribus.exceptions.RollbackError as rbackerr: raise pyPluribus.exceptions.ConfigurationDiscardError("Cannot discard configuration: {err}.\ ".format(err=rbackerr))
[ "def", "discard", "(", "self", ")", ":", "# pylint: disable=no-self-use", "try", ":", "self", ".", "rollback", "(", "0", ")", "except", "pyPluribus", ".", "exceptions", ".", "RollbackError", "as", "rbackerr", ":", "raise", "pyPluribus", ".", "exceptions", ".", "ConfigurationDiscardError", "(", "\"Cannot discard configuration: {err}.\\\n \"", ".", "format", "(", "err", "=", "rbackerr", ")", ")" ]
Clears uncommitted changes. :raise pyPluribus.exceptions.ConfigurationDiscardError: If the configuration applied cannot be discarded.
[ "Clears", "uncommited", "changes", "." ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L108-L118
250,450
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig.commit
def commit(self): # pylint: disable=no-self-use """Will commit the changes on the device""" if self._config_changed: self._last_working_config = self._download_running_config() self._config_history.append(self._last_working_config) self._committed = True # comfiguration was committed self._config_changed = False # no changes since last commit :) return True # this will be always true # since the changes are automatically applied self._committed = False # make sure the _committed attribute is not True by any chance return False
python
def commit(self): # pylint: disable=no-self-use """Will commit the changes on the device""" if self._config_changed: self._last_working_config = self._download_running_config() self._config_history.append(self._last_working_config) self._committed = True # comfiguration was committed self._config_changed = False # no changes since last commit :) return True # this will be always true # since the changes are automatically applied self._committed = False # make sure the _committed attribute is not True by any chance return False
[ "def", "commit", "(", "self", ")", ":", "# pylint: disable=no-self-use", "if", "self", ".", "_config_changed", ":", "self", ".", "_last_working_config", "=", "self", ".", "_download_running_config", "(", ")", "self", ".", "_config_history", ".", "append", "(", "self", ".", "_last_working_config", ")", "self", ".", "_committed", "=", "True", "# comfiguration was committed", "self", ".", "_config_changed", "=", "False", "# no changes since last commit :)", "return", "True", "# this will be always true", "# since the changes are automatically applied", "self", ".", "_committed", "=", "False", "# make sure the _committed attribute is not True by any chance", "return", "False" ]
Will commit the changes on the device
[ "Will", "commit", "the", "changes", "on", "the", "device" ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L120-L130
250,451
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig.compare
def compare(self): # pylint: disable=no-self-use """ Computes the difference between the candidate config and the running config. """ # becuase we emulate the configuration history # the difference is between the last committed config and the running-config running_config = self._download_running_config() running_config_lines = running_config.splitlines() last_committed_config = self._last_working_config last_committed_config_lines = last_committed_config.splitlines() difference = difflib.unified_diff(running_config_lines, last_committed_config_lines, n=0) return '\n'.join(difference)
python
def compare(self): # pylint: disable=no-self-use """ Computes the difference between the candidate config and the running config. """ # becuase we emulate the configuration history # the difference is between the last committed config and the running-config running_config = self._download_running_config() running_config_lines = running_config.splitlines() last_committed_config = self._last_working_config last_committed_config_lines = last_committed_config.splitlines() difference = difflib.unified_diff(running_config_lines, last_committed_config_lines, n=0) return '\n'.join(difference)
[ "def", "compare", "(", "self", ")", ":", "# pylint: disable=no-self-use", "# becuase we emulate the configuration history", "# the difference is between the last committed config and the running-config", "running_config", "=", "self", ".", "_download_running_config", "(", ")", "running_config_lines", "=", "running_config", ".", "splitlines", "(", ")", "last_committed_config", "=", "self", ".", "_last_working_config", "last_committed_config_lines", "=", "last_committed_config", ".", "splitlines", "(", ")", "difference", "=", "difflib", ".", "unified_diff", "(", "running_config_lines", ",", "last_committed_config_lines", ",", "n", "=", "0", ")", "return", "'\\n'", ".", "join", "(", "difference", ")" ]
Computes the difference between the candidate config and the running config.
[ "Computes", "the", "difference", "between", "the", "candidate", "config", "and", "the", "running", "config", "." ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L132-L143
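compare() is a thin wrapper around difflib.unified_diff with n=0 (no context lines). A runnable, generic illustration of that call on two small config snapshots:

import difflib

running = "hostname sw01\nntp server 10.0.0.1\nsnmp community public"
candidate = "hostname sw01\nntp server 10.0.0.2\nsnmp community public"

diff = difflib.unified_diff(running.splitlines(), candidate.splitlines(), n=0)
print('\n'.join(diff))
# prints the unified-diff header plus only the changed lines, e.g.
#   -ntp server 10.0.0.1
#   +ntp server 10.0.0.2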
250,452
mirceaulinic/pypluribus
pyPluribus/config.py
PluribusConfig.rollback
def rollback(self, number=0): """ Will rollback the configuration to a previous state. Can be called also when :param number: How many steps back in the configuration history must look back. :raise pyPluribus.exceptions.RollbackError: In case the configuration cannot be rolled back. """ if number < 0: raise pyPluribus.exceptions.RollbackError("Please provide a positive number to rollback to!") available_configs = len(self._config_history) max_rollbacks = available_configs - 2 if max_rollbacks < 0: raise pyPluribus.exceptions.RollbackError("Cannot rollback: \ not enough configration history available!") if max_rollbacks > 0 and number > max_rollbacks: raise pyPluribus.exceptions.RollbackError("Cannot rollback more than {cfgs} configurations!\ ".format(cfgs=max_rollbacks)) config_location = 1 # will load the initial config worst case (user never commited, but wants to discard) if max_rollbacks > 0: # in case of previous commit(s) will be able to load a specific configuration config_location = available_configs - number - 1 # stored in location len() - rollabck_nb - 1 # covers also the case of discard uncommitted changes (rollback 0) desired_config = self._config_history[config_location] try: self._upload_config_content(desired_config, rollbacked=True) except pyPluribus.exceptions.ConfigLoadError as loaderr: raise pyPluribus.exceptions.RollbackError("Cannot rollback: {err}".format(err=loaderr)) del self._config_history[(config_location+1):] # delete all newer configurations than the config rolled back self._last_working_config = desired_config self._committed = True self._config_changed = False return True
python
def rollback(self, number=0): """ Will rollback the configuration to a previous state. Can be called also when :param number: How many steps back in the configuration history must look back. :raise pyPluribus.exceptions.RollbackError: In case the configuration cannot be rolled back. """ if number < 0: raise pyPluribus.exceptions.RollbackError("Please provide a positive number to rollback to!") available_configs = len(self._config_history) max_rollbacks = available_configs - 2 if max_rollbacks < 0: raise pyPluribus.exceptions.RollbackError("Cannot rollback: \ not enough configration history available!") if max_rollbacks > 0 and number > max_rollbacks: raise pyPluribus.exceptions.RollbackError("Cannot rollback more than {cfgs} configurations!\ ".format(cfgs=max_rollbacks)) config_location = 1 # will load the initial config worst case (user never commited, but wants to discard) if max_rollbacks > 0: # in case of previous commit(s) will be able to load a specific configuration config_location = available_configs - number - 1 # stored in location len() - rollabck_nb - 1 # covers also the case of discard uncommitted changes (rollback 0) desired_config = self._config_history[config_location] try: self._upload_config_content(desired_config, rollbacked=True) except pyPluribus.exceptions.ConfigLoadError as loaderr: raise pyPluribus.exceptions.RollbackError("Cannot rollback: {err}".format(err=loaderr)) del self._config_history[(config_location+1):] # delete all newer configurations than the config rolled back self._last_working_config = desired_config self._committed = True self._config_changed = False return True
[ "def", "rollback", "(", "self", ",", "number", "=", "0", ")", ":", "if", "number", "<", "0", ":", "raise", "pyPluribus", ".", "exceptions", ".", "RollbackError", "(", "\"Please provide a positive number to rollback to!\"", ")", "available_configs", "=", "len", "(", "self", ".", "_config_history", ")", "max_rollbacks", "=", "available_configs", "-", "2", "if", "max_rollbacks", "<", "0", ":", "raise", "pyPluribus", ".", "exceptions", ".", "RollbackError", "(", "\"Cannot rollback: \\\n not enough configration history available!\"", ")", "if", "max_rollbacks", ">", "0", "and", "number", ">", "max_rollbacks", ":", "raise", "pyPluribus", ".", "exceptions", ".", "RollbackError", "(", "\"Cannot rollback more than {cfgs} configurations!\\\n \"", ".", "format", "(", "cfgs", "=", "max_rollbacks", ")", ")", "config_location", "=", "1", "# will load the initial config worst case (user never commited, but wants to discard)", "if", "max_rollbacks", ">", "0", ":", "# in case of previous commit(s) will be able to load a specific configuration", "config_location", "=", "available_configs", "-", "number", "-", "1", "# stored in location len() - rollabck_nb - 1", "# covers also the case of discard uncommitted changes (rollback 0)", "desired_config", "=", "self", ".", "_config_history", "[", "config_location", "]", "try", ":", "self", ".", "_upload_config_content", "(", "desired_config", ",", "rollbacked", "=", "True", ")", "except", "pyPluribus", ".", "exceptions", ".", "ConfigLoadError", "as", "loaderr", ":", "raise", "pyPluribus", ".", "exceptions", ".", "RollbackError", "(", "\"Cannot rollback: {err}\"", ".", "format", "(", "err", "=", "loaderr", ")", ")", "del", "self", ".", "_config_history", "[", "(", "config_location", "+", "1", ")", ":", "]", "# delete all newer configurations than the config rolled back", "self", ".", "_last_working_config", "=", "desired_config", "self", ".", "_committed", "=", "True", "self", ".", "_config_changed", "=", "False", "return", "True" ]
Will roll back the configuration to a previous state. Can also be called with number=0 to discard uncommitted changes, which is what discard() does.

:param number: How many steps to look back in the configuration history.
:raise pyPluribus.exceptions.RollbackError: In case the configuration cannot be rolled back.
[ "Will", "rollback", "the", "configuration", "to", "a", "previous", "state", ".", "Can", "be", "called", "also", "when" ]
99bb9b6de40a0e465e3f0e6636b26acdeabbbd90
https://github.com/mirceaulinic/pypluribus/blob/99bb9b6de40a0e465e3f0e6636b26acdeabbbd90/pyPluribus/config.py#L145-L176
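The index arithmetic in rollback() is easy to misread, so here is a tiny standalone model of it, using a plain list in place of the real configuration history; the history contents are illustrative only.

# history as maintained above: [initial, initial, commit_1, commit_2, ...]
history = ['cfg-initial', 'cfg-initial', 'cfg-commit-1', 'cfg-commit-2']

def pick_rollback_target(history, number):
    if number < 0:
        raise ValueError('number must be positive')
    available = len(history)
    max_rollbacks = available - 2
    if max_rollbacks < 0:
        raise ValueError('not enough configuration history')
    if max_rollbacks > 0 and number > max_rollbacks:
        raise ValueError('cannot rollback more than %d configurations' % max_rollbacks)
    location = 1                                 # worst case: reload the initial config
    if max_rollbacks > 0:
        location = available - number - 1
    return history[location]

print(pick_rollback_target(history, 0))   # 'cfg-commit-2' (discard uncommitted changes)
print(pick_rollback_target(history, 1))   # 'cfg-commit-1' (one commit back)
print(pick_rollback_target(history, 2))   # 'cfg-initial'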
250,453
nens/turn
turn/core.py
Queue.draw
def draw(self, label, expire): """ Return a Serial number for this resource queue, after bootstrapping. """ # get next number with self.client.pipeline() as pipe: pipe.msetnx({self.keys.dispenser: 0, self.keys.indicator: 1}) pipe.incr(self.keys.dispenser) number = pipe.execute()[-1] # publish for humans self.message('{} assigned to "{}"'.format(number, label)) # launch keeper kwargs = {'client': self.client, 'key': self.keys.key(number)} keeper = Keeper(label=label, expire=expire, **kwargs) try: yield number except: self.message('{} crashed!'.format(number)) raise finally: keeper.close() self.message('{} completed by "{}"'.format(number, label)) number += 1 self.client.set(self.keys.indicator, number) self.announce(number)
python
def draw(self, label, expire): """ Return a Serial number for this resource queue, after bootstrapping. """ # get next number with self.client.pipeline() as pipe: pipe.msetnx({self.keys.dispenser: 0, self.keys.indicator: 1}) pipe.incr(self.keys.dispenser) number = pipe.execute()[-1] # publish for humans self.message('{} assigned to "{}"'.format(number, label)) # launch keeper kwargs = {'client': self.client, 'key': self.keys.key(number)} keeper = Keeper(label=label, expire=expire, **kwargs) try: yield number except: self.message('{} crashed!'.format(number)) raise finally: keeper.close() self.message('{} completed by "{}"'.format(number, label)) number += 1 self.client.set(self.keys.indicator, number) self.announce(number)
[ "def", "draw", "(", "self", ",", "label", ",", "expire", ")", ":", "# get next number", "with", "self", ".", "client", ".", "pipeline", "(", ")", "as", "pipe", ":", "pipe", ".", "msetnx", "(", "{", "self", ".", "keys", ".", "dispenser", ":", "0", ",", "self", ".", "keys", ".", "indicator", ":", "1", "}", ")", "pipe", ".", "incr", "(", "self", ".", "keys", ".", "dispenser", ")", "number", "=", "pipe", ".", "execute", "(", ")", "[", "-", "1", "]", "# publish for humans", "self", ".", "message", "(", "'{} assigned to \"{}\"'", ".", "format", "(", "number", ",", "label", ")", ")", "# launch keeper", "kwargs", "=", "{", "'client'", ":", "self", ".", "client", ",", "'key'", ":", "self", ".", "keys", ".", "key", "(", "number", ")", "}", "keeper", "=", "Keeper", "(", "label", "=", "label", ",", "expire", "=", "expire", ",", "*", "*", "kwargs", ")", "try", ":", "yield", "number", "except", ":", "self", ".", "message", "(", "'{} crashed!'", ".", "format", "(", "number", ")", ")", "raise", "finally", ":", "keeper", ".", "close", "(", ")", "self", ".", "message", "(", "'{} completed by \"{}\"'", ".", "format", "(", "number", ",", "label", ")", ")", "number", "+=", "1", "self", ".", "client", ".", "set", "(", "self", ".", "keys", ".", "indicator", ",", "number", ")", "self", ".", "announce", "(", "number", ")" ]
Return a Serial number for this resource queue, after bootstrapping.
[ "Return", "a", "Serial", "number", "for", "this", "resource", "queue", "after", "bootstrapping", "." ]
98e806a0749ada0ddfd04b3c29fb04c15bf5ac18
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L98-L126
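The ticket-drawing step in draw() is an MSETNX + INCR pattern on two Redis keys: the dispenser hands out numbers and the indicator says whose turn it is. A minimal sketch of just that step with redis-py; it assumes a Redis server on localhost and uses made-up key names instead of the real key scheme.

import redis

client = redis.StrictRedis()
dispenser, indicator = 'myresource_dispenser', 'myresource_indicator'

with client.pipeline() as pipe:
    # create both counters only if they do not exist yet, then take the next number
    pipe.msetnx({dispenser: 0, indicator: 1})
    pipe.incr(dispenser)
    number = pipe.execute()[-1]

print('drew ticket', number)
print('now serving', int(client.get(indicator)))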
250,454
nens/turn
turn/core.py
Queue.wait
def wait(self, number, patience): """ Waits and resets if necessary. """ # inspect indicator for our number waiting = int(self.client.get(self.keys.indicator)) != number # wait until someone announces our number while waiting: message = self.subscription.listen(patience) if message is None: # timeout beyond patience, bump and try again self.message('{} bumps'.format(number)) self.bump() continue if message['type'] != 'message': continue # a subscribe message waiting = self.keys.number(message['data']) != number # our turn now self.message('{} started'.format(number))
python
def wait(self, number, patience): """ Waits and resets if necessary. """ # inspect indicator for our number waiting = int(self.client.get(self.keys.indicator)) != number # wait until someone announces our number while waiting: message = self.subscription.listen(patience) if message is None: # timeout beyond patience, bump and try again self.message('{} bumps'.format(number)) self.bump() continue if message['type'] != 'message': continue # a subscribe message waiting = self.keys.number(message['data']) != number # our turn now self.message('{} started'.format(number))
[ "def", "wait", "(", "self", ",", "number", ",", "patience", ")", ":", "# inspect indicator for our number", "waiting", "=", "int", "(", "self", ".", "client", ".", "get", "(", "self", ".", "keys", ".", "indicator", ")", ")", "!=", "number", "# wait until someone announces our number", "while", "waiting", ":", "message", "=", "self", ".", "subscription", ".", "listen", "(", "patience", ")", "if", "message", "is", "None", ":", "# timeout beyond patience, bump and try again", "self", ".", "message", "(", "'{} bumps'", ".", "format", "(", "number", ")", ")", "self", ".", "bump", "(", ")", "continue", "if", "message", "[", "'type'", "]", "!=", "'message'", ":", "continue", "# a subscribe message", "waiting", "=", "self", ".", "keys", ".", "number", "(", "message", "[", "'data'", "]", ")", "!=", "number", "# our turn now", "self", ".", "message", "(", "'{} started'", ".", "format", "(", "number", ")", ")" ]
Waits and resets if necessary.
[ "Waits", "and", "resets", "if", "necessary", "." ]
98e806a0749ada0ddfd04b3c29fb04c15bf5ac18
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L128-L147
250,455
nens/turn
turn/core.py
Queue.message
def message(self, text): """ Public message. """ self.client.publish(self.keys.external, '{}: {}'.format(self.resource, text))
python
def message(self, text): """ Public message. """ self.client.publish(self.keys.external, '{}: {}'.format(self.resource, text))
[ "def", "message", "(", "self", ",", "text", ")", ":", "self", ".", "client", ".", "publish", "(", "self", ".", "keys", ".", "external", ",", "'{}: {}'", ".", "format", "(", "self", ".", "resource", ",", "text", ")", ")" ]
Public message.
[ "Public", "message", "." ]
98e806a0749ada0ddfd04b3c29fb04c15bf5ac18
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L149-L152
250,456
nens/turn
turn/core.py
Queue.announce
def announce(self, number): """ Announce an indicator change on both channels. """ self.client.publish(self.keys.internal, self.keys.key(number)) self.message('{} granted'.format(number))
python
def announce(self, number): """ Announce an indicator change on both channels. """ self.client.publish(self.keys.internal, self.keys.key(number)) self.message('{} granted'.format(number))
[ "def", "announce", "(", "self", ",", "number", ")", ":", "self", ".", "client", ".", "publish", "(", "self", ".", "keys", ".", "internal", ",", "self", ".", "keys", ".", "key", "(", "number", ")", ")", "self", ".", "message", "(", "'{} granted'", ".", "format", "(", "number", ")", ")" ]
Announce an indicator change on both channels.
[ "Announce", "an", "indicator", "change", "on", "both", "channels", "." ]
98e806a0749ada0ddfd04b3c29fb04c15bf5ac18
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L154-L157
250,457
nens/turn
turn/core.py
Queue.bump
def bump(self): """ Fix indicator in case of unnanounced departments. """ # read client values = self.client.mget(self.keys.indicator, self.keys.dispenser) indicator, dispenser = map(int, values) # determine active users numbers = range(indicator, dispenser + 1) keys = [self.keys.key(n) for n in numbers] pairs = zip(keys, self.client.mget(*keys)) try: # determine number of first active user number = next(self.keys.number(key) for key, value in pairs if value is not None) except: # set number to next result of incr on dispenser number = dispenser + 1 # set indicator to it if necessary if number != indicator: self.client.set(self.keys.indicator, number) # announce and return it anyway self.announce(number) return number
python
def bump(self): """ Fix indicator in case of unnanounced departments. """ # read client values = self.client.mget(self.keys.indicator, self.keys.dispenser) indicator, dispenser = map(int, values) # determine active users numbers = range(indicator, dispenser + 1) keys = [self.keys.key(n) for n in numbers] pairs = zip(keys, self.client.mget(*keys)) try: # determine number of first active user number = next(self.keys.number(key) for key, value in pairs if value is not None) except: # set number to next result of incr on dispenser number = dispenser + 1 # set indicator to it if necessary if number != indicator: self.client.set(self.keys.indicator, number) # announce and return it anyway self.announce(number) return number
[ "def", "bump", "(", "self", ")", ":", "# read client", "values", "=", "self", ".", "client", ".", "mget", "(", "self", ".", "keys", ".", "indicator", ",", "self", ".", "keys", ".", "dispenser", ")", "indicator", ",", "dispenser", "=", "map", "(", "int", ",", "values", ")", "# determine active users", "numbers", "=", "range", "(", "indicator", ",", "dispenser", "+", "1", ")", "keys", "=", "[", "self", ".", "keys", ".", "key", "(", "n", ")", "for", "n", "in", "numbers", "]", "pairs", "=", "zip", "(", "keys", ",", "self", ".", "client", ".", "mget", "(", "*", "keys", ")", ")", "try", ":", "# determine number of first active user", "number", "=", "next", "(", "self", ".", "keys", ".", "number", "(", "key", ")", "for", "key", ",", "value", "in", "pairs", "if", "value", "is", "not", "None", ")", "except", ":", "# set number to next result of incr on dispenser", "number", "=", "dispenser", "+", "1", "# set indicator to it if necessary", "if", "number", "!=", "indicator", ":", "self", ".", "client", ".", "set", "(", "self", ".", "keys", ".", "indicator", ",", "number", ")", "# announce and return it anyway", "self", ".", "announce", "(", "number", ")", "return", "number" ]
Fix indicator in case of unannounced departures.
[ "Fix", "indicator", "in", "case", "of", "unnanounced", "departments", "." ]
98e806a0749ada0ddfd04b3c29fb04c15bf5ac18
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L159-L184
250,458
nens/turn
turn/core.py
Locker.lock
def lock(self, resource, label='', expire=60, patience=60): """ Lock a resource. :param resource: String corresponding to resource type :param label: String label to attach :param expire: int seconds :param patience: int seconds """ queue = Queue(client=self.client, resource=resource) with queue.draw(label=label, expire=expire) as number: queue.wait(number=number, patience=patience) yield queue.close()
python
def lock(self, resource, label='', expire=60, patience=60): """ Lock a resource. :param resource: String corresponding to resource type :param label: String label to attach :param expire: int seconds :param patience: int seconds """ queue = Queue(client=self.client, resource=resource) with queue.draw(label=label, expire=expire) as number: queue.wait(number=number, patience=patience) yield queue.close()
[ "def", "lock", "(", "self", ",", "resource", ",", "label", "=", "''", ",", "expire", "=", "60", ",", "patience", "=", "60", ")", ":", "queue", "=", "Queue", "(", "client", "=", "self", ".", "client", ",", "resource", "=", "resource", ")", "with", "queue", ".", "draw", "(", "label", "=", "label", ",", "expire", "=", "expire", ")", "as", "number", ":", "queue", ".", "wait", "(", "number", "=", "number", ",", "patience", "=", "patience", ")", "yield", "queue", ".", "close", "(", ")" ]
Lock a resource. :param resource: String corresponding to resource type :param label: String label to attach :param expire: int seconds :param patience: int seconds
[ "Lock", "a", "resource", "." ]
98e806a0749ada0ddfd04b3c29fb04c15bf5ac18
https://github.com/nens/turn/blob/98e806a0749ada0ddfd04b3c29fb04c15bf5ac18/turn/core.py#L205-L218
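End-to-end, the queueing machinery is meant to be consumed through this context manager. A hypothetical usage sketch: only lock()'s signature comes from the record above, while the import path and the zero-argument Locker constructor are assumptions.

from turn import Locker      # assumed import path

locker = Locker()            # assumed to connect to a local Redis by default
with locker.lock('shared-database', label='nightly-report', expire=60, patience=120):
    # exactly one holder of 'shared-database' executes this block at a time
    print('doing exclusive work')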
250,459
jtpaasch/simplygithub
simplygithub/internals/refs.py
list_refs
def list_refs(profile, ref_type=None): """List all refs. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref_type The type of ref you want. For heads, it's ``heads``. For tags, it's ``tags``. That sort of thing. If you don't specify a type, all refs are returned. Returns: A list of dicts with data about each ref. """ resource = "/refs" if ref_type: resource += "/" + ref_type data = api.get_request(profile, resource) result = [prepare(x) for x in data] return result
python
def list_refs(profile, ref_type=None): """List all refs. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref_type The type of ref you want. For heads, it's ``heads``. For tags, it's ``tags``. That sort of thing. If you don't specify a type, all refs are returned. Returns: A list of dicts with data about each ref. """ resource = "/refs" if ref_type: resource += "/" + ref_type data = api.get_request(profile, resource) result = [prepare(x) for x in data] return result
[ "def", "list_refs", "(", "profile", ",", "ref_type", "=", "None", ")", ":", "resource", "=", "\"/refs\"", "if", "ref_type", ":", "resource", "+=", "\"/\"", "+", "ref_type", "data", "=", "api", ".", "get_request", "(", "profile", ",", "resource", ")", "result", "=", "[", "prepare", "(", "x", ")", "for", "x", "in", "data", "]", "return", "result" ]
List all refs. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref_type The type of ref you want. For heads, it's ``heads``. For tags, it's ``tags``. That sort of thing. If you don't specify a type, all refs are returned. Returns: A list of dicts with data about each ref.
[ "List", "all", "refs", "." ]
b77506275ec276ce90879bf1ea9299a79448b903
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/refs.py#L16-L40
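A hypothetical call sketch for the refs helpers. The profile is shown as a plain dict with the two documented fields (repo and token); in real use it would be produced by simplygithub.authentication.profile, and the exact key names are an assumption.

from simplygithub.internals import refs

profile = {"repo": "octocat/hello-world", "token": "<personal-access-token>"}

branches = refs.list_refs(profile, ref_type="heads")   # only branch refs
for ref in branches:
    print(ref)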
250,460
jtpaasch/simplygithub
simplygithub/internals/refs.py
get_ref
def get_ref(profile, ref): """Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch``. Returns A dict with data about the ref. """ resource = "/refs/" + ref data = api.get_request(profile, resource) return prepare(data)
python
def get_ref(profile, ref): """Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch``. Returns A dict with data about the ref. """ resource = "/refs/" + ref data = api.get_request(profile, resource) return prepare(data)
[ "def", "get_ref", "(", "profile", ",", "ref", ")", ":", "resource", "=", "\"/refs/\"", "+", "ref", "data", "=", "api", ".", "get_request", "(", "profile", ",", "resource", ")", "return", "prepare", "(", "data", ")" ]
Fetch a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to fetch, e.g., ``heads/my-feature-branch``. Returns A dict with data about the ref.
[ "Fetch", "a", "ref", "." ]
b77506275ec276ce90879bf1ea9299a79448b903
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/refs.py#L43-L62
250,461
jtpaasch/simplygithub
simplygithub/internals/refs.py
create_ref
def create_ref(profile, ref, sha): """Create a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to create, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref. """ resource = "/refs" payload = {"ref": "refs/" + ref, "sha": sha} data = api.post_request(profile, resource, payload) return prepare(data)
python
def create_ref(profile, ref, sha): """Create a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to create, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref. """ resource = "/refs" payload = {"ref": "refs/" + ref, "sha": sha} data = api.post_request(profile, resource, payload) return prepare(data)
[ "def", "create_ref", "(", "profile", ",", "ref", ",", "sha", ")", ":", "resource", "=", "\"/refs\"", "payload", "=", "{", "\"ref\"", ":", "\"refs/\"", "+", "ref", ",", "\"sha\"", ":", "sha", "}", "data", "=", "api", ".", "post_request", "(", "profile", ",", "resource", ",", "payload", ")", "return", "prepare", "(", "data", ")" ]
Create a ref. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to create, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref.
[ "Create", "a", "ref", "." ]
b77506275ec276ce90879bf1ea9299a79448b903
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/refs.py#L65-L88
250,462
jtpaasch/simplygithub
simplygithub/internals/refs.py
update_ref
def update_ref(profile, ref, sha): """Point a ref to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to update, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref. """ resource = "/refs/" + ref payload = {"sha": sha} data = api.patch_request(profile, resource, payload) return prepare(data)
python
def update_ref(profile, ref, sha): """Point a ref to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to update, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref. """ resource = "/refs/" + ref payload = {"sha": sha} data = api.patch_request(profile, resource, payload) return prepare(data)
[ "def", "update_ref", "(", "profile", ",", "ref", ",", "sha", ")", ":", "resource", "=", "\"/refs/\"", "+", "ref", "payload", "=", "{", "\"sha\"", ":", "sha", "}", "data", "=", "api", ".", "patch_request", "(", "profile", ",", "resource", ",", "payload", ")", "return", "prepare", "(", "data", ")" ]
Point a ref to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. ref The ref to update, e.g., ``heads/my-feature-branch``. sha The SHA of the commit to point the ref to. Returns A dict with data about the ref.
[ "Point", "a", "ref", "to", "a", "new", "SHA", "." ]
b77506275ec276ce90879bf1ea9299a79448b903
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/internals/refs.py#L91-L114
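create_ref() and update_ref() differ only in the HTTP verb and in whether the 'refs/' prefix is added for you. A short hypothetical sequence showing the intended branch workflow; the profile and the SHAs are placeholders.

from simplygithub.internals import refs

profile = {"repo": "octocat/hello-world", "token": "<personal-access-token>"}

# create a branch pointing at an existing commit, then fast-forward it later
refs.create_ref(profile, "heads/my-feature-branch", "<base-commit-sha>")
refs.update_ref(profile, "heads/my-feature-branch", "<new-commit-sha>")

# inspect the result
print(refs.get_ref(profile, "heads/my-feature-branch"))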
250,463
jamieleshaw/lurklib
lurklib/connection.py
_Connection.latency
def latency(self): """ Checks the connection latency. """ with self.lock: self.send('PING %s' % self.server) ctime = self._m_time.time() msg = self._recv(expected_replies=('PONG',)) if msg[0] == 'PONG': latency = self._m_time.time() - ctime return latency
python
def latency(self): """ Checks the connection latency. """ with self.lock: self.send('PING %s' % self.server) ctime = self._m_time.time() msg = self._recv(expected_replies=('PONG',)) if msg[0] == 'PONG': latency = self._m_time.time() - ctime return latency
[ "def", "latency", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "send", "(", "'PING %s'", "%", "self", ".", "server", ")", "ctime", "=", "self", ".", "_m_time", ".", "time", "(", ")", "msg", "=", "self", ".", "_recv", "(", "expected_replies", "=", "(", "'PONG'", ",", ")", ")", "if", "msg", "[", "0", "]", "==", "'PONG'", ":", "latency", "=", "self", ".", "_m_time", ".", "time", "(", ")", "-", "ctime", "return", "latency" ]
Checks the connection latency.
[ "Checks", "the", "connection", "latency", "." ]
a861f35d880140422103dd78ec3239814e85fd7e
https://github.com/jamieleshaw/lurklib/blob/a861f35d880140422103dd78ec3239814e85fd7e/lurklib/connection.py#L357-L366
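latency() is a generic round-trip timing pattern: note the clock, send a PING, block until the matching PONG arrives, and subtract. A standalone sketch of the pattern, independent of the IRC connection class; the stand-in callables are placeholders.

import time

def round_trip(send, wait_for_reply):
    """Time one request/response exchange; both callables are supplied by the caller."""
    start = time.time()
    send()
    wait_for_reply()             # blocks until the expected reply arrives
    return time.time() - start

# trivial demonstration with stand-ins for the network calls
print(round_trip(lambda: None, lambda: time.sleep(0.05)))   # roughly 0.05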
250,464
armstrong/armstrong.core.arm_sections
armstrong/core/arm_sections/utils.py
get_section_relations
def get_section_relations(Section): """Find every relationship between section and the item model.""" all_rels = (Section._meta.get_all_related_objects() + Section._meta.get_all_related_many_to_many_objects()) return filter_item_rels(all_rels)
python
def get_section_relations(Section): """Find every relationship between section and the item model.""" all_rels = (Section._meta.get_all_related_objects() + Section._meta.get_all_related_many_to_many_objects()) return filter_item_rels(all_rels)
[ "def", "get_section_relations", "(", "Section", ")", ":", "all_rels", "=", "(", "Section", ".", "_meta", ".", "get_all_related_objects", "(", ")", "+", "Section", ".", "_meta", ".", "get_all_related_many_to_many_objects", "(", ")", ")", "return", "filter_item_rels", "(", "all_rels", ")" ]
Find every relationship between section and the item model.
[ "Find", "every", "relationship", "between", "section", "and", "the", "item", "model", "." ]
39c999c93771da909359e53b35afefe4846f77cb
https://github.com/armstrong/armstrong.core.arm_sections/blob/39c999c93771da909359e53b35afefe4846f77cb/armstrong/core/arm_sections/utils.py#L30-L34
250,465
mattimck/python-exist
exist/auth.py
ExistAuth.authorize_url
def authorize_url(self): """ Build the authorization url and save the state. Return the authorization url """ url, self.state = self.oauth.authorization_url( '%sauthorize' % OAUTH_URL) return url
python
def authorize_url(self): """ Build the authorization url and save the state. Return the authorization url """ url, self.state = self.oauth.authorization_url( '%sauthorize' % OAUTH_URL) return url
[ "def", "authorize_url", "(", "self", ")", ":", "url", ",", "self", ".", "state", "=", "self", ".", "oauth", ".", "authorization_url", "(", "'%sauthorize'", "%", "OAUTH_URL", ")", "return", "url" ]
Build the authorization url and save the state. Return the authorization url
[ "Build", "the", "authorization", "url", "and", "save", "the", "state", ".", "Return", "the", "authorization", "url" ]
2c4be9d176d8e8007c4e020ee7cd6263a2096abb
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L75-L82
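A minimal usage sketch for authorize_url above. The ExistAuth constructor arguments are an assumption inferred from the client_id and client_secret attributes used in this class, not a confirmed signature.

auth = ExistAuth(client_id='my-client-id', client_secret='my-client-secret')  # assumed constructor
url = auth.authorize_url()  # also stores auth.state for the later callback check
print('Visit %s to grant access' % url)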
250,466
mattimck/python-exist
exist/auth.py
ExistAuth.fetch_token
def fetch_token(self, code, state): """ Fetch the token, using the verification code. Also, make sure the state received in the response matches the one in the request. Returns the access_token. """ if self.state != state: raise MismatchingStateError() self.token = self.oauth.fetch_token( '%saccess_token/' % OAUTH_URL, code=code, client_secret=self.client_secret) return self.token['access_token']
python
def fetch_token(self, code, state): """ Fetch the token, using the verification code. Also, make sure the state received in the response matches the one in the request. Returns the access_token. """ if self.state != state: raise MismatchingStateError() self.token = self.oauth.fetch_token( '%saccess_token/' % OAUTH_URL, code=code, client_secret=self.client_secret) return self.token['access_token']
[ "def", "fetch_token", "(", "self", ",", "code", ",", "state", ")", ":", "if", "self", ".", "state", "!=", "state", ":", "raise", "MismatchingStateError", "(", ")", "self", ".", "token", "=", "self", ".", "oauth", ".", "fetch_token", "(", "'%saccess_token/'", "%", "OAUTH_URL", ",", "code", "=", "code", ",", "client_secret", "=", "self", ".", "client_secret", ")", "return", "self", ".", "token", "[", "'access_token'", "]" ]
Fetch the token, using the verification code. Also, make sure the state received in the response matches the one in the request. Returns the access_token.
[ "Fetch", "the", "token", "using", "the", "verification", "code", ".", "Also", "make", "sure", "the", "state", "received", "in", "the", "response", "matches", "the", "one", "in", "the", "request", ".", "Returns", "the", "access_token", "." ]
2c4be9d176d8e8007c4e020ee7cd6263a2096abb
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L84-L95
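A minimal usage sketch for fetch_token above, continuing from the authorize_url example; the code and state values are placeholders for the query parameters returned on the OAuth redirect.

# `auth` as above; returned_code and returned_state come from the redirect URL's query string.
access_token = auth.fetch_token(returned_code, returned_state)
# Raises MismatchingStateError if returned_state differs from the auth.state saved earlier.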
250,467
mattimck/python-exist
exist/auth.py
ExistAuth.refresh_token
def refresh_token(self, refresh_token): """ Get a new token, using the provided refresh token. Returns the new access_token. """ response = requests.post('%saccess_token' % OAUTH_URL, { 'refresh_token': refresh_token, 'grant_type': 'refresh_token', 'client_id': self.client_id, 'client_secret': self.client_secret }) resp = json.loads(response.content) if 'access_token' in resp: self.token = resp['access_token'] return resp
python
def refresh_token(self, refresh_token): """ Get a new token, using the provided refresh token. Returns the new access_token. """ response = requests.post('%saccess_token' % OAUTH_URL, { 'refresh_token': refresh_token, 'grant_type': 'refresh_token', 'client_id': self.client_id, 'client_secret': self.client_secret }) resp = json.loads(response.content) if 'access_token' in resp: self.token = resp['access_token'] return resp
[ "def", "refresh_token", "(", "self", ",", "refresh_token", ")", ":", "response", "=", "requests", ".", "post", "(", "'%saccess_token'", "%", "OAUTH_URL", ",", "{", "'refresh_token'", ":", "refresh_token", ",", "'grant_type'", ":", "'refresh_token'", ",", "'client_id'", ":", "self", ".", "client_id", ",", "'client_secret'", ":", "self", ".", "client_secret", "}", ")", "resp", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "if", "'access_token'", "in", "resp", ":", "self", ".", "token", "=", "resp", "[", "'access_token'", "]", "return", "resp" ]
Get a new token, using the provided refresh token. Returns the new access_token.
[ "Get", "a", "new", "token", "using", "the", "provided", "refresh", "token", ".", "Returns", "the", "new", "access_token", "." ]
2c4be9d176d8e8007c4e020ee7cd6263a2096abb
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L97-L114
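A minimal usage sketch for refresh_token above; the stored token value is a placeholder.

resp = auth.refresh_token('stored-refresh-token')  # placeholder refresh token
new_access_token = resp.get('access_token')        # resp is the parsed JSON response dict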
250,468
mattimck/python-exist
exist/auth.py
ExistAuth.browser_authorize
def browser_authorize(self): """ Open a browser to the authorization url and spool up a CherryPy server to accept the response """ url = self.authorize_url() # Open the web browser in a new thread for command-line browser support threading.Timer(1, webbrowser.open, args=(url,)).start() server_config = { 'server.socket_host': '0.0.0.0', 'server.socket_port': 443, 'server.ssl_module': 'pyopenssl', 'server.ssl_certificate': 'tests/files/certificate.cert', 'server.ssl_private_key': 'tests/files/key.key', } cherrypy.config.update(server_config) cherrypy.quickstart(self)
python
def browser_authorize(self): """ Open a browser to the authorization url and spool up a CherryPy server to accept the response """ url = self.authorize_url() # Open the web browser in a new thread for command-line browser support threading.Timer(1, webbrowser.open, args=(url,)).start() server_config = { 'server.socket_host': '0.0.0.0', 'server.socket_port': 443, 'server.ssl_module': 'pyopenssl', 'server.ssl_certificate': 'tests/files/certificate.cert', 'server.ssl_private_key': 'tests/files/key.key', } cherrypy.config.update(server_config) cherrypy.quickstart(self)
[ "def", "browser_authorize", "(", "self", ")", ":", "url", "=", "self", ".", "authorize_url", "(", ")", "# Open the web browser in a new thread for command-line browser support", "threading", ".", "Timer", "(", "1", ",", "webbrowser", ".", "open", ",", "args", "=", "(", "url", ",", ")", ")", ".", "start", "(", ")", "server_config", "=", "{", "'server.socket_host'", ":", "'0.0.0.0'", ",", "'server.socket_port'", ":", "443", ",", "'server.ssl_module'", ":", "'pyopenssl'", ",", "'server.ssl_certificate'", ":", "'tests/files/certificate.cert'", ",", "'server.ssl_private_key'", ":", "'tests/files/key.key'", ",", "}", "cherrypy", ".", "config", ".", "update", "(", "server_config", ")", "cherrypy", ".", "quickstart", "(", "self", ")" ]
Open a browser to the authorization url and spool up a CherryPy server to accept the response
[ "Open", "a", "browser", "to", "the", "authorization", "url", "and", "spool", "up", "a", "CherryPy", "server", "to", "accept", "the", "response" ]
2c4be9d176d8e8007c4e020ee7cd6263a2096abb
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L116-L135
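A minimal usage note for browser_authorize above: it opens the system browser at the authorization URL and serves the HTTPS callback locally on port 443, so the hard-coded certificate and key paths in the method must exist and the process needs permission to bind that port.

auth.browser_authorize()  # blocks in cherrypy.quickstart until the callback handler shuts the server down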
250,469
mattimck/python-exist
exist/auth.py
ExistAuth.index
def index(self, state, code=None, error=None): """ Receive a Exist response containing a verification code. Use the code to fetch the access_token. """ error = None if code: try: auth_token = self.fetch_token(code, state) except MissingTokenError: error = self._fmt_failure( 'Missing access token parameter.</br>Please check that ' 'you are using the correct client_secret') except MismatchingStateError: error = self._fmt_failure('CSRF Warning! Mismatching state') else: error = self._fmt_failure('Unknown error while authenticating') # Use a thread to shutdown cherrypy so we can return HTML first self._shutdown_cherrypy() return error if error else self.success_html % (auth_token)
python
def index(self, state, code=None, error=None): """ Receive a Exist response containing a verification code. Use the code to fetch the access_token. """ error = None if code: try: auth_token = self.fetch_token(code, state) except MissingTokenError: error = self._fmt_failure( 'Missing access token parameter.</br>Please check that ' 'you are using the correct client_secret') except MismatchingStateError: error = self._fmt_failure('CSRF Warning! Mismatching state') else: error = self._fmt_failure('Unknown error while authenticating') # Use a thread to shutdown cherrypy so we can return HTML first self._shutdown_cherrypy() return error if error else self.success_html % (auth_token)
[ "def", "index", "(", "self", ",", "state", ",", "code", "=", "None", ",", "error", "=", "None", ")", ":", "error", "=", "None", "if", "code", ":", "try", ":", "auth_token", "=", "self", ".", "fetch_token", "(", "code", ",", "state", ")", "except", "MissingTokenError", ":", "error", "=", "self", ".", "_fmt_failure", "(", "'Missing access token parameter.</br>Please check that '", "'you are using the correct client_secret'", ")", "except", "MismatchingStateError", ":", "error", "=", "self", ".", "_fmt_failure", "(", "'CSRF Warning! Mismatching state'", ")", "else", ":", "error", "=", "self", ".", "_fmt_failure", "(", "'Unknown error while authenticating'", ")", "# Use a thread to shutdown cherrypy so we can return HTML first", "self", ".", "_shutdown_cherrypy", "(", ")", "return", "error", "if", "error", "else", "self", ".", "success_html", "%", "(", "auth_token", ")" ]
Receive a Exist response containing a verification code. Use the code to fetch the access_token.
[ "Receive", "a", "Exist", "response", "containing", "a", "verification", "code", ".", "Use", "the", "code", "to", "fetch", "the", "access_token", "." ]
2c4be9d176d8e8007c4e020ee7cd6263a2096abb
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L138-L157
250,470
mattimck/python-exist
exist/auth.py
ExistAuth._shutdown_cherrypy
def _shutdown_cherrypy(self): """ Shutdown cherrypy in one second, if it's running """ if cherrypy.engine.state == cherrypy.engine.states.STARTED: threading.Timer(1, cherrypy.engine.exit).start()
python
def _shutdown_cherrypy(self): """ Shutdown cherrypy in one second, if it's running """ if cherrypy.engine.state == cherrypy.engine.states.STARTED: threading.Timer(1, cherrypy.engine.exit).start()
[ "def", "_shutdown_cherrypy", "(", "self", ")", ":", "if", "cherrypy", ".", "engine", ".", "state", "==", "cherrypy", ".", "engine", ".", "states", ".", "STARTED", ":", "threading", ".", "Timer", "(", "1", ",", "cherrypy", ".", "engine", ".", "exit", ")", ".", "start", "(", ")" ]
Shutdown cherrypy in one second, if it's running
[ "Shutdown", "cherrypy", "in", "one", "second", "if", "it", "s", "running" ]
2c4be9d176d8e8007c4e020ee7cd6263a2096abb
https://github.com/mattimck/python-exist/blob/2c4be9d176d8e8007c4e020ee7cd6263a2096abb/exist/auth.py#L164-L167
250,471
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
roundup
def roundup(x, order): '''Round a number to the passed order Args ---- x: float Number to be rounded order: int Order to which `x` should be rounded Returns ------- x_round: float The passed value rounded to the passed order ''' return x if x % 10**order == 0 else x + 10**order - x % 10**order
python
def roundup(x, order): '''Round a number to the passed order Args ---- x: float Number to be rounded order: int Order to which `x` should be rounded Returns ------- x_round: float The passed value rounded to the passed order ''' return x if x % 10**order == 0 else x + 10**order - x % 10**order
[ "def", "roundup", "(", "x", ",", "order", ")", ":", "return", "x", "if", "x", "%", "10", "**", "order", "==", "0", "else", "x", "+", "10", "**", "order", "-", "x", "%", "10", "**", "order" ]
Round a number to the passed order Args ---- x: float Number to be rounded order: int Order to which `x` should be rounded Returns ------- x_round: float The passed value rounded to the passed order
[ "Round", "a", "number", "to", "the", "passed", "order" ]
816563a9c3feb3fa416f1c2921c6b75db34111ad
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L7-L22
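Worked examples for roundup above, spelling out the modular arithmetic.

roundup(1234, 2)  # -> 1300: 10**2 == 100, 1234 % 100 == 34, so 1234 + 100 - 34
roundup(1200, 2)  # -> 1200: already a multiple of 100, returned unchanged
roundup(7, 0)     # -> 7: 10**0 == 1, and every integer is a multiple of 1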
250,472
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
hourminsec
def hourminsec(n_seconds): '''Generate a string of hours and minutes from total number of seconds Args ---- n_seconds: int Total number of seconds to calculate hours, minutes, and seconds from Returns ------- hours: int Number of hours in `n_seconds` minutes: int Remaining minutes in `n_seconds` after number of hours seconds: int Remaining seconds in `n_seconds` after number of minutes ''' hours, remainder = divmod(n_seconds, 3600) minutes, seconds = divmod(remainder, 60) return abs(hours), abs(minutes), abs(seconds)
python
def hourminsec(n_seconds): '''Generate a string of hours and minutes from total number of seconds Args ---- n_seconds: int Total number of seconds to calculate hours, minutes, and seconds from Returns ------- hours: int Number of hours in `n_seconds` minutes: int Remaining minutes in `n_seconds` after number of hours seconds: int Remaining seconds in `n_seconds` after number of minutes ''' hours, remainder = divmod(n_seconds, 3600) minutes, seconds = divmod(remainder, 60) return abs(hours), abs(minutes), abs(seconds)
[ "def", "hourminsec", "(", "n_seconds", ")", ":", "hours", ",", "remainder", "=", "divmod", "(", "n_seconds", ",", "3600", ")", "minutes", ",", "seconds", "=", "divmod", "(", "remainder", ",", "60", ")", "return", "abs", "(", "hours", ")", ",", "abs", "(", "minutes", ")", ",", "abs", "(", "seconds", ")" ]
Generate a string of hours and minutes from total number of seconds Args ---- n_seconds: int Total number of seconds to calculate hours, minutes, and seconds from Returns ------- hours: int Number of hours in `n_seconds` minutes: int Remaining minutes in `n_seconds` after number of hours seconds: int Remaining seconds in `n_seconds` after number of minutes
[ "Generate", "a", "string", "of", "hours", "and", "minutes", "from", "total", "number", "of", "seconds" ]
816563a9c3feb3fa416f1c2921c6b75db34111ad
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L42-L63
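A worked example for hourminsec above.

hourminsec(3725)  # -> (1, 2, 5): divmod(3725, 3600) == (1, 125), divmod(125, 60) == (2, 5)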
250,473
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
add_alpha_labels
def add_alpha_labels(axes, xpos=0.03, ypos=0.95, suffix='', color=None, fontsize=14, fontweight='normal', boxstyle='square', facecolor='white', edgecolor='white', alpha=1.0): '''Add sequential alphbet labels to subplot axes Args ---- axes: list of pyplot.ax A list of matplotlib axes to add the label labels to xpos: float or array_like X position(s) of labels in figure coordinates ypos: float or array_like Y position(s) of labels in figure coordinates suffix: str String to append to labels (e.g. '.' or ' name) color: matplotlib color Color of labels fontsize: int Alppa fontsize fontweight: matplotlib fontweight Alpha fontweight boxstyle: matplotlib boxstyle Alpha boxstyle facecolor: matplotlib facecolor Color of box containing label edgecolor: matplotlib edgecolor Color of box'es border containing label alpha: float Transparency of label Returns ------- axes: list of pyplot.ax A list of matplotlib axes objects with alpha labels added ''' import seaborn import string import numpy if not numpy.iterable(xpos): xpos = [xpos,]*len(axes) ypos = [ypos,]*len(axes) if (len(xpos) > 1) or (len(ypos) > 1): try: assert (len(axes) == len(xpos)) except AssertionError as e: e.args += 'xpos iterable must be same length as axes' raise try: assert (len(axes) == len(ypos)) except AssertionError as e: e.args += 'ypos iterable must be same length as axes' raise else: xpos = [xpos,] ypos = [ypos,] colors = seaborn.color_palette() abc = string.ascii_uppercase for i, (label, ax) in enumerate(zip(abc[:len(axes)], axes)): if color is None: color = colors[i] kwargs = dict(color=color, fontweight=fontweight,) bbox = dict(boxstyle=boxstyle, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha) ax.text(xpos[i], ypos[i], '{}{}'.format(label, suffix), transform=ax.transAxes, fontsize=fontsize, verticalalignment='top', bbox=bbox, **kwargs) return axes
python
def add_alpha_labels(axes, xpos=0.03, ypos=0.95, suffix='', color=None, fontsize=14, fontweight='normal', boxstyle='square', facecolor='white', edgecolor='white', alpha=1.0): '''Add sequential alphbet labels to subplot axes Args ---- axes: list of pyplot.ax A list of matplotlib axes to add the label labels to xpos: float or array_like X position(s) of labels in figure coordinates ypos: float or array_like Y position(s) of labels in figure coordinates suffix: str String to append to labels (e.g. '.' or ' name) color: matplotlib color Color of labels fontsize: int Alppa fontsize fontweight: matplotlib fontweight Alpha fontweight boxstyle: matplotlib boxstyle Alpha boxstyle facecolor: matplotlib facecolor Color of box containing label edgecolor: matplotlib edgecolor Color of box'es border containing label alpha: float Transparency of label Returns ------- axes: list of pyplot.ax A list of matplotlib axes objects with alpha labels added ''' import seaborn import string import numpy if not numpy.iterable(xpos): xpos = [xpos,]*len(axes) ypos = [ypos,]*len(axes) if (len(xpos) > 1) or (len(ypos) > 1): try: assert (len(axes) == len(xpos)) except AssertionError as e: e.args += 'xpos iterable must be same length as axes' raise try: assert (len(axes) == len(ypos)) except AssertionError as e: e.args += 'ypos iterable must be same length as axes' raise else: xpos = [xpos,] ypos = [ypos,] colors = seaborn.color_palette() abc = string.ascii_uppercase for i, (label, ax) in enumerate(zip(abc[:len(axes)], axes)): if color is None: color = colors[i] kwargs = dict(color=color, fontweight=fontweight,) bbox = dict(boxstyle=boxstyle, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha) ax.text(xpos[i], ypos[i], '{}{}'.format(label, suffix), transform=ax.transAxes, fontsize=fontsize, verticalalignment='top', bbox=bbox, **kwargs) return axes
[ "def", "add_alpha_labels", "(", "axes", ",", "xpos", "=", "0.03", ",", "ypos", "=", "0.95", ",", "suffix", "=", "''", ",", "color", "=", "None", ",", "fontsize", "=", "14", ",", "fontweight", "=", "'normal'", ",", "boxstyle", "=", "'square'", ",", "facecolor", "=", "'white'", ",", "edgecolor", "=", "'white'", ",", "alpha", "=", "1.0", ")", ":", "import", "seaborn", "import", "string", "import", "numpy", "if", "not", "numpy", ".", "iterable", "(", "xpos", ")", ":", "xpos", "=", "[", "xpos", ",", "]", "*", "len", "(", "axes", ")", "ypos", "=", "[", "ypos", ",", "]", "*", "len", "(", "axes", ")", "if", "(", "len", "(", "xpos", ")", ">", "1", ")", "or", "(", "len", "(", "ypos", ")", ">", "1", ")", ":", "try", ":", "assert", "(", "len", "(", "axes", ")", "==", "len", "(", "xpos", ")", ")", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "'xpos iterable must be same length as axes'", "raise", "try", ":", "assert", "(", "len", "(", "axes", ")", "==", "len", "(", "ypos", ")", ")", "except", "AssertionError", "as", "e", ":", "e", ".", "args", "+=", "'ypos iterable must be same length as axes'", "raise", "else", ":", "xpos", "=", "[", "xpos", ",", "]", "ypos", "=", "[", "ypos", ",", "]", "colors", "=", "seaborn", ".", "color_palette", "(", ")", "abc", "=", "string", ".", "ascii_uppercase", "for", "i", ",", "(", "label", ",", "ax", ")", "in", "enumerate", "(", "zip", "(", "abc", "[", ":", "len", "(", "axes", ")", "]", ",", "axes", ")", ")", ":", "if", "color", "is", "None", ":", "color", "=", "colors", "[", "i", "]", "kwargs", "=", "dict", "(", "color", "=", "color", ",", "fontweight", "=", "fontweight", ",", ")", "bbox", "=", "dict", "(", "boxstyle", "=", "boxstyle", ",", "facecolor", "=", "facecolor", ",", "edgecolor", "=", "edgecolor", ",", "alpha", "=", "alpha", ")", "ax", ".", "text", "(", "xpos", "[", "i", "]", ",", "ypos", "[", "i", "]", ",", "'{}{}'", ".", "format", "(", "label", ",", "suffix", ")", ",", "transform", "=", "ax", ".", "transAxes", ",", "fontsize", "=", "fontsize", ",", "verticalalignment", "=", "'top'", ",", "bbox", "=", "bbox", ",", "*", "*", "kwargs", ")", "return", "axes" ]
Add sequential alphbet labels to subplot axes Args ---- axes: list of pyplot.ax A list of matplotlib axes to add the label labels to xpos: float or array_like X position(s) of labels in figure coordinates ypos: float or array_like Y position(s) of labels in figure coordinates suffix: str String to append to labels (e.g. '.' or ' name) color: matplotlib color Color of labels fontsize: int Alppa fontsize fontweight: matplotlib fontweight Alpha fontweight boxstyle: matplotlib boxstyle Alpha boxstyle facecolor: matplotlib facecolor Color of box containing label edgecolor: matplotlib edgecolor Color of box'es border containing label alpha: float Transparency of label Returns ------- axes: list of pyplot.ax A list of matplotlib axes objects with alpha labels added
[ "Add", "sequential", "alphbet", "labels", "to", "subplot", "axes" ]
816563a9c3feb3fa416f1c2921c6b75db34111ad
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L104-L179
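A minimal usage sketch for add_alpha_labels above; the figure layout is an assumption, and seaborn must be installed because the function imports it internally.

import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, 2)
# Label the four panels A., B., C., D. in their upper-left corners.
axes = add_alpha_labels(axes.ravel(), suffix='.', color='black')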
250,474
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
merge_limits
def merge_limits(axes, xlim=True, ylim=True): '''Set maximum and minimum limits from list of axis objects to each axis Args ---- axes: iterable list of `matplotlib.pyplot` axis objects whose limits should be modified xlim: bool Flag to set modification of x axis limits ylim: bool Flag to set modification of y axis limits ''' # Compile lists of all x/y limits xlims = list() ylims = list() for ax in axes: [xlims.append(lim) for lim in ax.get_xlim()] [ylims.append(lim) for lim in ax.get_ylim()] # Iterate over axes objects and set limits for ax in axes: if xlim: ax.set_xlim(min(xlims), max(xlims)) if ylim: ax.set_ylim(min(ylims), max(ylims)) return None
python
def merge_limits(axes, xlim=True, ylim=True): '''Set maximum and minimum limits from list of axis objects to each axis Args ---- axes: iterable list of `matplotlib.pyplot` axis objects whose limits should be modified xlim: bool Flag to set modification of x axis limits ylim: bool Flag to set modification of y axis limits ''' # Compile lists of all x/y limits xlims = list() ylims = list() for ax in axes: [xlims.append(lim) for lim in ax.get_xlim()] [ylims.append(lim) for lim in ax.get_ylim()] # Iterate over axes objects and set limits for ax in axes: if xlim: ax.set_xlim(min(xlims), max(xlims)) if ylim: ax.set_ylim(min(ylims), max(ylims)) return None
[ "def", "merge_limits", "(", "axes", ",", "xlim", "=", "True", ",", "ylim", "=", "True", ")", ":", "# Compile lists of all x/y limits", "xlims", "=", "list", "(", ")", "ylims", "=", "list", "(", ")", "for", "ax", "in", "axes", ":", "[", "xlims", ".", "append", "(", "lim", ")", "for", "lim", "in", "ax", ".", "get_xlim", "(", ")", "]", "[", "ylims", ".", "append", "(", "lim", ")", "for", "lim", "in", "ax", ".", "get_ylim", "(", ")", "]", "# Iterate over axes objects and set limits", "for", "ax", "in", "axes", ":", "if", "xlim", ":", "ax", ".", "set_xlim", "(", "min", "(", "xlims", ")", ",", "max", "(", "xlims", ")", ")", "if", "ylim", ":", "ax", ".", "set_ylim", "(", "min", "(", "ylims", ")", ",", "max", "(", "ylims", ")", ")", "return", "None" ]
Set maximum and minimum limits from list of axis objects to each axis Args ---- axes: iterable list of `matplotlib.pyplot` axis objects whose limits should be modified xlim: bool Flag to set modification of x axis limits ylim: bool Flag to set modification of y axis limits
[ "Set", "maximum", "and", "minimum", "limits", "from", "list", "of", "axis", "objects", "to", "each", "axis" ]
816563a9c3feb3fa416f1c2921c6b75db34111ad
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L182-L209
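A minimal usage sketch for merge_limits above: after plotting, give two stacked axes a common y-range; the data values are placeholders.

import matplotlib.pyplot as plt
fig, (ax0, ax1) = plt.subplots(2, 1)
ax0.plot([0, 1, 2], [0, 5, 3])
ax1.plot([0, 1, 2], [-2, 1, 9])
merge_limits([ax0, ax1], xlim=False, ylim=True)  # both axes now span the widest y-limits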
250,475
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
plot_noncontiguous
def plot_noncontiguous(ax, data, ind, color='black', label='', offset=0, linewidth=0.5, linestyle='-'): '''Plot non-contiguous slice of data Args ---- data: ndarray The data with non continguous regions to plot ind: ndarray indices of data to be plotted color: matplotlib color Color of plotted line label: str Name to be shown in legend offset: int The number of index positions to reset start of data to zero linewidth: float The width of the plotted line linstyle: str The char representation of the plotting style for the line Returns ------- ax: pyplot.ax Axes object with line glyph added for non-contiguous regions ''' def slice_with_nans(ind, data, offset): '''Insert nans in indices and data where indices non-contiguous''' import copy import numpy ind_nan = numpy.zeros(len(data)) ind_nan[:] = numpy.nan # prevent ind from overwrite with deepcopy ind_nan[ind-offset] = copy.deepcopy(ind) #ind_nan = ind_nan[ind[0]-offset:ind[-1]-offset] # prevent data from overwrite with deepcopy data_nan = copy.deepcopy(data) data_nan[numpy.isnan(ind_nan)] = numpy.nan return ind_nan, data_nan x, y = slice_with_nans(ind, data, offset) ax.plot(x, y, color=color, linewidth=linewidth, linestyle=linestyle, label=label) return ax
python
def plot_noncontiguous(ax, data, ind, color='black', label='', offset=0, linewidth=0.5, linestyle='-'): '''Plot non-contiguous slice of data Args ---- data: ndarray The data with non continguous regions to plot ind: ndarray indices of data to be plotted color: matplotlib color Color of plotted line label: str Name to be shown in legend offset: int The number of index positions to reset start of data to zero linewidth: float The width of the plotted line linstyle: str The char representation of the plotting style for the line Returns ------- ax: pyplot.ax Axes object with line glyph added for non-contiguous regions ''' def slice_with_nans(ind, data, offset): '''Insert nans in indices and data where indices non-contiguous''' import copy import numpy ind_nan = numpy.zeros(len(data)) ind_nan[:] = numpy.nan # prevent ind from overwrite with deepcopy ind_nan[ind-offset] = copy.deepcopy(ind) #ind_nan = ind_nan[ind[0]-offset:ind[-1]-offset] # prevent data from overwrite with deepcopy data_nan = copy.deepcopy(data) data_nan[numpy.isnan(ind_nan)] = numpy.nan return ind_nan, data_nan x, y = slice_with_nans(ind, data, offset) ax.plot(x, y, color=color, linewidth=linewidth, linestyle=linestyle, label=label) return ax
[ "def", "plot_noncontiguous", "(", "ax", ",", "data", ",", "ind", ",", "color", "=", "'black'", ",", "label", "=", "''", ",", "offset", "=", "0", ",", "linewidth", "=", "0.5", ",", "linestyle", "=", "'-'", ")", ":", "def", "slice_with_nans", "(", "ind", ",", "data", ",", "offset", ")", ":", "'''Insert nans in indices and data where indices non-contiguous'''", "import", "copy", "import", "numpy", "ind_nan", "=", "numpy", ".", "zeros", "(", "len", "(", "data", ")", ")", "ind_nan", "[", ":", "]", "=", "numpy", ".", "nan", "# prevent ind from overwrite with deepcopy", "ind_nan", "[", "ind", "-", "offset", "]", "=", "copy", ".", "deepcopy", "(", "ind", ")", "#ind_nan = ind_nan[ind[0]-offset:ind[-1]-offset]", "# prevent data from overwrite with deepcopy", "data_nan", "=", "copy", ".", "deepcopy", "(", "data", ")", "data_nan", "[", "numpy", ".", "isnan", "(", "ind_nan", ")", "]", "=", "numpy", ".", "nan", "return", "ind_nan", ",", "data_nan", "x", ",", "y", "=", "slice_with_nans", "(", "ind", ",", "data", ",", "offset", ")", "ax", ".", "plot", "(", "x", ",", "y", ",", "color", "=", "color", ",", "linewidth", "=", "linewidth", ",", "linestyle", "=", "linestyle", ",", "label", "=", "label", ")", "return", "ax" ]
Plot non-contiguous slice of data Args ---- data: ndarray The data with non continguous regions to plot ind: ndarray indices of data to be plotted color: matplotlib color Color of plotted line label: str Name to be shown in legend offset: int The number of index positions to reset start of data to zero linewidth: float The width of the plotted line linstyle: str The char representation of the plotting style for the line Returns ------- ax: pyplot.ax Axes object with line glyph added for non-contiguous regions
[ "Plot", "non", "-", "contiguous", "slice", "of", "data" ]
816563a9c3feb3fa416f1c2921c6b75db34111ad
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L212-L261
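A minimal usage sketch for plot_noncontiguous above; the data and index ranges are placeholders. The data must be a float array so that NaNs can be written into the gaps.

import numpy
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
data = numpy.random.randn(100)  # float array, required for the NaN gap insertion
ind = numpy.concatenate([numpy.arange(10, 30), numpy.arange(60, 80)])  # two separate regions
ax = plot_noncontiguous(ax, data, ind, color='red', label='selected')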
250,476
ryanjdillon/pyotelem
pyotelem/plots/plotutils.py
plot_shade_mask
def plot_shade_mask(ax, ind, mask, facecolor='gray', alpha=0.5): '''Shade across x values where boolean mask is `True` Args ---- ax: pyplot.ax Axes object to plot with a shaded region ind: ndarray The indices to use for the x-axis values of the data mask: ndarray Boolean mask array to determine which regions should be shaded facecolor: matplotlib color Color of the shaded area Returns ------- ax: pyplot.ax Axes object with the shaded region added ''' ymin, ymax = ax.get_ylim() ax.fill_between(ind, ymin, ymax, where=mask, facecolor=facecolor, alpha=alpha) return ax
python
def plot_shade_mask(ax, ind, mask, facecolor='gray', alpha=0.5): '''Shade across x values where boolean mask is `True` Args ---- ax: pyplot.ax Axes object to plot with a shaded region ind: ndarray The indices to use for the x-axis values of the data mask: ndarray Boolean mask array to determine which regions should be shaded facecolor: matplotlib color Color of the shaded area Returns ------- ax: pyplot.ax Axes object with the shaded region added ''' ymin, ymax = ax.get_ylim() ax.fill_between(ind, ymin, ymax, where=mask, facecolor=facecolor, alpha=alpha) return ax
[ "def", "plot_shade_mask", "(", "ax", ",", "ind", ",", "mask", ",", "facecolor", "=", "'gray'", ",", "alpha", "=", "0.5", ")", ":", "ymin", ",", "ymax", "=", "ax", ".", "get_ylim", "(", ")", "ax", ".", "fill_between", "(", "ind", ",", "ymin", ",", "ymax", ",", "where", "=", "mask", ",", "facecolor", "=", "facecolor", ",", "alpha", "=", "alpha", ")", "return", "ax" ]
Shade across x values where boolean mask is `True` Args ---- ax: pyplot.ax Axes object to plot with a shaded region ind: ndarray The indices to use for the x-axis values of the data mask: ndarray Boolean mask array to determine which regions should be shaded facecolor: matplotlib color Color of the shaded area Returns ------- ax: pyplot.ax Axes object with the shaded region added
[ "Shade", "across", "x", "values", "where", "boolean", "mask", "is", "True" ]
816563a9c3feb3fa416f1c2921c6b75db34111ad
https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/plots/plotutils.py#L264-L286
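A minimal usage sketch for plot_shade_mask above; the signal and threshold are placeholders.

import numpy
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
signal = numpy.sin(numpy.linspace(0, 10, 200))
ind = numpy.arange(len(signal))
ax.plot(ind, signal)
ax = plot_shade_mask(ax, ind, signal > 0.5)  # shades every region where the condition holds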
250,477
emlazzarin/acrylic
acrylic/datatable.py
DataTable.fields
def fields(self, new_fieldnames): """ Overwrite all field names with new field names. Mass renaming. """ if len(new_fieldnames) != len(self.fields): raise Exception("Cannot replace fieldnames (len: %s) with list of " "incorrect length (len: %s)" % (len(new_fieldnames), len(self.fields))) for old_name, new_name in izip(self.fields, new_fieldnames): # use pop instead of `del` in case old_name == new_name self.__data[new_name] = self.__data.pop(old_name)
python
def fields(self, new_fieldnames): """ Overwrite all field names with new field names. Mass renaming. """ if len(new_fieldnames) != len(self.fields): raise Exception("Cannot replace fieldnames (len: %s) with list of " "incorrect length (len: %s)" % (len(new_fieldnames), len(self.fields))) for old_name, new_name in izip(self.fields, new_fieldnames): # use pop instead of `del` in case old_name == new_name self.__data[new_name] = self.__data.pop(old_name)
[ "def", "fields", "(", "self", ",", "new_fieldnames", ")", ":", "if", "len", "(", "new_fieldnames", ")", "!=", "len", "(", "self", ".", "fields", ")", ":", "raise", "Exception", "(", "\"Cannot replace fieldnames (len: %s) with list of \"", "\"incorrect length (len: %s)\"", "%", "(", "len", "(", "new_fieldnames", ")", ",", "len", "(", "self", ".", "fields", ")", ")", ")", "for", "old_name", ",", "new_name", "in", "izip", "(", "self", ".", "fields", ",", "new_fieldnames", ")", ":", "# use pop instead of `del` in case old_name == new_name", "self", ".", "__data", "[", "new_name", "]", "=", "self", ".", "__data", ".", "pop", "(", "old_name", ")" ]
Overwrite all field names with new field names. Mass renaming.
[ "Overwrite", "all", "field", "names", "with", "new", "field", "names", ".", "Mass", "renaming", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L140-L150
250,478
emlazzarin/acrylic
acrylic/datatable.py
DataTable.fromcsvstring
def fromcsvstring(cls, csvstring, delimiter=",", quotechar="\""): """ Takes one string that represents the entire contents of the CSV file, or similar delimited file. If you have a list of lists, where the first list is the headers, then use the main constructor. If you see an excess of whitespace in the first column of your data, this is probably because you tried to format a triple-quoted string literal nicely. Don't add any padding to the left. NOTE: Please prefix your triple-quoted string literal with `u` or `ur` as necessary. For copy-pasting directly from Excel, use `ur`. For copy-pasting from something Python (or similar) printed, use `ur`. For something just dumped from Python via __repr__ or some other text source that displays escape characters used, use `u`. --- Implementation notes: This solution was inspired by UnicodeRW. cStringIO.StringIO turns the passed string into a file-like (readble) object. The string must be encoded so that StringIO presents encoded text. In UnicodeRW, codecs.getreader('utf-8') reads an encoded file object to product a decoded file object on the fly. We don't need this. We read the StringIO object line by line into csv.reader, which is consumes encoded text and parses the CSV format out of it. Then we decode each cell one by one as we pass it into the data table csv.QUOTE_NONE (as well as the r-prefix on r'''string''') are vital since we're copy-pasting directly from Excel. The string should be treated as "literally" ("raw") as possible. """ if not isinstance(csvstring, basestring): raise Exception("If trying to construct a DataTable with " "a list of lists, just use the main " "constructor. Make sure to include a header row") stringio = StringIO(csvstring.encode('utf-8')) csv_data = csv.reader((line for line in stringio), delimiter=delimiter, dialect=csv.excel, quotechar=quotechar, quoting=csv.QUOTE_NONE) new_datatable = cls((s.decode('utf-8') for s in row) for row in csv_data) for field in new_datatable.fields: new_datatable[field] = parse_column(new_datatable[field]) return new_datatable
python
def fromcsvstring(cls, csvstring, delimiter=",", quotechar="\""): """ Takes one string that represents the entire contents of the CSV file, or similar delimited file. If you have a list of lists, where the first list is the headers, then use the main constructor. If you see an excess of whitespace in the first column of your data, this is probably because you tried to format a triple-quoted string literal nicely. Don't add any padding to the left. NOTE: Please prefix your triple-quoted string literal with `u` or `ur` as necessary. For copy-pasting directly from Excel, use `ur`. For copy-pasting from something Python (or similar) printed, use `ur`. For something just dumped from Python via __repr__ or some other text source that displays escape characters used, use `u`. --- Implementation notes: This solution was inspired by UnicodeRW. cStringIO.StringIO turns the passed string into a file-like (readble) object. The string must be encoded so that StringIO presents encoded text. In UnicodeRW, codecs.getreader('utf-8') reads an encoded file object to product a decoded file object on the fly. We don't need this. We read the StringIO object line by line into csv.reader, which is consumes encoded text and parses the CSV format out of it. Then we decode each cell one by one as we pass it into the data table csv.QUOTE_NONE (as well as the r-prefix on r'''string''') are vital since we're copy-pasting directly from Excel. The string should be treated as "literally" ("raw") as possible. """ if not isinstance(csvstring, basestring): raise Exception("If trying to construct a DataTable with " "a list of lists, just use the main " "constructor. Make sure to include a header row") stringio = StringIO(csvstring.encode('utf-8')) csv_data = csv.reader((line for line in stringio), delimiter=delimiter, dialect=csv.excel, quotechar=quotechar, quoting=csv.QUOTE_NONE) new_datatable = cls((s.decode('utf-8') for s in row) for row in csv_data) for field in new_datatable.fields: new_datatable[field] = parse_column(new_datatable[field]) return new_datatable
[ "def", "fromcsvstring", "(", "cls", ",", "csvstring", ",", "delimiter", "=", "\",\"", ",", "quotechar", "=", "\"\\\"\"", ")", ":", "if", "not", "isinstance", "(", "csvstring", ",", "basestring", ")", ":", "raise", "Exception", "(", "\"If trying to construct a DataTable with \"", "\"a list of lists, just use the main \"", "\"constructor. Make sure to include a header row\"", ")", "stringio", "=", "StringIO", "(", "csvstring", ".", "encode", "(", "'utf-8'", ")", ")", "csv_data", "=", "csv", ".", "reader", "(", "(", "line", "for", "line", "in", "stringio", ")", ",", "delimiter", "=", "delimiter", ",", "dialect", "=", "csv", ".", "excel", ",", "quotechar", "=", "quotechar", ",", "quoting", "=", "csv", ".", "QUOTE_NONE", ")", "new_datatable", "=", "cls", "(", "(", "s", ".", "decode", "(", "'utf-8'", ")", "for", "s", "in", "row", ")", "for", "row", "in", "csv_data", ")", "for", "field", "in", "new_datatable", ".", "fields", ":", "new_datatable", "[", "field", "]", "=", "parse_column", "(", "new_datatable", "[", "field", "]", ")", "return", "new_datatable" ]
Takes one string that represents the entire contents of the CSV file, or similar delimited file. If you have a list of lists, where the first list is the headers, then use the main constructor. If you see an excess of whitespace in the first column of your data, this is probably because you tried to format a triple-quoted string literal nicely. Don't add any padding to the left. NOTE: Please prefix your triple-quoted string literal with `u` or `ur` as necessary. For copy-pasting directly from Excel, use `ur`. For copy-pasting from something Python (or similar) printed, use `ur`. For something just dumped from Python via __repr__ or some other text source that displays escape characters used, use `u`. --- Implementation notes: This solution was inspired by UnicodeRW. cStringIO.StringIO turns the passed string into a file-like (readble) object. The string must be encoded so that StringIO presents encoded text. In UnicodeRW, codecs.getreader('utf-8') reads an encoded file object to product a decoded file object on the fly. We don't need this. We read the StringIO object line by line into csv.reader, which is consumes encoded text and parses the CSV format out of it. Then we decode each cell one by one as we pass it into the data table csv.QUOTE_NONE (as well as the r-prefix on r'''string''') are vital since we're copy-pasting directly from Excel. The string should be treated as "literally" ("raw") as possible.
[ "Takes", "one", "string", "that", "represents", "the", "entire", "contents", "of", "the", "CSV", "file", "or", "similar", "delimited", "file", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L175-L228
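A minimal usage sketch for fromcsvstring above. The class is written for Python 2 (basestring, unicode handling), so the example follows that convention; the column names and values are placeholders.

csv_text = u"name,count\nalpha,1\nbeta,2"
data = DataTable.fromcsvstring(csv_text)
print(data.fields)  # column headers taken from the first row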
250,479
emlazzarin/acrylic
acrylic/datatable.py
DataTable.fromexcel
def fromexcel(cls, path, sheet_name_or_num=0, headers=None): """ Constructs a new DataTable from an Excel file. Specify sheet_name_or_number to load that specific sheet. Headers will be inferred automatically, but if you'd prefer to load only a subset of all the headers, pass in a list of the headers you'd like as `headers`. --- Alternatively, it's quite simple to: reader = ExcelReader('myfile.xls') reader.change_sheet('default') data = DataTable(reader) """ reader = ExcelRW.UnicodeDictReader(path, sheet_name_or_num) return cls(reader, headers=headers)
python
def fromexcel(cls, path, sheet_name_or_num=0, headers=None): """ Constructs a new DataTable from an Excel file. Specify sheet_name_or_number to load that specific sheet. Headers will be inferred automatically, but if you'd prefer to load only a subset of all the headers, pass in a list of the headers you'd like as `headers`. --- Alternatively, it's quite simple to: reader = ExcelReader('myfile.xls') reader.change_sheet('default') data = DataTable(reader) """ reader = ExcelRW.UnicodeDictReader(path, sheet_name_or_num) return cls(reader, headers=headers)
[ "def", "fromexcel", "(", "cls", ",", "path", ",", "sheet_name_or_num", "=", "0", ",", "headers", "=", "None", ")", ":", "reader", "=", "ExcelRW", ".", "UnicodeDictReader", "(", "path", ",", "sheet_name_or_num", ")", "return", "cls", "(", "reader", ",", "headers", "=", "headers", ")" ]
Constructs a new DataTable from an Excel file. Specify sheet_name_or_number to load that specific sheet. Headers will be inferred automatically, but if you'd prefer to load only a subset of all the headers, pass in a list of the headers you'd like as `headers`. --- Alternatively, it's quite simple to: reader = ExcelReader('myfile.xls') reader.change_sheet('default') data = DataTable(reader)
[ "Constructs", "a", "new", "DataTable", "from", "an", "Excel", "file", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L256-L275
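A minimal usage sketch for fromexcel above; the file path, sheet name, and column names are placeholders.

data = DataTable.fromexcel('measurements.xls', sheet_name_or_num='results')
subset = DataTable.fromexcel('measurements.xls', headers=['depth', 'duration'])  # load only these columns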
250,480
emlazzarin/acrylic
acrylic/datatable.py
DataTable.__print_table
def __print_table(self, row_delim, header_delim=None, header_pad=u"", pad=u""): """ row_delim default delimiter inserted between columns of every row in the table. header_delim delimiter inserted within the headers. by default takes the value of `row_delim` header_pad put on the sides of the row of headers. pad put on the sides of every row. """ if header_delim is None: header_delim = row_delim num_cols = len(self.fields) accumulator = ((u"%s" + header_delim) * num_cols)[:-len(header_delim)] accumulator = ((header_pad + accumulator + header_pad + u"\n") % tuple(self.fields)) for datarow in self: rowstring = ((u"%s" + row_delim) * num_cols)[:-len(row_delim)] rowstring = (pad + rowstring + pad + u"\n") % tuple(datarow) accumulator += rowstring return accumulator[:-1]
python
def __print_table(self, row_delim, header_delim=None, header_pad=u"", pad=u""): """ row_delim default delimiter inserted between columns of every row in the table. header_delim delimiter inserted within the headers. by default takes the value of `row_delim` header_pad put on the sides of the row of headers. pad put on the sides of every row. """ if header_delim is None: header_delim = row_delim num_cols = len(self.fields) accumulator = ((u"%s" + header_delim) * num_cols)[:-len(header_delim)] accumulator = ((header_pad + accumulator + header_pad + u"\n") % tuple(self.fields)) for datarow in self: rowstring = ((u"%s" + row_delim) * num_cols)[:-len(row_delim)] rowstring = (pad + rowstring + pad + u"\n") % tuple(datarow) accumulator += rowstring return accumulator[:-1]
[ "def", "__print_table", "(", "self", ",", "row_delim", ",", "header_delim", "=", "None", ",", "header_pad", "=", "u\"\"", ",", "pad", "=", "u\"\"", ")", ":", "if", "header_delim", "is", "None", ":", "header_delim", "=", "row_delim", "num_cols", "=", "len", "(", "self", ".", "fields", ")", "accumulator", "=", "(", "(", "u\"%s\"", "+", "header_delim", ")", "*", "num_cols", ")", "[", ":", "-", "len", "(", "header_delim", ")", "]", "accumulator", "=", "(", "(", "header_pad", "+", "accumulator", "+", "header_pad", "+", "u\"\\n\"", ")", "%", "tuple", "(", "self", ".", "fields", ")", ")", "for", "datarow", "in", "self", ":", "rowstring", "=", "(", "(", "u\"%s\"", "+", "row_delim", ")", "*", "num_cols", ")", "[", ":", "-", "len", "(", "row_delim", ")", "]", "rowstring", "=", "(", "pad", "+", "rowstring", "+", "pad", "+", "u\"\\n\"", ")", "%", "tuple", "(", "datarow", ")", "accumulator", "+=", "rowstring", "return", "accumulator", "[", ":", "-", "1", "]" ]
row_delim default delimiter inserted between columns of every row in the table. header_delim delimiter inserted within the headers. by default takes the value of `row_delim` header_pad put on the sides of the row of headers. pad put on the sides of every row.
[ "row_delim", "default", "delimiter", "inserted", "between", "columns", "of", "every", "row", "in", "the", "table", ".", "header_delim", "delimiter", "inserted", "within", "the", "headers", ".", "by", "default", "takes", "the", "value", "of", "row_delim", "header_pad", "put", "on", "the", "sides", "of", "the", "row", "of", "headers", ".", "pad", "put", "on", "the", "sides", "of", "every", "row", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L373-L393
250,481
emlazzarin/acrylic
acrylic/datatable.py
DataTable.apply
def apply(self, func, *fields): """ Applies the function, `func`, to every row in the DataTable. If no fields are supplied, the entire row is passed to `func`. If fields are supplied, the values at all of those fields are passed into func in that order. --- data['diff'] = data.apply(short_diff, 'old_count', 'new_count') """ results = [] for row in self: if not fields: results.append(func(row)) else: if any(field not in self for field in fields): for field in fields: if field not in self: raise Exception("Column `%s` does not exist " "in DataTable" % field) results.append(func(*[row[field] for field in fields])) return results
python
def apply(self, func, *fields): """ Applies the function, `func`, to every row in the DataTable. If no fields are supplied, the entire row is passed to `func`. If fields are supplied, the values at all of those fields are passed into func in that order. --- data['diff'] = data.apply(short_diff, 'old_count', 'new_count') """ results = [] for row in self: if not fields: results.append(func(row)) else: if any(field not in self for field in fields): for field in fields: if field not in self: raise Exception("Column `%s` does not exist " "in DataTable" % field) results.append(func(*[row[field] for field in fields])) return results
[ "def", "apply", "(", "self", ",", "func", ",", "*", "fields", ")", ":", "results", "=", "[", "]", "for", "row", "in", "self", ":", "if", "not", "fields", ":", "results", ".", "append", "(", "func", "(", "row", ")", ")", "else", ":", "if", "any", "(", "field", "not", "in", "self", "for", "field", "in", "fields", ")", ":", "for", "field", "in", "fields", ":", "if", "field", "not", "in", "self", ":", "raise", "Exception", "(", "\"Column `%s` does not exist \"", "\"in DataTable\"", "%", "field", ")", "results", ".", "append", "(", "func", "(", "*", "[", "row", "[", "field", "]", "for", "field", "in", "fields", "]", ")", ")", "return", "results" ]
Applies the function, `func`, to every row in the DataTable. If no fields are supplied, the entire row is passed to `func`. If fields are supplied, the values at all of those fields are passed into func in that order. --- data['diff'] = data.apply(short_diff, 'old_count', 'new_count')
[ "Applies", "the", "function", "func", "to", "every", "row", "in", "the", "DataTable", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L486-L507
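A minimal usage sketch for apply above, echoing the docstring's own example; the column names and the lambda are placeholders.

data['total_count'] = data.apply(lambda old, new: old + new, 'old_count', 'new_count')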
250,482
emlazzarin/acrylic
acrylic/datatable.py
DataTable.col
def col(self, col_name_or_num): """ Returns the col at index `colnum` or name `colnum`. """ if isinstance(col_name_or_num, basestring): return self[col_name_or_num] elif isinstance(col_name_or_num, (int, long)): if col_name_or_num > len(self.fields): raise IndexError("Invalid column index `%s` for DataTable" % col_name_or_num) return self.__data[self.fields[col_name_or_num]]
python
def col(self, col_name_or_num): """ Returns the col at index `colnum` or name `colnum`. """ if isinstance(col_name_or_num, basestring): return self[col_name_or_num] elif isinstance(col_name_or_num, (int, long)): if col_name_or_num > len(self.fields): raise IndexError("Invalid column index `%s` for DataTable" % col_name_or_num) return self.__data[self.fields[col_name_or_num]]
[ "def", "col", "(", "self", ",", "col_name_or_num", ")", ":", "if", "isinstance", "(", "col_name_or_num", ",", "basestring", ")", ":", "return", "self", "[", "col_name_or_num", "]", "elif", "isinstance", "(", "col_name_or_num", ",", "(", "int", ",", "long", ")", ")", ":", "if", "col_name_or_num", ">", "len", "(", "self", ".", "fields", ")", ":", "raise", "IndexError", "(", "\"Invalid column index `%s` for DataTable\"", "%", "col_name_or_num", ")", "return", "self", ".", "__data", "[", "self", ".", "fields", "[", "col_name_or_num", "]", "]" ]
Returns the col at index `colnum` or name `colnum`.
[ "Returns", "the", "col", "at", "index", "colnum", "or", "name", "colnum", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L509-L519
250,483
emlazzarin/acrylic
acrylic/datatable.py
DataTable.distinct
def distinct(self, fieldname, key=None): """ Returns the unique values seen at `fieldname`. """ return tuple(unique_everseen(self[fieldname], key=key))
python
def distinct(self, fieldname, key=None): """ Returns the unique values seen at `fieldname`. """ return tuple(unique_everseen(self[fieldname], key=key))
[ "def", "distinct", "(", "self", ",", "fieldname", ",", "key", "=", "None", ")", ":", "return", "tuple", "(", "unique_everseen", "(", "self", "[", "fieldname", "]", ",", "key", "=", "key", ")", ")" ]
Returns the unique values seen at `fieldname`.
[ "Returns", "the", "unique", "values", "seen", "at", "fieldname", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L566-L570
250,484
emlazzarin/acrylic
acrylic/datatable.py
DataTable.mask
def mask(self, masklist): """ `masklist` is an array of Bools or equivalent. This returns a new DataTable using only the rows that were True (or equivalent) in the mask. """ if not hasattr(masklist, '__len__'): masklist = tuple(masklist) if len(masklist) != len(self): raise Exception("Masklist length (%s) must match length " "of DataTable (%s)" % (len(masklist), len(self))) new_datatable = DataTable() for field in self.fields: new_datatable[field] = list(compress(self[field], masklist)) return new_datatable
python
def mask(self, masklist): """ `masklist` is an array of Bools or equivalent. This returns a new DataTable using only the rows that were True (or equivalent) in the mask. """ if not hasattr(masklist, '__len__'): masklist = tuple(masklist) if len(masklist) != len(self): raise Exception("Masklist length (%s) must match length " "of DataTable (%s)" % (len(masklist), len(self))) new_datatable = DataTable() for field in self.fields: new_datatable[field] = list(compress(self[field], masklist)) return new_datatable
[ "def", "mask", "(", "self", ",", "masklist", ")", ":", "if", "not", "hasattr", "(", "masklist", ",", "'__len__'", ")", ":", "masklist", "=", "tuple", "(", "masklist", ")", "if", "len", "(", "masklist", ")", "!=", "len", "(", "self", ")", ":", "raise", "Exception", "(", "\"Masklist length (%s) must match length \"", "\"of DataTable (%s)\"", "%", "(", "len", "(", "masklist", ")", ",", "len", "(", "self", ")", ")", ")", "new_datatable", "=", "DataTable", "(", ")", "for", "field", "in", "self", ".", "fields", ":", "new_datatable", "[", "field", "]", "=", "list", "(", "compress", "(", "self", "[", "field", "]", ",", "masklist", ")", ")", "return", "new_datatable" ]
`masklist` is an array of Bools or equivalent. This returns a new DataTable using only the rows that were True (or equivalent) in the mask.
[ "masklist", "is", "an", "array", "of", "Bools", "or", "equivalent", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L625-L642
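A minimal usage sketch for mask above; the column name and threshold are placeholders.

keep = [count > 10 for count in data['count']]  # one boolean per row
filtered = data.mask(keep)                      # new DataTable containing only the True rows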
250,485
emlazzarin/acrylic
acrylic/datatable.py
DataTable.mutapply
def mutapply(self, function, fieldname): """ Applies `function` in-place to the field name specified. In other words, `mutapply` overwrites column `fieldname` ith the results of applying `function` to each element of that column. """ self[fieldname] = self.apply(function, fieldname)
python
def mutapply(self, function, fieldname): """ Applies `function` in-place to the field name specified. In other words, `mutapply` overwrites column `fieldname` ith the results of applying `function` to each element of that column. """ self[fieldname] = self.apply(function, fieldname)
[ "def", "mutapply", "(", "self", ",", "function", ",", "fieldname", ")", ":", "self", "[", "fieldname", "]", "=", "self", ".", "apply", "(", "function", ",", "fieldname", ")" ]
Applies `function` in-place to the field name specified. In other words, `mutapply` overwrites column `fieldname` ith the results of applying `function` to each element of that column.
[ "Applies", "function", "in", "-", "place", "to", "the", "field", "name", "specified", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L644-L651
250,486
emlazzarin/acrylic
acrylic/datatable.py
DataTable.rename
def rename(self, old_fieldname, new_fieldname): """ Renames a specific field, and preserves the underlying order. """ if old_fieldname not in self: raise Exception("DataTable does not have field `%s`" % old_fieldname) if not isinstance(new_fieldname, basestring): raise ValueError("DataTable fields must be strings, not `%s`" % type(new_fieldname)) if old_fieldname == new_fieldname: return new_names = self.fields location = new_names.index(old_fieldname) del new_names[location] new_names.insert(location, new_fieldname) self.fields = new_names
python
def rename(self, old_fieldname, new_fieldname): """ Renames a specific field, and preserves the underlying order. """ if old_fieldname not in self: raise Exception("DataTable does not have field `%s`" % old_fieldname) if not isinstance(new_fieldname, basestring): raise ValueError("DataTable fields must be strings, not `%s`" % type(new_fieldname)) if old_fieldname == new_fieldname: return new_names = self.fields location = new_names.index(old_fieldname) del new_names[location] new_names.insert(location, new_fieldname) self.fields = new_names
[ "def", "rename", "(", "self", ",", "old_fieldname", ",", "new_fieldname", ")", ":", "if", "old_fieldname", "not", "in", "self", ":", "raise", "Exception", "(", "\"DataTable does not have field `%s`\"", "%", "old_fieldname", ")", "if", "not", "isinstance", "(", "new_fieldname", ",", "basestring", ")", ":", "raise", "ValueError", "(", "\"DataTable fields must be strings, not `%s`\"", "%", "type", "(", "new_fieldname", ")", ")", "if", "old_fieldname", "==", "new_fieldname", ":", "return", "new_names", "=", "self", ".", "fields", "location", "=", "new_names", ".", "index", "(", "old_fieldname", ")", "del", "new_names", "[", "location", "]", "new_names", ".", "insert", "(", "location", ",", "new_fieldname", ")", "self", ".", "fields", "=", "new_names" ]
Renames a specific field, and preserves the underlying order.
[ "Renames", "a", "specific", "field", "and", "preserves", "the", "underlying", "order", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L656-L675
250,487
emlazzarin/acrylic
acrylic/datatable.py
DataTable.reorder
def reorder(self, fields_in_new_order): """ Pass in field names in the order you wish them to be swapped. """ if not len(fields_in_new_order) == len(self.fields): raise Exception("Fields to reorder with are not the same length " "(%s) as the original fields (%s)" % (len(fields_in_new_order), len(self.fields))) if not set(fields_in_new_order) == set(self.fields): raise Exception("Fields to reorder with should be the same " "as the original fields") new = OrderedDict() for field in fields_in_new_order: new[field] = self.__data[field] self.__data = new
python
def reorder(self, fields_in_new_order): """ Pass in field names in the order you wish them to be swapped. """ if not len(fields_in_new_order) == len(self.fields): raise Exception("Fields to reorder with are not the same length " "(%s) as the original fields (%s)" % (len(fields_in_new_order), len(self.fields))) if not set(fields_in_new_order) == set(self.fields): raise Exception("Fields to reorder with should be the same " "as the original fields") new = OrderedDict() for field in fields_in_new_order: new[field] = self.__data[field] self.__data = new
[ "def", "reorder", "(", "self", ",", "fields_in_new_order", ")", ":", "if", "not", "len", "(", "fields_in_new_order", ")", "==", "len", "(", "self", ".", "fields", ")", ":", "raise", "Exception", "(", "\"Fields to reorder with are not the same length \"", "\"(%s) as the original fields (%s)\"", "%", "(", "len", "(", "fields_in_new_order", ")", ",", "len", "(", "self", ".", "fields", ")", ")", ")", "if", "not", "set", "(", "fields_in_new_order", ")", "==", "set", "(", "self", ".", "fields", ")", ":", "raise", "Exception", "(", "\"Fields to reorder with should be the same \"", "\"as the original fields\"", ")", "new", "=", "OrderedDict", "(", ")", "for", "field", "in", "fields_in_new_order", ":", "new", "[", "field", "]", "=", "self", ".", "__data", "[", "field", "]", "self", ".", "__data", "=", "new" ]
Pass in field names in the order you wish them to be swapped.
[ "Pass", "in", "field", "names", "in", "the", "order", "you", "wish", "them", "to", "be", "swapped", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L677-L691
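A usage sketch for `reorder` (hypothetical data):

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]])
dt.reorder(['c', 'a', 'b'])
print(dt.fields)   # expected: ['c', 'a', 'b']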
250,488
emlazzarin/acrylic
acrylic/datatable.py
DataTable.sample
def sample(self, num): """ Returns a new table with rows randomly sampled. We create a mask with `num` True bools, and fill it with False bools until it is the length of the table. We shuffle it, and apply that mask to the table. """ if num > len(self): return self.copy() elif num < 0: raise IndexError("Cannot sample a negative number of rows " "from a DataTable") random_row_mask = ([True] * num) + ([False] * (len(self) - num)) shuffle(random_row_mask) sampled_table = self.mask(random_row_mask) random_col_name = 'random_sorting_column' while random_col_name in sampled_table: random_col_name = '%030x' % randrange(16**30) sampled_table[random_col_name] = [random() for _ in xrange(len(sampled_table))] sampled_table.sort(random_col_name, inplace=True) del sampled_table[random_col_name] return sampled_table
python
def sample(self, num): """ Returns a new table with rows randomly sampled. We create a mask with `num` True bools, and fill it with False bools until it is the length of the table. We shuffle it, and apply that mask to the table. """ if num > len(self): return self.copy() elif num < 0: raise IndexError("Cannot sample a negative number of rows " "from a DataTable") random_row_mask = ([True] * num) + ([False] * (len(self) - num)) shuffle(random_row_mask) sampled_table = self.mask(random_row_mask) random_col_name = 'random_sorting_column' while random_col_name in sampled_table: random_col_name = '%030x' % randrange(16**30) sampled_table[random_col_name] = [random() for _ in xrange(len(sampled_table))] sampled_table.sort(random_col_name, inplace=True) del sampled_table[random_col_name] return sampled_table
[ "def", "sample", "(", "self", ",", "num", ")", ":", "if", "num", ">", "len", "(", "self", ")", ":", "return", "self", ".", "copy", "(", ")", "elif", "num", "<", "0", ":", "raise", "IndexError", "(", "\"Cannot sample a negative number of rows \"", "\"from a DataTable\"", ")", "random_row_mask", "=", "(", "[", "True", "]", "*", "num", ")", "+", "(", "[", "False", "]", "*", "(", "len", "(", "self", ")", "-", "num", ")", ")", "shuffle", "(", "random_row_mask", ")", "sampled_table", "=", "self", ".", "mask", "(", "random_row_mask", ")", "random_col_name", "=", "'random_sorting_column'", "while", "random_col_name", "in", "sampled_table", ":", "random_col_name", "=", "'%030x'", "%", "randrange", "(", "16", "**", "30", ")", "sampled_table", "[", "random_col_name", "]", "=", "[", "random", "(", ")", "for", "_", "in", "xrange", "(", "len", "(", "sampled_table", ")", ")", "]", "sampled_table", ".", "sort", "(", "random_col_name", ",", "inplace", "=", "True", ")", "del", "sampled_table", "[", "random_col_name", "]", "return", "sampled_table" ]
Returns a new table with rows randomly sampled. We create a mask with `num` True bools, and fill it with False bools until it is the length of the table. We shuffle it, and apply that mask to the table.
[ "Returns", "a", "new", "table", "with", "rows", "randomly", "sampled", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L706-L731
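A usage sketch for `sample`; the row values are hypothetical, and the result is a new table rather than a mutation of the original:

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['x'], [1], [2], [3], [4], [5]])
subset = dt.sample(3)
print(len(subset))   # 3
print(len(dt))       # 5 -- the original table is left untouched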
250,489
emlazzarin/acrylic
acrylic/datatable.py
DataTable.select
def select(self, *cols): """ Returns DataTable with a subset of columns in this table """ return DataTable([cols] + zip(*[self[col] for col in cols]))
python
def select(self, *cols): """ Returns DataTable with a subset of columns in this table """ return DataTable([cols] + zip(*[self[col] for col in cols]))
[ "def", "select", "(", "self", ",", "*", "cols", ")", ":", "return", "DataTable", "(", "[", "cols", "]", "+", "zip", "(", "*", "[", "self", "[", "col", "]", "for", "col", "in", "cols", "]", ")", ")" ]
Returns DataTable with a subset of columns in this table
[ "Returns", "DataTable", "with", "a", "subset", "of", "columns", "in", "this", "table" ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L733-L737
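A usage sketch for `select` (hypothetical columns); the implementation relies on Python 2 semantics of `zip`, so the sketch assumes Python 2 like the library itself:

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]])
narrow = dt.select('a', 'c')
print(narrow.fields)   # expected: ['a', 'c']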
250,490
emlazzarin/acrylic
acrylic/datatable.py
DataTable.sort
def sort(self, fieldname, key=lambda x: x, desc=False, inplace=False): """ This matches Python's built-in sorting signature closely. By default, a new DataTable will be returned and the original will not be mutated. If preferred, specify `inplace=True` in order to mutate the original table. Either way, a reference to the relevant table will be returned. """ try: field_index = tuple(self.fields).index(fieldname) except ValueError: raise ValueError("Sorting on a field that doesn't exist: `%s`" % fieldname) data_cols = izip(*sorted(izip(*[self.__data[field] for field in self.fields]), key=lambda row: key(row[field_index]), reverse=desc)) target_table = self if inplace else DataTable() for field, data_col in izip(self.fields, data_cols): target_table[field] = list(data_col) # Note that sorting in-place still returns a reference # to the table being sorted, for convenience. return target_table
python
def sort(self, fieldname, key=lambda x: x, desc=False, inplace=False): """ This matches Python's built-in sorting signature closely. By default, a new DataTable will be returned and the original will not be mutated. If preferred, specify `inplace=True` in order to mutate the original table. Either way, a reference to the relevant table will be returned. """ try: field_index = tuple(self.fields).index(fieldname) except ValueError: raise ValueError("Sorting on a field that doesn't exist: `%s`" % fieldname) data_cols = izip(*sorted(izip(*[self.__data[field] for field in self.fields]), key=lambda row: key(row[field_index]), reverse=desc)) target_table = self if inplace else DataTable() for field, data_col in izip(self.fields, data_cols): target_table[field] = list(data_col) # Note that sorting in-place still returns a reference # to the table being sorted, for convenience. return target_table
[ "def", "sort", "(", "self", ",", "fieldname", ",", "key", "=", "lambda", "x", ":", "x", ",", "desc", "=", "False", ",", "inplace", "=", "False", ")", ":", "try", ":", "field_index", "=", "tuple", "(", "self", ".", "fields", ")", ".", "index", "(", "fieldname", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Sorting on a field that doesn't exist: `%s`\"", "%", "fieldname", ")", "data_cols", "=", "izip", "(", "*", "sorted", "(", "izip", "(", "*", "[", "self", ".", "__data", "[", "field", "]", "for", "field", "in", "self", ".", "fields", "]", ")", ",", "key", "=", "lambda", "row", ":", "key", "(", "row", "[", "field_index", "]", ")", ",", "reverse", "=", "desc", ")", ")", "target_table", "=", "self", "if", "inplace", "else", "DataTable", "(", ")", "for", "field", ",", "data_col", "in", "izip", "(", "self", ".", "fields", ",", "data_cols", ")", ":", "target_table", "[", "field", "]", "=", "list", "(", "data_col", ")", "# Note that sorting in-place still returns a reference", "# to the table being sorted, for convenience.", "return", "target_table" ]
This matches Python's built-in sorting signature closely. By default, a new DataTable will be returned and the original will not be mutated. If preferred, specify `inplace=True` in order to mutate the original table. Either way, a reference to the relevant table will be returned.
[ "This", "matches", "Python", "s", "built", "-", "in", "sorting", "signature", "closely", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L739-L766
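A usage sketch for `sort` showing both the copying and the in-place forms (hypothetical data):

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['name', 'score'], ['b', 2], ['a', 3], ['c', 1]])
by_score = dt.sort('score', desc=True)   # new table; dt is unchanged
print(by_score['score'])                 # expected: [3, 2, 1]
dt.sort('name', inplace=True)            # mutates dt and returns it
print(dt['name'])                        # expected: ['a', 'b', 'c']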
250,491
emlazzarin/acrylic
acrylic/datatable.py
DataTable.where
def where(self, fieldname, value, negate=False): """ Returns a new DataTable with rows only where the value at `fieldname` == `value`. """ if negate: return self.mask([elem != value for elem in self[fieldname]]) else: return self.mask([elem == value for elem in self[fieldname]])
python
def where(self, fieldname, value, negate=False): """ Returns a new DataTable with rows only where the value at `fieldname` == `value`. """ if negate: return self.mask([elem != value for elem in self[fieldname]]) else: return self.mask([elem == value for elem in self[fieldname]])
[ "def", "where", "(", "self", ",", "fieldname", ",", "value", ",", "negate", "=", "False", ")", ":", "if", "negate", ":", "return", "self", ".", "mask", "(", "[", "elem", "!=", "value", "for", "elem", "in", "self", "[", "fieldname", "]", "]", ")", "else", ":", "return", "self", ".", "mask", "(", "[", "elem", "==", "value", "for", "elem", "in", "self", "[", "fieldname", "]", "]", ")" ]
Returns a new DataTable with rows only where the value at `fieldname` == `value`.
[ "Returns", "a", "new", "DataTable", "with", "rows", "only", "where", "the", "value", "at", "fieldname", "==", "value", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L768-L778
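A usage sketch for `where`, including the `negate` flag (hypothetical data):

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['color', 'n'], ['red', 1], ['blue', 2], ['red', 3]])
reds = dt.where('color', 'red')
not_reds = dt.where('color', 'red', negate=True)  # same result as dt.wherenot('color', 'red')
print(len(reds))      # 2
print(len(not_reds))  # 1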
250,492
emlazzarin/acrylic
acrylic/datatable.py
DataTable.wheregreater
def wheregreater(self, fieldname, value): """ Returns a new DataTable with rows only where the value at `fieldname` > `value`. """ return self.mask([elem > value for elem in self[fieldname]])
python
def wheregreater(self, fieldname, value): """ Returns a new DataTable with rows only where the value at `fieldname` > `value`. """ return self.mask([elem > value for elem in self[fieldname]])
[ "def", "wheregreater", "(", "self", ",", "fieldname", ",", "value", ")", ":", "return", "self", ".", "mask", "(", "[", "elem", ">", "value", "for", "elem", "in", "self", "[", "fieldname", "]", "]", ")" ]
Returns a new DataTable with rows only where the value at `fieldname` > `value`.
[ "Returns", "a", "new", "DataTable", "with", "rows", "only", "where", "the", "value", "at", "fieldname", ">", "value", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L815-L820
250,493
emlazzarin/acrylic
acrylic/datatable.py
DataTable.whereless
def whereless(self, fieldname, value): """ Returns a new DataTable with rows only where the value at `fieldname` < `value`. """ return self.mask([elem < value for elem in self[fieldname]])
python
def whereless(self, fieldname, value): """ Returns a new DataTable with rows only where the value at `fieldname` < `value`. """ return self.mask([elem < value for elem in self[fieldname]])
[ "def", "whereless", "(", "self", ",", "fieldname", ",", "value", ")", ":", "return", "self", ".", "mask", "(", "[", "elem", "<", "value", "for", "elem", "in", "self", "[", "fieldname", "]", "]", ")" ]
Returns a new DataTable with rows only where the value at `fieldname` < `value`.
[ "Returns", "a", "new", "DataTable", "with", "rows", "only", "where", "the", "value", "at", "fieldname", "<", "value", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L822-L827
250,494
emlazzarin/acrylic
acrylic/datatable.py
DataTable.wherenot
def wherenot(self, fieldname, value): """ Logical opposite of `where`. """ return self.where(fieldname, value, negate=True)
python
def wherenot(self, fieldname, value): """ Logical opposite of `where`. """ return self.where(fieldname, value, negate=True)
[ "def", "wherenot", "(", "self", ",", "fieldname", ",", "value", ")", ":", "return", "self", ".", "where", "(", "fieldname", ",", "value", ",", "negate", "=", "True", ")" ]
Logical opposite of `where`.
[ "Logical", "opposite", "of", "where", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L829-L833
250,495
emlazzarin/acrylic
acrylic/datatable.py
DataTable.wherenotin
def wherenotin(self, fieldname, value): """ Logical opposite of `wherein`. """ return self.wherein(fieldname, value, negate=True)
python
def wherenotin(self, fieldname, value): """ Logical opposite of `wherein`. """ return self.wherein(fieldname, value, negate=True)
[ "def", "wherenotin", "(", "self", ",", "fieldname", ",", "value", ")", ":", "return", "self", ".", "wherein", "(", "fieldname", ",", "value", ",", "negate", "=", "True", ")" ]
Logical opposite of `wherein`.
[ "Logical", "opposite", "of", "wherein", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L841-L845
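A combined sketch for the comparison filters above (`wheregreater`, `whereless`, `wherenot`, `wherenotin`); the data is hypothetical, and `wherein`, which `wherenotin` delegates to, is not shown in this excerpt but is assumed to accept any container of values:

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['n'], [1], [2], [3], [4]])
print(len(dt.wheregreater('n', 2)))     # 2 -- rows where n > 2
print(len(dt.whereless('n', 2)))        # 1 -- rows where n < 2
print(len(dt.wherenot('n', 2)))         # 3 -- rows where n != 2
print(len(dt.wherenotin('n', [1, 4])))  # 2 -- rows where n is not among the given values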
250,496
emlazzarin/acrylic
acrylic/datatable.py
DataTable.writexlsx
def writexlsx(self, path, sheetname="default"): """ Writes this table to an .xlsx file at the specified path. If you'd like to specify a sheetname, you may do so. If you'd like to write one workbook with different DataTables for each sheet, import the `excel` function from acrylic. You can see that code in `utils.py`. Note that the outgoing file is an .xlsx file, so it'd make sense to name that way. """ writer = ExcelRW.UnicodeWriter(path) writer.set_active_sheet(sheetname) writer.writerow(self.fields) writer.writerows(self) writer.save()
python
def writexlsx(self, path, sheetname="default"): """ Writes this table to an .xlsx file at the specified path. If you'd like to specify a sheetname, you may do so. If you'd like to write one workbook with different DataTables for each sheet, import the `excel` function from acrylic. You can see that code in `utils.py`. Note that the outgoing file is an .xlsx file, so it'd make sense to name that way. """ writer = ExcelRW.UnicodeWriter(path) writer.set_active_sheet(sheetname) writer.writerow(self.fields) writer.writerows(self) writer.save()
[ "def", "writexlsx", "(", "self", ",", "path", ",", "sheetname", "=", "\"default\"", ")", ":", "writer", "=", "ExcelRW", ".", "UnicodeWriter", "(", "path", ")", "writer", ".", "set_active_sheet", "(", "sheetname", ")", "writer", ".", "writerow", "(", "self", ".", "fields", ")", "writer", ".", "writerows", "(", "self", ")", "writer", ".", "save", "(", ")" ]
Writes this table to an .xlsx file at the specified path. If you'd like to specify a sheetname, you may do so. If you'd like to write one workbook with different DataTables for each sheet, import the `excel` function from acrylic. You can see that code in `utils.py`. Note that the outgoing file is an .xlsx file, so it'd make sense to name that way.
[ "Writes", "this", "table", "to", "an", ".", "xlsx", "file", "at", "the", "specified", "path", "." ]
08c6702d73b9660ead1024653f4fa016f6340e46
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L856-L873
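A usage sketch for `writexlsx` (hypothetical file paths and sheet name):

from acrylic.datatable import DataTable  # import path assumed

dt = DataTable([['name', 'score'], ['a', 1], ['b', 2]])
dt.writexlsx('scores.xlsx')                          # one sheet named "default"
dt.writexlsx('scores_run1.xlsx', sheetname='run_1')  # custom sheet name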
250,497
KnowledgeLinks/rdfframework
rdfframework/datatypes/uriformatters.py
pyuri_formatter
def pyuri_formatter(namespace, value): """ Formats a namespace and ending value into a python friendly format args: namespace: RdfNamespace or tuple in the format of (prefix, uri,) value: end value to attach to the namespace """ if namespace[0]: return "%s_%s" %(namespace[0], value) else: return "pyuri_%s_%s" % (base64.b64encode(bytes(namespace[1], "utf-8")).decode(), value)
python
def pyuri_formatter(namespace, value): """ Formats a namespace and ending value into a python friendly format args: namespace: RdfNamespace or tuple in the format of (prefix, uri,) value: end value to attach to the namespace """ if namespace[0]: return "%s_%s" %(namespace[0], value) else: return "pyuri_%s_%s" % (base64.b64encode(bytes(namespace[1], "utf-8")).decode(), value)
[ "def", "pyuri_formatter", "(", "namespace", ",", "value", ")", ":", "if", "namespace", "[", "0", "]", ":", "return", "\"%s_%s\"", "%", "(", "namespace", "[", "0", "]", ",", "value", ")", "else", ":", "return", "\"pyuri_%s_%s\"", "%", "(", "base64", ".", "b64encode", "(", "bytes", "(", "namespace", "[", "1", "]", ",", "\"utf-8\"", ")", ")", ".", "decode", "(", ")", ",", "value", ")" ]
Formats a namespace and ending value into a python friendly format args: namespace: RdfNamespace or tuple in the format of (prefix, uri,) value: end value to attach to the namespace
[ "Formats", "a", "namespace", "and", "ending", "value", "into", "a", "python", "friendly", "format" ]
9ec32dcc4bed51650a4b392cc5c15100fef7923a
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/datatypes/uriformatters.py#L38-L50
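A usage sketch for `pyuri_formatter` showing both branches (import path inferred from the file location; the namespace tuples are hypothetical):

from rdfframework.datatypes.uriformatters import pyuri_formatter  # import path assumed

print(pyuri_formatter(('schema', 'http://schema.org/'), 'Person'))
# expected: 'schema_Person'
print(pyuri_formatter(('', 'http://example.org/ns#'), 'Thing'))
# with no prefix, falls back to 'pyuri_<base64-of-the-uri>_Thing'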
250,498
dlancer/django-pages-cms
pages/models/pagecontenttypes.py
PageMetaContent.robots
def robots(self): """Return values for robots html meta key""" r = 'noindex' if self.is_noindex else 'index' r += ',' r += 'nofollow' if self.is_nofollow else 'follow' return r
python
def robots(self): """Return values for robots html meta key""" r = 'noindex' if self.is_noindex else 'index' r += ',' r += 'nofollow' if self.is_nofollow else 'follow' return r
[ "def", "robots", "(", "self", ")", ":", "r", "=", "'noindex'", "if", "self", ".", "is_noindex", "else", "'index'", "r", "+=", "','", "r", "+=", "'nofollow'", "if", "self", ".", "is_nofollow", "else", "'follow'", "return", "r" ]
Return values for robots html meta key
[ "Return", "values", "for", "robots", "html", "meta", "key" ]
441fad674d5ad4f6e05c953508950525dc0fa789
https://github.com/dlancer/django-pages-cms/blob/441fad674d5ad4f6e05c953508950525dc0fa789/pages/models/pagecontenttypes.py#L115-L120
250,499
rackerlabs/rackspace-python-neutronclient
neutronclient/neutron/v2_0/fw/firewallrule.py
_add_common_args
def _add_common_args(parser, is_create=True): """If is_create is True, protocol and action become mandatory arguments. CreateCommand = is_create : True UpdateCommand = is_create : False """ parser.add_argument( '--name', help=_('Name for the firewall rule.')) parser.add_argument( '--description', help=_('Description for the firewall rule.')) parser.add_argument( '--source-ip-address', help=_('Source IP address or subnet.')) parser.add_argument( '--destination-ip-address', help=_('Destination IP address or subnet.')) parser.add_argument( '--source-port', help=_('Source port (integer in [1, 65535] or range in a:b).')) parser.add_argument( '--destination-port', help=_('Destination port (integer in [1, 65535] or range in ' 'a:b).')) utils.add_boolean_argument( parser, '--enabled', dest='enabled', help=_('Whether to enable or disable this rule.')) parser.add_argument( '--protocol', choices=['tcp', 'udp', 'icmp', 'any'], required=is_create, type=utils.convert_to_lowercase, help=_('Protocol for the firewall rule.')) parser.add_argument( '--action', required=is_create, type=utils.convert_to_lowercase, choices=['allow', 'deny', 'reject'], help=_('Action for the firewall rule.'))
python
def _add_common_args(parser, is_create=True): """If is_create is True, protocol and action become mandatory arguments. CreateCommand = is_create : True UpdateCommand = is_create : False """ parser.add_argument( '--name', help=_('Name for the firewall rule.')) parser.add_argument( '--description', help=_('Description for the firewall rule.')) parser.add_argument( '--source-ip-address', help=_('Source IP address or subnet.')) parser.add_argument( '--destination-ip-address', help=_('Destination IP address or subnet.')) parser.add_argument( '--source-port', help=_('Source port (integer in [1, 65535] or range in a:b).')) parser.add_argument( '--destination-port', help=_('Destination port (integer in [1, 65535] or range in ' 'a:b).')) utils.add_boolean_argument( parser, '--enabled', dest='enabled', help=_('Whether to enable or disable this rule.')) parser.add_argument( '--protocol', choices=['tcp', 'udp', 'icmp', 'any'], required=is_create, type=utils.convert_to_lowercase, help=_('Protocol for the firewall rule.')) parser.add_argument( '--action', required=is_create, type=utils.convert_to_lowercase, choices=['allow', 'deny', 'reject'], help=_('Action for the firewall rule.'))
[ "def", "_add_common_args", "(", "parser", ",", "is_create", "=", "True", ")", ":", "parser", ".", "add_argument", "(", "'--name'", ",", "help", "=", "_", "(", "'Name for the firewall rule.'", ")", ")", "parser", ".", "add_argument", "(", "'--description'", ",", "help", "=", "_", "(", "'Description for the firewall rule.'", ")", ")", "parser", ".", "add_argument", "(", "'--source-ip-address'", ",", "help", "=", "_", "(", "'Source IP address or subnet.'", ")", ")", "parser", ".", "add_argument", "(", "'--destination-ip-address'", ",", "help", "=", "_", "(", "'Destination IP address or subnet.'", ")", ")", "parser", ".", "add_argument", "(", "'--source-port'", ",", "help", "=", "_", "(", "'Source port (integer in [1, 65535] or range in a:b).'", ")", ")", "parser", ".", "add_argument", "(", "'--destination-port'", ",", "help", "=", "_", "(", "'Destination port (integer in [1, 65535] or range in '", "'a:b).'", ")", ")", "utils", ".", "add_boolean_argument", "(", "parser", ",", "'--enabled'", ",", "dest", "=", "'enabled'", ",", "help", "=", "_", "(", "'Whether to enable or disable this rule.'", ")", ")", "parser", ".", "add_argument", "(", "'--protocol'", ",", "choices", "=", "[", "'tcp'", ",", "'udp'", ",", "'icmp'", ",", "'any'", "]", ",", "required", "=", "is_create", ",", "type", "=", "utils", ".", "convert_to_lowercase", ",", "help", "=", "_", "(", "'Protocol for the firewall rule.'", ")", ")", "parser", ".", "add_argument", "(", "'--action'", ",", "required", "=", "is_create", ",", "type", "=", "utils", ".", "convert_to_lowercase", ",", "choices", "=", "[", "'allow'", ",", "'deny'", ",", "'reject'", "]", ",", "help", "=", "_", "(", "'Action for the firewall rule.'", ")", ")" ]
If is_create is True, protocol and action become mandatory arguments. CreateCommand = is_create : True UpdateCommand = is_create : False
[ "If", "is_create", "is", "True", "protocol", "and", "action", "become", "mandatory", "arguments", "." ]
5a5009a8fe078e3aa1d582176669f1b28ab26bef
https://github.com/rackerlabs/rackspace-python-neutronclient/blob/5a5009a8fe078e3aa1d582176669f1b28ab26bef/neutronclient/neutron/v2_0/fw/firewallrule.py#L24-L62
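A usage sketch for `_add_common_args` wired into a plain `argparse` parser (import path inferred from the file location; the argument values are hypothetical):

import argparse
from neutronclient.neutron.v2_0.fw.firewallrule import _add_common_args  # import path assumed

parser = argparse.ArgumentParser()
_add_common_args(parser, is_create=True)   # makes --protocol and --action required
args = parser.parse_args(['--protocol', 'TCP', '--action', 'allow',
                          '--destination-port', '80'])
print(args.protocol)          # 'tcp' -- lowercased by utils.convert_to_lowercase before the choices check
print(args.destination_port)  # '80'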