code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def check_perm(self, request, resource):
    """Ensure the requesting user may access *resource*.

    @param request: the HTTP request
    @param resource: the requested resource
    @raise Forbidden: if the user lacks the required permission
    """
    required = self.get_perm_name(resource, request.method)
    if self._has_perm(request.user, required):
        return
    raise errors.Forbidden()
Check permission @param request the HTTP request @param resource the requested resource @raise Forbidden if the user doesn't have access to the resource
def _has_perm(self, user, permission): if user.is_superuser: return True if user.is_active: perms = [perm.split('.')[1] for perm in user.get_all_permissions()] return permission in perms return False
Check whether the user has the given permission @return True if user is granted with access, False if not.
try:
    _INTEGER_TYPES = (int, long)  # noqa: F821 -- Python 2
except NameError:
    # Python 3: ``long`` was merged into ``int``.
    _INTEGER_TYPES = (int,)


def wash_url_argument(var, new_type):
    """Coerce ``var`` into ``new_type``.

    @param var: variable value
    @param new_type: one of 'list', 'str', 'int', 'tuple' or 'dict'
    @return: ``var`` converted, as far as possible, to ``new_type``.
        If ``var`` is a list, its first element is converted for the
        scalar types.  If an int conversion fails, 0 is returned.
    """
    out = []
    if new_type == 'list':  # return lst
        out = var if isinstance(var, list) else [var]
    elif new_type == 'str':  # return str
        if isinstance(var, list):
            # Take the first element; an empty list washes to "".
            try:
                out = "%s" % var[0]
            except IndexError:
                out = ""
        elif isinstance(var, str):
            out = var
        else:
            out = "%s" % var
    elif new_type == 'int':  # return int
        if isinstance(var, list):
            try:
                out = int(var[0])
            except (IndexError, TypeError, ValueError):
                out = 0
        elif isinstance(var, _INTEGER_TYPES):
            out = var
        elif isinstance(var, str):
            try:
                out = int(var)
            except ValueError:
                out = 0
        else:
            out = 0
    elif new_type == 'tuple':  # return tuple
        out = var if isinstance(var, tuple) else (var,)
    elif new_type == 'dict':  # return dictionary
        out = var if isinstance(var, dict) else {0: var}
    return out
Wash argument into 'new_type', that can be 'list', 'str', 'int', 'tuple' or 'dict'. If needed, the check 'type(var) is not None' should be done before calling this function. @param var: variable value @param new_type: variable type, 'list', 'str', 'int', 'tuple' or 'dict' @return: as much as possible, value var as type new_type If var is a list, will change first element into new_type. If int check unsuccessful, returns 0
def is_local_url(target):
    """Return True when *target* resolves to a URL on this site.

    The target is joined onto CFG_SITE_SECURE_URL and accepted only when
    the resulting scheme is http(s) and the host matches the site's own.
    """
    site = cfg.get('CFG_SITE_SECURE_URL')
    reference = urlparse(site)
    candidate = urlparse(urljoin(site, target))
    if candidate.scheme not in ('http', 'https'):
        return False
    return reference.netloc == candidate.netloc
Determine whether the given URL is local to this site.
def get_safe_redirect_target(arg='next'):
    """Return a local URL to redirect to, or None.

    Candidates are the ``arg`` request argument first and the Referer
    header second; the first candidate that is local wins.
    """
    candidates = (request.args.get(arg), request.referrer)
    for candidate in candidates:
        if candidate and is_local_url(candidate):
            return candidate
    return None
Get URL to redirect to and ensure that it is local.
def rewrite_to_secure_url(url, secure_base=None):
    """Rewrite a URL onto the secure site.

    @param url: URL to be rewritten to a secure URL.
    @param secure_base: base URL of the secure site
        (defaults to CFG_SITE_SECURE_URL).
    """
    if secure_base is None:
        secure_base = cfg.get('CFG_SITE_SECURE_URL')
    secure = urlparse(secure_base)
    parts = list(urlparse(url))
    # Graft the secure scheme and host onto the original URL.
    parts[0], parts[1] = secure[0], secure[1]
    return urlunparse(parts)
Rewrite URL to a Secure URL @param url URL to be rewritten to a secure URL. @param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL).
def get_referer(req, replace_ampersands=False):
    """Return the referring page of a request.

    Referer (wikipedia): "Referer is a common misspelling of the word
    'referrer'; so common, in fact, that it made it into the official
    specification of HTTP."

    @param req: request object exposing a ``headers_in`` mapping
    @param replace_ampersands: if 1, replace ``&`` by ``&amp;`` in the URL
        (correct HTML cannot contain ``&`` characters alone)
    @return: the Referer header value, or '' when absent
    """
    try:
        referer = req.headers_in['Referer']
    except KeyError:
        return ''
    if replace_ampersands == 1:
        # BUGFIX: this replacement used to be a no-op ('&' -> '&');
        # the documented behavior is HTML-escaping the ampersands.
        return referer.replace('&', '&amp;')
    return referer
Return the referring page of a request. Referer (wikipedia): Referer is a common misspelling of the word "referrer"; so common, in fact, that it made it into the official specification of HTTP. When visiting a webpage, the referer or referring page is the URL of the previous webpage from which a link was followed. @param req: request @param replace_ampersands: if 1, replace & by &amp; in url (correct HTML cannot contain & characters alone)
def make_canonical_urlargd(urlargd, default_urlargd):
    """Build the query part of an URL from ``urlargd``.

    ``default_urlargd`` maps each argument to a (type, default value)
    tuple (the same mapping accepted by
    webinterface_handler.wash_urlargd).  Arguments carrying their
    default value are dropped so the simplest (canonical) query is
    produced.

    @return: the query string, starting with '?' when any argument
        remains, '' otherwise.
    """
    remaining = drop_default_urlargd(urlargd, default_urlargd)
    if not remaining:
        return ''
    # FIXME double escaping of '&'?
    return '?' + urlencode(remaining, doseq=True)
Build up the query part of an URL from the arguments passed in the 'urlargd' dictionary. 'default_urlargd' is a secondary dictionary which contains tuples of the form (type, default value) for the query arguments (this is the same dictionary as the one you can pass to webinterface_handler.wash_urlargd). When a query element has its default value, it is discarded, so that the simplest (canonical) url query is returned. The result contains the initial '?' if there are actual query items remaining.
def create_html_link(urlbase, urlargd, link_label, linkattrd=None,
                     escape_urlargd=True, escape_linkattrd=True,
                     urlhash=None):
    """Create a W3C compliant anchor element.

    @param urlbase: base url (e.g. config.CFG_SITE_URL/search)
    @param urlargd: dictionary of url parameters (e.g. {'recid': 3})
    @param link_label: text shown in the browser (already escaped)
    @param linkattrd: dictionary of anchor attributes (e.g. {'class': 'img'})
    @param escape_urlargd: escape url arguments when True
    @param escape_linkattrd: escape attribute values when True
    @param urlhash: hash string appended to the link
    """
    href = create_url(urlbase, urlargd, escape_urlargd, urlhash)
    pieces = ['<a href="' + href + '"']
    if linkattrd:
        if escape_linkattrd:
            rendered = [escape(str(k), quote=True) + '="' +
                        escape(str(linkattrd[k]), quote=True) + '"'
                        for k in linkattrd.keys()]
        else:
            rendered = [str(k) + '="' + str(linkattrd[k]) + '"'
                        for k in linkattrd.keys()]
        pieces.append(' ' + ' '.join(rendered))
    markup = wash_for_utf8(''.join(pieces))
    return markup + '>' + wash_for_utf8(link_label) + '</a>'
Creates a W3C compliant link. @param urlbase: base url (e.g. config.CFG_SITE_URL/search) @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}) @param link_label: text displayed in a browser (has to be already escaped) @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'}) @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes &lt; or " becomes &quot;) @param escape_linkattrd: boolean indicating if the function should escape attributes (e.g. < becomes &lt; or " becomes &quot;) @param urlhash: hash string to add at the end of the link
def string_to_numeric_char_reference(string):
    """Encode a string as HTML numeric character references.

    Eg: string_to_numeric_char_reference("abc") == '&#97;&#98;&#99;'

    @param string: text to encode
    @return: the encoded string
    """
    # str.join is linear; the previous repeated ``+=`` was quadratic.
    return "".join("&#%d;" % ord(char) for char in string)
Encode a string to HTML-compatible numeric character reference. Eg: encode_html_entities("abc") == '&#97;&#98;&#99;'
def get_canonical_and_alternates_urls(
        url, drop_ln=True, washed_argd=None, quote_path=False):
    """Given an Invenio URL return (canonical_url, alternate_urls).

    The canonical URL is the original URL rebased onto CFG_SITE_URL,
    with the ``ln=`` argument stripped unless ``drop_ln`` is False.
    ``alternate_urls`` maps each language code in CFG_SITE_LANGS to the
    same URL carrying that ``ln`` value.

    @param washed_argd: pre-parsed query pairs; when given, the URL's
        own query string is ignored
    @param quote_path: if True, the path section of the given C{url}
        is quoted according to RFC 2396
    """
    dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse(
        url)
    canonical_scheme, canonical_netloc = urlparse(cfg.get('CFG_SITE_URL'))[0:2]
    parsed_query = washed_argd or parse_qsl(query)
    no_ln_parsed_query = [(key, value)
                          for (key, value) in parsed_query if key != 'ln']
    if drop_ln:
        canonical_parsed_query = no_ln_parsed_query
    else:
        canonical_parsed_query = parsed_query
    if quote_path:
        # NOTE(review): ``urllib.quote`` is the Python 2 spelling; under
        # Python 3 this needs ``urllib.parse.quote`` -- confirm target.
        path = urllib.quote(path)
    canonical_query = urlencode(canonical_parsed_query)
    canonical_url = urlunparse(
        (canonical_scheme, canonical_netloc, path, dummy_params,
         canonical_query, fragment))
    alternate_urls = {}
    for ln in cfg.get('CFG_SITE_LANGS'):
        # Each alternate carries the ln-free query plus its own ln=.
        alternate_query = urlencode(no_ln_parsed_query + [('ln', ln)])
        alternate_url = urlunparse(
            (canonical_scheme, canonical_netloc, path, dummy_params,
             alternate_query, fragment))
        alternate_urls[ln] = alternate_url
    return canonical_url, alternate_urls
Given an Invenio URL returns a tuple with two elements. The first is the canonical URL, that is the original URL with CFG_SITE_URL prefix, and where the ln= argument stripped. The second element element is mapping, language code -> alternate URL @param quote_path: if True, the path section of the given C{url} is quoted according to RFC 2396
def create_url(urlbase, urlargd, escape_urlargd=True, urlhash=None):
    """Create a W3C compliant URL.

    Output looks like 'urlbase?param1=value1&amp;param2=value2'.

    @param urlbase: base url (e.g. config.CFG_SITE_URL/search)
    @param urlargd: dictionary of parameters (e.g. {'recid': 3})
    @param escape_urlargd: escape the arguments when True
        (e.g. < becomes &lt; or " becomes &quot;)
    @param urlhash: hash string appended to the link
    """
    result = urlbase
    if urlargd:
        if escape_urlargd:
            rendered = [escape(quote(str(k)), quote=True) + '=' +
                        escape(quote(str(urlargd[k])), quote=True)
                        for k in urlargd.keys()]
        else:
            rendered = [str(k) + '=' + str(urlargd[k])
                        for k in urlargd.keys()]
        result += '?' + '&amp;'.join(rendered)
    if urlhash:
        result += "#" + escape(quote(str(urlhash)))
    return result
Creates a W3C compliant URL. Output will look like this: 'urlbase?param1=value1&amp;param2=value2' @param urlbase: base url (e.g. config.CFG_SITE_URL/search) @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'} @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes &lt; or " becomes &quot;) @param urlhash: hash string to add at the end of the link
def same_urls_p(a, b):
    """Compare two URLs, ignoring the order of their query arguments."""
    def normalised(url):
        # Parse, then replace the raw query string with its parsed form
        # so argument ordering no longer matters.
        parts = list(urlparse(url))
        parts[4] = parse_qs(parts[4])
        return parts
    return normalised(a) == normalised(b)
Compare two URLs, ignoring reorganizing of query arguments
def urlargs_replace_text_in_arg(urlargs, regexp_argname, text_old, text_new):
    """In ``urlargs`` (URL CGI GET query arguments in string form),
    replace every substring ``text_old`` by ``text_new`` inside each
    argument whose name matches ``regexp_argname``.  Other arguments are
    kept untouched.  Return the resulting query string.

    Historical helper (once used by the search engine's
    create_nearest_terms_box); kept in case it becomes useful again.
    """
    parsed = parse_qs(urlargs)
    rewritten = {}
    for name, values in parsed.items():
        if re.match(regexp_argname, name):
            # Matching argument: substitute inside every value.
            rewritten[name] = [v.replace(text_old, text_new) for v in values]
        else:
            rewritten[name] = values
    chunks = []
    for name in rewritten.keys():
        for value in rewritten[name]:
            chunks.append(name + "=" + quote_plus(value, ''))
    return "&amp;".join(chunks)
Analyze `urlargs' (URL CGI GET query arguments in string form) and for each occurrence of argument matching `regexp_argname' replace every substring `text_old' by `text_new'. Return the resulting new URL. Used to be used for search engine's create_nearest_terms_box, now it is not used there anymore. It is left here in case it will become possibly useful later.
def make_user_agent_string(component=None):
    """Return a nice and uniform user-agent string to be used when
    Invenio acts as an HTTP client, optionally suffixed with
    *component*."""
    agent = "Invenio-%s (+%s; \"%s\")" % (cfg.get('CFG_VERSION'),
                                          cfg.get('CFG_SITE_URL'),
                                          cfg.get('CFG_SITE_NAME'))
    if not component:
        return agent
    return agent + " %s" % component
Return a nice and uniform user-agent string to be used when Invenio act as a client in HTTP requests.
def make_invenio_opener(component=None):
    """Return an urllib2 opener whose User-agent header is already set
    the Invenio way."""
    opener = urllib2.build_opener()
    agent = make_user_agent_string(component)
    opener.addheaders = [('User-agent', agent)]
    return opener
Return an urllib2 opener with the useragent already set in the appropriate way.
def auto_version_url(file_path):
    """Append a hash of the file's content to the request URL so the
    browser refreshes its cache whenever the file changes.

    @param file_path: path to the file, e.g. js/foo.js
    @return: file_path with the MD5 of its content appended as a query
        string (empty hash when the file cannot be read)
    """
    file_md5 = ""
    try:
        # ``with`` guarantees the handle is closed; the original leaked it.
        with open(cfg.get('CFG_WEBDIR') + os.sep + file_path) as stream:
            file_md5 = md5(stream.read()).hexdigest()
    except IOError:
        pass
    return file_path + "?%s" % file_md5
Appends an MD5 hash of the file's content to the request URL in order for the browser to refresh the cache when the file changes @param file_path: path to the file, e.g js/foo.js @return: file_path with the content hash appended to the URL
def get_relative_url(url):
    """Return the relative part of a URL, never with a trailing '/'.

    For example:
        'http://web.net' -> ''
        'http://web.net/' -> ''
        'http://web.net/1222' -> '/1222'
        'http://web.net/wsadas/asd' -> '/wsadas/asd'

    @param url: a url to transform
    @type url: str
    @return: relative URL
    """
    # Drop the protocol marker, then everything up to the first '/'.
    without_protocol = url.replace("://", "")
    relative = "/" + "/".join(without_protocol.split("/")[1:])
    # Strip the trailing slash ('/' alone becomes '').
    return relative[:-1] if relative[-1] == "/" else relative
Returns the relative URL from a URL. For example: 'http://web.net' -> '' 'http://web.net/' -> '' 'http://web.net/1222' -> '/1222' 'http://web.net/wsadas/asd' -> '/wsadas/asd' It will never return a trailing "/". @param url: A url to transform @type url: str @return: relative URL
def function_arg_count(fn):
    """Return how many positional arguments *fn* declares (1 when the
    callable cannot be introspected, e.g. most builtins)."""
    assert callable(fn), \
        'function_arg_count needed a callable function, not {0}'.format(repr(fn))
    code = getattr(fn, '__code__', None)
    if code is not None and hasattr(code, 'co_argcount'):
        return code.co_argcount
    # No bytecode to inspect: assume a single argument.
    return 1
returns how many arguments a function has
def map(*args):
    """Extended ``map``: works like the builtin, except it can also
    - map multiple functions over one iterable, and
    - run a multi-argument function as a window-based map over a
      single iterable.

    Functions and iterables may be interleaved in ``args``; they are
    told apart with ``callable``.
    """
    functions_to_apply = [i for i in args if callable(i)]
    iterables_to_run = [i for i in args if not callable(i)]
    #print('functions_to_apply:',functions_to_apply)
    #print('iterables_to_run:',iterables_to_run)
    assert len(functions_to_apply)>0, 'at least one function needs to be given to map'
    assert len(iterables_to_run)>0, 'no iterables were given to map'
    # check for native map usage
    if len(functions_to_apply) == 1 and len(iterables_to_run) >= 1 and function_arg_count(*functions_to_apply)==1:
        if hasattr(iter([]), '__next__'):  # if python 3
            return __builtins__.map(functions_to_apply[0], *iterables_to_run)
        else:
            # Python 2: wrap the eager list so both versions return an iterator.
            return iter(__builtins__.map(functions_to_apply[0], *iterables_to_run))
    # ---------------------------- new logic below ----------------------------
    # logic for a single function
    elif len(functions_to_apply) == 1:
        fn = functions_to_apply[0]
        # if there is a single iterable, chop it up
        if len(iterables_to_run) == 1:
            # Window size matches the function's arity; each window is
            # unpacked into the function's arguments.
            return (fn(*i) for i in window(iterables_to_run[0], function_arg_count(functions_to_apply[0])))
    # logic for more than 1 function
    elif len(functions_to_apply) > 1 and len(iterables_to_run) == 1:
        return multi_ops(*(iterables_to_run + functions_to_apply))
    else:
        raise ValueError('invalid usage of map()')
this map works just like the builtin.map, except, this one you can also: - give it multiple functions to map over an iterable - give it a single function with multiple arguments to run a window based map operation over an iterable
def merge(left, right, how='inner', key=None, left_key=None, right_key=None,
          left_as='left', right_as='right'):
    """Perform a join of *left* and *right* using the union join
    function.

    Thin wrapper over ``join`` with ``join_fn`` preset to a union join
    built from ``left_as`` / ``right_as``.
    """
    return join(left, right, how, key, left_key, right_key,
                join_fn=make_union_join(left_as, right_as))
Performs a join using the union join function.
def join(left, right, how='inner', key=None, left_key=None, right_key=None,
         join_fn=tuple_join):
    """Join two iterables.

    :param left: left iterable to be joined
    :param right: right iterable to be joined
    :param str | function key: attr name, dict key, or function that
        produces a hashable value; used for both sides unless left_key
        and right_key are given
    :param how: 'inner', 'left', 'right', or 'outer'
    :param join_fn: function called on joined left and right items to
        complete the join
    :rtype: list
    :raises ValueError: on missing keys or an unknown ``how``
    """
    if key is None and (left_key is None or right_key is None):
        raise ValueError(
            "Must provide either key param or both left_key and right_key")
    if key is not None:
        lkey = rkey = key if callable(key) else make_key_fn(key)
    else:
        lkey = left_key if callable(left_key) else make_key_fn(left_key)
        rkey = right_key if callable(right_key) else make_key_fn(right_key)
    implementations = {
        "left": _left_join,
        "right": _right_join,
        "inner": _inner_join,
        "outer": _outer_join,
    }
    if how not in implementations:
        raise ValueError("Invalid value for how: {}, must be left, right, "
                         "inner, or outer.".format(str(how)))
    return implementations[how](left, right, lkey, rkey, join_fn)
:param left: left iterable to be joined :param right: right iterable to be joined :param str | function key: either an attr name, dict key, or function that produces hashable value :param how: 'inner', 'left', 'right', or 'outer' :param join_fn: function called on joined left and right iterable items to complete join :rtype: list
def _inner_join(left, right, left_key_fn, right_key_fn, join_fn=union_join): joiner = defaultdict(list) for ele in right: joiner[right_key_fn(ele)].append(ele) joined = [] for ele in left: for other in joiner[left_key_fn(ele)]: joined.append(join_fn(ele, other)) return joined
Inner join using left and right key functions :param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list
def _right_join(left, right, left_key_fn, right_key_fn, join_fn=union_join):
    """Right join: delegate to ``_left_join`` with the sides swapped,
    flipping the join function's argument order back to (left, right).

    :param left: left iterable to be joined
    :param right: right iterable to be joined
    :param function left_key_fn: produces a hashable key from left items
    :param function right_key_fn: produces a hashable key from right items
    :param join_fn: called on joined left and right items
    :rtype: list
    """
    def flipped(first, second):
        # _left_join hands us (right_item, left_item); restore the order.
        return join_fn(second, first)
    return _left_join(right, left, right_key_fn, left_key_fn, flipped)
:param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list
def _outer_join(left, right, left_key_fn, right_key_fn, join_fn=union_join): left_joiner = defaultdict(list) for ele in left: left_joiner[left_key_fn(ele)].append(ele) right_joiner = defaultdict(list) for ele in right: right_joiner[right_key_fn(ele)].append(ele) keys = set(left_joiner.keys()).union(set(right_joiner.keys())) def iter_join(l, r, join_keys): for join_key in join_keys: for ele in l.get(join_key, [None]): for other in r.get(join_key, [None]): yield join_fn(ele, other) return list(iter_join(left_joiner, right_joiner, keys))
:param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list
def group(iterable, key=lambda ele: ele):
    """Group *iterable* by an attribute/key name or a key-access
    function, returning tuples of grouped elements.

    :param iterable: iterable to be grouped
    :param key: a key-access function, or an attr/dict-key name used to
        build one
    """
    key_fn = key if callable(key) else make_key_fn(key)
    return _group(iterable, key_fn)
Groups an iterable by a specified attribute, or using a specified key access function. Returns tuples of grouped elements. >>> dogs = [Dog('gatsby', 'Rruff!', 15), Dog('william', 'roof', 12), Dog('edward', 'hi', 15)] >>> group(dogs, 'weight') [(Dog('gatsby', 'Rruff!', 15), Dog('edward', 'hi', 15)), (Dog('william', 'roof', 12), )] :param iterable: iterable to be grouped :param key: a key-access function or attr name to be used as a group key
def trigger_keyphrases(
    text = None,                   # input text to parse
    keyphrases = None,             # keyphrases for parsing input text
    response = None,               # optional text response on trigger
    function = None,               # optional function on trigger
    kwargs = None,                 # optional function keyword arguments
    confirm = False,               # optional return of confirmation
    confirmation_prompt = "Do you want to continue? (y/n)",
    confirmation_feedback_confirm = "confirm",
    confirmation_feedback_deny = "deny"
):
    """Parse input text for keyphrases.

    If any keyphrase is found, respond with text, by seeking
    confirmation, or by engaging a function with optional keyword
    arguments.  Return text or True if triggered and False if not.
    If confirmation is required, a confirmation object is returned,
    encapsulating the function and its optional arguments.
    """
    if any(pattern in text for pattern in keyphrases):
        if confirm:
            # Defer the action: hand back a confirmation wrapper instead
            # of running the function now.
            return confirmation(
                prompt = confirmation_prompt,
                feedback_confirm = confirmation_feedback_confirm,
                feedback_deny = confirmation_feedback_deny,
                function = function,
                kwargs = kwargs
            )
        if function and not kwargs:
            result = function()
        elif function and kwargs:
            result = function(**kwargs)
        else:
            result = None
        if response:
            return response
        elif not response and result:
            # No canned response: report the function's result as text.
            return str(result)
        else:
            return True
    else:
        return False
Parse input text for keyphrases. If any keyphrases are found, respond with text or by seeking confirmation or by engaging a function with optional keyword arguments. Return text or True if triggered and return False if not triggered. If confirmation is required, a confirmation object is returned, encapsulating a function and its optional arguments.
def parse_networking(
    text = None
):
    """Parse *text* for networking commands (reverse SSH).

    Access address and port parameters via the builtins module; without
    both, no triggers are armed.  Returns a single trigger response, a
    list of responses, or False when nothing triggered.
    """
    try:
        # NOTE(review): address/port appear to be injected onto the
        # builtins module elsewhere in the program -- confirm.
        address = _builtins.address
        port = _builtins.port
    except:
        address = None
        port = None
    triggers = []
    if address and port:
        triggers.extend([
            trigger_keyphrases(
                text = text,
                keyphrases = [
                    "reverse SSH",
                    "reverse ssh"
                ],
                function = engage_command,
                kwargs = {"command": "ssh -R " + str(port) + ":localhost:22 " + address},
                confirm = True,
                confirmation_prompt = "Do you want to reverse SSH "
                                      "connect? (y/n)",
                confirmation_feedback_confirm = "confirm reverse SSH connect: "
                                                "ssh localhost -p " + str(port),
                confirmation_feedback_deny = "deny reverse SSH connect"
            )
        ])
    if any(triggers):
        # Collapse to a single value when only one trigger fired.
        responses = [response for response in triggers if response]
        if len(responses) > 1:
            return responses
        else:
            return responses[0]
    else:
        return False
Access address and port parameters via the builtins or __builtin__ module. Relish the nonsense.
def multiparse(
    text = None,
    parsers = None,
    help_message = None
):
    """Parse input text by looping over a list of parsers.

    If one trigger fires, return the value it returned; if several fire,
    return a list of their values.  If none fire, return the optional
    ``help_message``, or False.
    """
    if parsers is None:
        # Resolved lazily: avoids a shared mutable default and an import-time
        # reference to ``parse``.
        parsers = [parse]
    responses = []
    for _parser in parsers:
        response = _parser(text = text)
        if response is not False:
            # BUGFIX: ``response is list`` was always False (identity test
            # against the ``list`` type), so list responses were nested
            # instead of merged into the flat result.
            responses.extend(response if isinstance(response, list) else [response])
    if not any(responses):
        if help_message:
            return help_message
        else:
            return False
    else:
        if len(responses) > 1:
            return responses
        else:
            return responses[0]
Parse input text by looping over a list of multiple parsers. If one trigger is triggered, return the value returned by that trigger, if multiple triggers are triggered, return a list of the values returned by those triggers. If no triggers are triggered, return False or an optional help message.
def run(
    self
):
    """Engage the contained function with its optional keyword
    arguments; return its result, or None when no function is set."""
    if not self._function:
        return None
    if self._kwargs:
        return self._function(**self._kwargs)
    return self._function()
Engage contained function with optional keyword arguments.
def tax_class_based_on(self, tax_class_based_on):
    """Sets the tax_class_based_on of this TaxSettings.

    :param tax_class_based_on: The tax_class_based_on of this
        TaxSettings.
    :type: str
    :raises ValueError: if the value is not None and not one of the
        allowed values
    """
    allowed_values = ["shippingAddress", "billingAddress"]  # noqa: E501
    acceptable = tax_class_based_on is None or tax_class_based_on in allowed_values
    if not acceptable:
        raise ValueError(
            "Invalid value for `tax_class_based_on` ({0}), must be one of {1}"  # noqa: E501
            .format(tax_class_based_on, allowed_values)
        )
    self._tax_class_based_on = tax_class_based_on
Sets the tax_class_based_on of this TaxSettings. :param tax_class_based_on: The tax_class_based_on of this TaxSettings. :type: str
def create_fixed_rate_shipping(cls, fixed_rate_shipping, **kwargs):
    """Create FixedRateShipping.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param FixedRateShipping fixed_rate_shipping: Attributes of
        fixedRateShipping to create (required)
    :return: FixedRateShipping
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._create_fixed_rate_shipping_with_http_info(
        fixed_rate_shipping, **kwargs)
Create FixedRateShipping Create a new FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_fixed_rate_shipping(fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to create (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread.
def delete_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, **kwargs):
    """Delete an instance of FixedRateShipping by its ID.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str fixed_rate_shipping_id: ID of fixedRateShipping to
        delete. (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._delete_fixed_rate_shipping_by_id_with_http_info(
        fixed_rate_shipping_id, **kwargs)
Delete FixedRateShipping Delete an instance of FixedRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_fixed_rate_shipping_by_id(fixed_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, **kwargs):
    """Return a single instance of FixedRateShipping by its ID.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str fixed_rate_shipping_id: ID of fixedRateShipping to
        return (required)
    :return: FixedRateShipping
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._get_fixed_rate_shipping_by_id_with_http_info(
        fixed_rate_shipping_id, **kwargs)
Find FixedRateShipping Return single instance of FixedRateShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_fixed_rate_shipping_by_id(fixed_rate_shipping_id, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to return (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread.
def list_all_fixed_rate_shippings(cls, **kwargs):
    """Return a list of FixedRateShippings.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[FixedRateShipping]
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._list_all_fixed_rate_shippings_with_http_info(**kwargs)
List FixedRateShippings Return a list of FixedRateShippings This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_fixed_rate_shippings(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[FixedRateShipping] If the method is called asynchronously, returns the request thread.
def replace_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, fixed_rate_shipping, **kwargs):
    """Replace all attributes of FixedRateShipping.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str fixed_rate_shipping_id: ID of fixedRateShipping to
        replace (required)
    :param FixedRateShipping fixed_rate_shipping: Attributes of
        fixedRateShipping to replace (required)
    :return: FixedRateShipping
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._replace_fixed_rate_shipping_by_id_with_http_info(
        fixed_rate_shipping_id, fixed_rate_shipping, **kwargs)
Replace FixedRateShipping Replace all attributes of FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_fixed_rate_shipping_by_id(fixed_rate_shipping_id, fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to replace (required) :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to replace (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread.
def update_fixed_rate_shipping_by_id(cls, fixed_rate_shipping_id, fixed_rate_shipping, **kwargs):
    """Update attributes of FixedRateShipping.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str fixed_rate_shipping_id: ID of fixedRateShipping to
        update. (required)
    :param FixedRateShipping fixed_rate_shipping: Attributes of
        fixedRateShipping to update. (required)
    :return: FixedRateShipping
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._update_fixed_rate_shipping_by_id_with_http_info(
        fixed_rate_shipping_id, fixed_rate_shipping, **kwargs)
Update FixedRateShipping Update attributes of FixedRateShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_fixed_rate_shipping_by_id(fixed_rate_shipping_id, fixed_rate_shipping, async=True) >>> result = thread.get() :param async bool :param str fixed_rate_shipping_id: ID of fixedRateShipping to update. (required) :param FixedRateShipping fixed_rate_shipping: Attributes of fixedRateShipping to update. (required) :return: FixedRateShipping If the method is called asynchronously, returns the request thread.
def create_wish_list(cls, wish_list, **kwargs):
    """Create a new WishList.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param WishList wish_list: Attributes of wishList to create (required)
    :return: WishList
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._create_wish_list_with_http_info(wish_list, **kwargs)
Create WishList Create a new WishList This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_wish_list(wish_list, async=True) >>> result = thread.get() :param async bool :param WishList wish_list: Attributes of wishList to create (required) :return: WishList If the method is called asynchronously, returns the request thread.
def delete_wish_list_by_id(cls, wish_list_id, **kwargs):
    """Delete an instance of WishList by its ID.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str wish_list_id: ID of wishList to delete. (required)
    :return: None
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._delete_wish_list_by_id_with_http_info(wish_list_id, **kwargs)
Delete WishList Delete an instance of WishList by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_wish_list_by_id(wish_list_id, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_wish_list_by_id(cls, wish_list_id, **kwargs):
    """Return a single instance of WishList by its ID.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str wish_list_id: ID of wishList to return (required)
    :return: WishList
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._get_wish_list_by_id_with_http_info(wish_list_id, **kwargs)
Find WishList Return single instance of WishList by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_wish_list_by_id(wish_list_id, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to return (required) :return: WishList If the method is called asynchronously, returns the request thread.
def list_all_wish_lists(cls, **kwargs):
    """Return a list of WishLists.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[WishList]
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._list_all_wish_lists_with_http_info(**kwargs)
List WishLists Return a list of WishLists This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_wish_lists(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[WishList] If the method is called asynchronously, returns the request thread.
def replace_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs):
    """Replace all attributes of WishList.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str wish_list_id: ID of wishList to replace (required)
    :param WishList wish_list: Attributes of wishList to replace (required)
    :return: WishList
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._replace_wish_list_by_id_with_http_info(
        wish_list_id, wish_list, **kwargs)
Replace WishList Replace all attributes of WishList This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_wish_list_by_id(wish_list_id, wish_list, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to replace (required) :param WishList wish_list: Attributes of wishList to replace (required) :return: WishList If the method is called asynchronously, returns the request thread.
def update_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs):
    """Update attributes of WishList.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead (``thread.get()`` yields the result).

    :param async bool
    :param str wish_list_id: ID of wishList to update. (required)
    :param WishList wish_list: Attributes of wishList to update. (required)
    :return: WishList
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async paths both return the helper's result directly.
    return cls._update_wish_list_by_id_with_http_info(
        wish_list_id, wish_list, **kwargs)
Update WishList Update attributes of WishList This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True) >>> result = thread.get() :param async bool :param str wish_list_id: ID of wishList to update. (required) :param WishList wish_list: Attributes of wishList to update. (required) :return: WishList If the method is called asynchronously, returns the request thread.
def task(arg = None):
    """Task decorator.

    Wraps a function so that running it prints an indented, colored
    header, and records task metadata (``_task``, ``_spec``, ``_desc``)
    on the wrapper for later discovery.  Usable bare (``@task``) or with
    an info string (``@task('info')``).
    """
    # make sure stdout is patched
    if not hasattr(sys.stdout, 'indent_level'):
        sys.stdout = IndentedFile(sys.stdout)
    def decorator(base):
        info = ': ' + arg if type(arg) is str else ''
        header = fore.green('** ' + fore.cyan(base.__name__) + info)
        def func(*args, **kwargs):
            # Nest output one level deeper while the task runs.
            sys.stdout.indent_level += 1
            puts(header)
            base(*args, **kwargs)
            sys.stdout.indent_level -= 1
        # Signature string shown in task listings, without parentheses.
        params = inspect.formatargspec(*inspect.getargspec(base))[1:-1]
        specformat = fore.cyan('%s') + ' ' + fore.white('%s')
        func._task = True
        func._spec = specformat % (base.__name__, params)
        func._desc = re.sub('\s+', ' ', inspect.getdoc(base) or '')
        return func
    if type(arg) == types.FunctionType:
        # Bare ``@task`` usage: ``arg`` is the decorated function itself.
        return decorator(arg)
    else:
        return decorator
Task decorator
def recurse_up(directory, filename):
    """Walk *directory* upwards towards the root until a directory
    containing *filename* is found.

    @param directory: starting directory (made absolute first)
    @param filename: file name to look for
    @return: the containing directory, or False when the root is
        reached without a match
    """
    directory = osp.abspath(directory)
    while True:
        if osp.isfile(osp.join(directory, filename)):
            return directory
        parent = osp.dirname(directory)
        if parent == directory:
            # dirname() is a fixed point only at the filesystem root;
            # the previous ``directory == '/'`` test never terminated on
            # Windows drive roots.
            return False
        directory = parent
Recursive walk a directory up to root until it contains `filename`
def etree_to_dict(tree):
    """Translate an etree element into a nested dictionary.

    Leaf elements map their (namespace-stripped) tag to their text;
    elements with children map the tag to a list of child dictionaries.

    :param tree: etree element object
    :type tree: <http://lxml.de/api/lxml.etree-module.html>
    """
    # Materialise the children list: under Python 3 a lazy ``map`` object
    # is always truthy, which made leaf nodes return an empty iterator
    # instead of their text.
    children = [etree_to_dict(child) for child in tree.iterchildren()]
    # split('}')[-1] also handles tags without a namespace prefix, which
    # used to raise IndexError.
    tag = tree.tag.split('}')[-1]
    return {tag: children or tree.text}
Translate etree into dictionary. :param tree: etree dictionary object :type tree: <http://lxml.de/api/lxml.etree-module.html>
def csv(
        self,
        filepath=None
):
    """*Render the data in CSV format*

    **Key Arguments:**
        - ``filepath`` -- path to the file to write the csv content to.
          Default *None*

    **Return:**
        - ``renderedData`` -- the data rendered in csv format
    """
    self.log.debug('starting the ``csv`` method')
    renderedData = self._list_of_dictionaries_to_csv("machine")
    shouldWrite = filepath and renderedData != "NO MATCH"
    if shouldWrite:
        # RECURSIVELY CREATE MISSING DIRECTORIES
        parentDirectory = os.path.dirname(filepath)
        if not os.path.exists(parentDirectory):
            os.makedirs(parentDirectory)
        writeFile = codecs.open(filepath, encoding='utf-8', mode='w')
        writeFile.write(renderedData)
        writeFile.close()
    self.log.debug('completed the ``csv`` method')
    return renderedData
*Render the data in CSV format* **Key Arguments:** - ``filepath`` -- path to the file to write the csv content to. Default *None* **Return:** - ``renderedData`` -- the data rendered in csv format **Usage:** To render the data set as csv: .. code-block:: python print dataSet.csv() .. code-block:: text owner,pet,address daisy,dog,"belfast, uk" john,snake,the moon susan,crocodile,larne and to save the csv rendering to file: .. code-block:: python dataSet.csv("/path/to/myfile.csv")
def json(
        self,
        filepath=None):
    """Render the dataset as JSON; optionally write it to ``filepath``.

    datetime values are serialised as ``YYYYMMDDtHHMMSS`` strings.

    :param filepath: path of the file to write the JSON to (default None)
    :return: the rendered JSON string
    """
    self.log.debug('starting the ``json`` method')

    dataCopy = copy.deepcopy(self.listOfDictionaries)
    for d in dataCopy:
        # ``items`` (not the Python-2-only ``iteritems``) keeps this
        # working on both Python 2 and Python 3
        for k, v in d.items():
            if isinstance(v, datetime):
                d[k] = v.strftime("%Y%m%dt%H%M%S")

    renderedData = json.dumps(
        dataCopy,
        separators=(',', ': '),
        sort_keys=True,
        indent=4
    )

    if filepath and len(self.listOfDictionaries):
        # recursively create any missing parent directories
        if not os.path.exists(os.path.dirname(filepath)):
            os.makedirs(os.path.dirname(filepath))
        writeFile = codecs.open(filepath, encoding='utf-8', mode='w')
        writeFile.write(renderedData)
        writeFile.close()

    self.log.debug('completed the ``json`` method')
    return renderedData
*Render the data in json format* **Key Arguments:** - ``filepath`` -- path to the file to write the json content to. Default *None* **Return:** - ``renderedData`` -- the data rendered as json **Usage:** To render the data set as json: .. code-block:: python print dataSet.json() .. code-block:: json [ { "address": "belfast, uk", "owner": "daisy", "pet": "dog" }, { "address": "the moon", "owner": "john", "pet": "snake" }, { "address": "larne", "owner": "susan", "pet": "crocodile" } ] and to save the json rendering to file: .. code-block:: python dataSet.json("/path/to/myfile.json")
def yaml(
        self,
        filepath=None):
    """Render the dataset as YAML; optionally write it to ``filepath``.

    :param filepath: path of the file to write the YAML to (default None)
    :return: the rendered YAML string
    """
    self.log.debug('starting the ``yaml`` method')

    dataCopy = [dict(l) for l in self.listOfDictionaries]
    renderedData = yaml.dump(dataCopy, default_flow_style=False)

    if filepath and len(self.listOfDictionaries):
        # recursively create any missing parent directories
        if not os.path.exists(os.path.dirname(filepath)):
            os.makedirs(os.path.dirname(filepath))
        # ``open`` instead of the Python-2-only ``file`` builtin, and write
        # the already-rendered string instead of dumping a second time
        stream = open(filepath, 'w')
        stream.write(renderedData)
        stream.close()

    self.log.debug('completed the ``yaml`` method')
    return renderedData
*Render the data in yaml format*

**Key Arguments:**
    - ``filepath`` -- path to the file to write the yaml content to. Default *None*

**Return:**
    - ``renderedData`` -- the data rendered as yaml

**Usage:**

To render the data set as yaml:

.. code-block:: python

    print dataSet.yaml()

.. code-block:: yaml

    - address: belfast, uk
      owner: daisy
      pet: dog
    - address: the moon
      owner: john
      pet: snake
    - address: larne
      owner: susan
      pet: crocodile

and to save the yaml rendering to file:

.. code-block:: python

    dataSet.yaml("/path/to/myfile.yaml")
def _list_of_dictionaries_to_mysql_inserts(
        self,
        tableName,
        createStatement=None):
    """Convert the list of dictionaries into MySQL INSERT statements.

    @param tableName: the table to create the insert statements for
    @param createStatement: optional CREATE TABLE statement prepended to
        the output. Default *None*
    @return: the MySQL insert statements as a single string, or
        "NO MATCH" when there is no data
    """
    # start-of-method log previously (and misleadingly) said "completed"
    self.log.debug(
        'starting the ``_list_of_dictionaries_to_mysql_inserts`` method')

    if not len(self.listOfDictionaries):
        return "NO MATCH"

    dataCopy = copy.deepcopy(self.listOfDictionaries)

    if createStatement:
        output = createStatement + "\n"
    else:
        output = ""

    inserts = [convert_dictionary_to_mysql_table(
        log=self.log,
        dictionary=d,
        dbTableName=tableName,
        uniqueKeyList=[],
        dateModified=False,
        returnInsertOnly=True,
        replace=True,
        batchInserts=False,
        reDatetime=self.reDatetime) for d in dataCopy]

    output += ";\n".join(inserts) + ";"

    self.log.debug(
        'completed the ``_list_of_dictionaries_to_mysql_inserts`` method')
    return output
Convert a python list of dictionaries to MySQL INSERT statements

**Key Arguments:**
    - ``tableName`` -- the name of the table to create the insert statements for
    - ``createStatement`` -- add this create statement to the top of the file. Will only be executed if no table of that name exists in database. Default *None*

**Return:**
    - ``output`` -- the mysql insert statements (as a string)
def create_payment_token(cls, payment_token, **kwargs):
    """Create a new PaymentToken.

    Synchronous by default; pass ``async=True`` (via kwargs) to receive
    the request thread instead of the deserialized data.

    :param PaymentToken payment_token: attributes of the token to create
    :return: PaymentToken, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    invoke = cls._create_payment_token_with_http_info
    if kwargs.get('async'):
        # asynchronous: the helper returns the request thread
        return invoke(payment_token, **kwargs)
    # synchronous: the helper returns the response data directly
    return invoke(payment_token, **kwargs)
Create PaymentToken Create a new PaymentToken This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_payment_token(payment_token, async=True) >>> result = thread.get() :param async bool :param PaymentToken payment_token: Attributes of paymentToken to create (required) :return: PaymentToken If the method is called asynchronously, returns the request thread.
def delete_payment_token_by_id(cls, payment_token_id, **kwargs):
    """Delete an instance of PaymentToken by its ID.

    Synchronous by default; pass ``async=True`` (via kwargs) to receive
    the request thread instead of the deserialized data.

    :param str payment_token_id: ID of the token to delete
    :return: None, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    invoke = cls._delete_payment_token_by_id_with_http_info
    if kwargs.get('async'):
        # asynchronous: the helper returns the request thread
        return invoke(payment_token_id, **kwargs)
    # synchronous: the helper returns the response data directly
    return invoke(payment_token_id, **kwargs)
Delete PaymentToken Delete an instance of PaymentToken by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_token_by_id(payment_token_id, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_payment_token_by_id(cls, payment_token_id, **kwargs):
    """Return a single PaymentToken instance by its ID.

    Synchronous by default; pass ``async=True`` (via kwargs) to receive
    the request thread instead of the deserialized data.

    :param str payment_token_id: ID of the token to return
    :return: PaymentToken, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    invoke = cls._get_payment_token_by_id_with_http_info
    if kwargs.get('async'):
        # asynchronous: the helper returns the request thread
        return invoke(payment_token_id, **kwargs)
    # synchronous: the helper returns the response data directly
    return invoke(payment_token_id, **kwargs)
Find PaymentToken Return single instance of PaymentToken by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payment_token_by_id(payment_token_id, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to return (required) :return: PaymentToken If the method is called asynchronously, returns the request thread.
def list_all_payment_tokens(cls, **kwargs):
    """Return a page of PaymentTokens.

    Synchronous by default; pass ``async=True`` (via kwargs) to receive
    the request thread instead of the deserialized data. Supports
    ``page``, ``size`` and ``sort`` keyword arguments.

    :return: page[PaymentToken], or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    invoke = cls._list_all_payment_tokens_with_http_info
    if kwargs.get('async'):
        # asynchronous: the helper returns the request thread
        return invoke(**kwargs)
    # synchronous: the helper returns the response data directly
    return invoke(**kwargs)
List PaymentTokens Return a list of PaymentTokens This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_payment_tokens(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[PaymentToken] If the method is called asynchronously, returns the request thread.
def replace_payment_token_by_id(cls, payment_token_id, payment_token, **kwargs):
    """Replace all attributes of a PaymentToken.

    Synchronous by default; pass ``async=True`` (via kwargs) to receive
    the request thread instead of the deserialized data.

    :param str payment_token_id: ID of the token to replace
    :param PaymentToken payment_token: replacement attributes
    :return: PaymentToken, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    invoke = cls._replace_payment_token_by_id_with_http_info
    if kwargs.get('async'):
        # asynchronous: the helper returns the request thread
        return invoke(payment_token_id, payment_token, **kwargs)
    # synchronous: the helper returns the response data directly
    return invoke(payment_token_id, payment_token, **kwargs)
Replace PaymentToken Replace all attributes of PaymentToken This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_token_by_id(payment_token_id, payment_token, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to replace (required) :param PaymentToken payment_token: Attributes of paymentToken to replace (required) :return: PaymentToken If the method is called asynchronously, returns the request thread.
def update_payment_token_by_id(cls, payment_token_id, payment_token, **kwargs):
    """Update attributes of a PaymentToken.

    Synchronous by default; pass ``async=True`` (via kwargs) to receive
    the request thread instead of the deserialized data.

    :param str payment_token_id: ID of the token to update
    :param PaymentToken payment_token: attributes to update
    :return: PaymentToken, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    invoke = cls._update_payment_token_by_id_with_http_info
    if kwargs.get('async'):
        # asynchronous: the helper returns the request thread
        return invoke(payment_token_id, payment_token, **kwargs)
    # synchronous: the helper returns the response data directly
    return invoke(payment_token_id, payment_token, **kwargs)
Update PaymentToken Update attributes of PaymentToken This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_payment_token_by_id(payment_token_id, payment_token, async=True) >>> result = thread.get() :param async bool :param str payment_token_id: ID of paymentToken to update. (required) :param PaymentToken payment_token: Attributes of paymentToken to update. (required) :return: PaymentToken If the method is called asynchronously, returns the request thread.
def split_in_columns(filterform, fields_per_column=None):
    """Return an iterator that yields columns (each itself an iterator).

    The flat field list is divided into columns of ``fields_per_column``
    elements each (defaulting to the form's ``fields_per_column`` class
    attribute); the last column holds the remainder.

    The original source of this function was garbled (its text was
    duplicated mid-token); this is the single clean definition.
    """
    nfields = len(filterform.fields)
    if fields_per_column is None:
        fields_per_column = filterform.fields_per_column
    ncolumns, tail = divmod(nfields, fields_per_column)
    if tail > 0:
        ncolumns += 1
    itr = iter(filterform)
    for _i in range(ncolumns):
        # islice shares the underlying iterator, so consuming the columns
        # in order partitions the fields without copying
        yield itertools.islice(itr, fields_per_column)
Return iterator that yields a column (iterator too). By default, flat field list is divided in columns with fields_per_column elements in each (fields_per_column is a class attribute).
def dispatch_event(self, event: "Event") -> None:
    """Dispatch *event* to every listener registered for its type.

    Sets the event's target to this dispatcher first, unless it already
    has one (which can happen when an event is redispatched).

    Args:
        event (Event): The event to dispatch. Must not be `None`.
    """
    if event.target is None:
        event.set_target(self)

    handlers = self._registered_listeners.get(event.type)
    if handlers is None:
        return

    for handler in handlers:
        handler(event)
Dispatches the given event. It is the duty of this method to set the target of the dispatched event by calling `event.set_target(self)`. Args: event (Event): The event to dispatch. Must not be `None`. Raises: TypeError: If the event is `None` or its type is incorrect.
def remove_event_listener(self, event_type: str, event_handler: types.MethodType) -> None:
    """Unregister *event_handler* from *event_type* on this dispatcher.

    Silently does nothing when the handler was not registered.

    Args:
        event_type (str): Event type to remove the handler from.
            Must be a non-empty string.
        event_handler (types.MethodType): The handler to remove.

    Raises:
        ValueError: If any of the parameters are invalid.
    """
    # TODO: we should also accept types.FunctionType,
    # don't forget the documentation here and in the interface.
    valid = isinstance(event_type, str) and event_type != "" \
        and isinstance(event_handler, types.MethodType)
    if not valid:
        raise ValueError("Invalid arguments: {}, {}".format(event_type, event_handler))

    registered = self._registered_listeners.get(event_type)
    if registered is not None and registered.get(event_handler) is not None:
        del registered[event_handler]
Removes the given event listener registered on the dispatcher for the given event type. Args: event_type (str): The type of the event to remove the event handler from. Must not be `None` or empty string. event_handler (types.MethodType): The event handler to remove from the given event type of the dispatcher. Must not be `None`. Raises: ValueError: If any of the parameters are invalid.
def set_target(self, target: EventDispatcherBase) -> None:
    """Record the dispatcher that dispatches this event.

    Meant to be called exactly once, by the dispatching event dispatcher.

    Args:
        target (EventDispatcherBase): The event dispatcher that will
            dispatch this event.

    Raises:
        PermissionError: If the target property has already been set.
        TypeError: If `target` is not an `EventDispatcherBase` instance.
    """
    # One-shot setter: a second call would mean two dispatchers claim
    # the same event, so it is rejected before the type check.
    if self._target is not None:
        raise PermissionError("The target property already has a valid value.")
    if not isinstance(target, EventDispatcherBase):
        raise TypeError("Invalid target type: {}".format(target))
    self._target = target
This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance.
def download_url(url, content_type=None, download_to_file=None,
                 retry_count=10, timeout=10.0):
    """Download a file from *url* (local path or external URL).

    Local files are copied directly; external URLs are fetched with the
    given retry/timeout policy, and their content type checked against
    *content_type* when provided.

    @param url: where the file lives
    @param content_type: desired content type for external URLs (optional)
    @param download_to_file: destination path (a temp file is generated
        when omitted)
    @param retry_count: number of attempts for external URLs
    @param timeout: seconds to sleep between failed attempts
    @return: the path of the downloaded/copied file
    @raise InvenioFileDownloadError: on URL/HTTP/file errors or wrong format
    """
    if not download_to_file:
        download_to_file = safe_mkstemp(suffix=".tmp",
                                        prefix="filedownloadutils_")

    # InvenioFileDownloadError propagates unchanged from either helper
    if is_url_a_local_file(url):
        return download_local_file(url, download_to_file)
    return download_external_url(url,
                                 download_to_file,
                                 content_type=content_type,
                                 retry_count=retry_count,
                                 timeout=timeout)
Will download a file from given URL (either local or external) to the desired path (or generate one if none is given). Local files are copied directly. The function will retry a number of times based on retry_count (default 10) parameter and sleeps a number of seconds based on given timeout (default 10.0 sec) after each failed request. Returns the path to the downloaded file if successful. Otherwise an exception is raised. Given a content_type and an external URL, the function will make sure that the desired content_type is equal to the content-type of returned file. @param url: where the file lives on the interwebs @type url: string @param content_type: desired content_type to check for in external URLs. (optional) @type content_type: string @param download_to_file: where the file should live after download. (optional) @type download_to_file: string @param retry_count: number of times to retry. Defaults to 10. (optional) @type retry_count: int @param timeout: number of seconds to sleep between attempts. Defaults to 10.0 seconds. (optional) @type timeout: float @return: the path of the downloaded/copied file @raise InvenioFileDownloadError: raised upon URL/HTTP errors, file errors or wrong format
def finalize_download(url, download_to_file, content_type, request):
    """Validate and persist a completed download.

    Checks the reported content type (when one was requested), streams
    the response body to *download_to_file*, and rejects empty files.

    @return: the path of the downloaded file
    @raise InvenioFileDownloadError: wrong format, write error, empty file
    """
    # If format is given, a format check is performed.
    if content_type and content_type not in request.headers['content-type']:
        raise InvenioFileDownloadError(
            'The downloaded file is not of the desired format')

    # Save the downloaded file to desired or generated location.
    to_file = open(download_to_file, 'w')
    try:
        try:
            block = request.read(CFG_FILEUTILS_BLOCK_SIZE)
            while block:
                to_file.write(block)
                block = request.read(CFG_FILEUTILS_BLOCK_SIZE)
        except Exception as e:
            msg = "Error when downloading %s into %s: %s" % \
                  (url, download_to_file, e)
            raise InvenioFileDownloadError(msg)
    finally:
        to_file.close()

    # An empty file is treated as a failed download.
    if os.path.getsize(download_to_file) == 0:
        raise InvenioFileDownloadError("%s seems to be empty" % (url,))

    # download successful, return the new path
    return download_to_file
Finalizes the download operation by doing various checks, such as format type, size check etc.
def download_local_file(filename, download_to_file):
    """Copy a local file to Invenio's temporary directory.

    Only normalized paths located under one of the configured allowed
    prefixes (plus CFG_TMPSHAREDDIR) may be copied.

    @param filename: the name (or file URL) of the file to copy
    @param download_to_file: the path to save the file to
    @return: the path of the copy created
    @raise InvenioFileCopyError: if the path is not allowed, the file is
        empty, or the copy fails
    """
    try:
        path = urllib2.urlparse.urlsplit(urllib.unquote(filename))[2]
        if os.path.abspath(path) != path:
            msg = "%s is not a normalized path (would be %s)." \
                  % (path, os.path.normpath(path))
            raise InvenioFileCopyError(msg)

        allowed_path_list = current_app.config.get(
            'CFG_BIBUPLOAD_FFT_ALLOWED_LOCAL_PATHS', []
        )
        allowed_path_list.append(current_app.config['CFG_TMPSHAREDDIR'])
        for allowed_path in allowed_path_list:
            if path.startswith(allowed_path):
                shutil.copy(path, download_to_file)
                if os.path.getsize(download_to_file) == 0:
                    os.remove(download_to_file)
                    msg = "%s seems to be empty" % (filename,)
                    raise InvenioFileCopyError(msg)
                break
        else:
            msg = "%s is not in one of the allowed paths." % (path,)
            # pass the message along (it was previously computed but
            # dropped by a bare InvenioFileCopyError())
            raise InvenioFileCopyError(msg)
    except Exception as e:
        msg = "Impossible to copy the local file '%s' to %s: %s" % \
              (filename, download_to_file, str(e))
        raise InvenioFileCopyError(msg)

    return download_to_file
Copies a local file to Invenio's temporary directory. @param filename: the name of the file to copy @type filename: string @param download_to_file: the path to save the file to @type download_to_file: string @return: the path of the temporary file created @rtype: string @raise StandardError: if something went wrong
def safe_mkstemp(suffix, prefix='filedownloadutils_'): tmpfd, tmppath = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=current_app.config['CFG_TMPSHAREDDIR'] ) # Close the file and leave the responsability to the client code to # correctly open/close it. os.close(tmpfd) if '.' not in suffix: # Just in case format is empty return tmppath while '.' in os.path.basename(tmppath)[:-len(suffix)]: os.remove(tmppath) tmpfd, tmppath = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=current_app.config['CFG_TMPSHAREDDIR'] ) os.close(tmpfd) return tmppath
Create a temporary file whose basename contains no '.' apart from the one introduced by the suffix.
def open_url(url, headers=None):
    """Open *url* through the module-level URL_OPENER.

    No validation is performed on the supplied headers.

    @param url: the URL to open
    @param headers: optional dictionary of request headers
    @return: a file-like object as returned by urllib2.urlopen
    """
    request = urllib2.Request(url)
    for key, value in (headers or {}).items():
        request.add_header(key, value)
    return URL_OPENER.open(request)
Opens a URL. If headers are passed as argument, no check is performed and the URL will be opened. @param url: the URL to open @type url: string @param headers: the headers to use @type headers: dictionary @return: a file-like object as returned by urllib2.urlopen.
def bulk_log(self, log_message=u"Еще одна пачка обработана", total=None, part_log_time_minutes=5):
    """Return a BulkLogger instance for progress-logging over batches.

    :param log_message: message written each time a progress entry is due
    :param total: total number of items, when known
    :param part_log_time_minutes: minimum minutes between log entries
    :rtype: BulkLogger
    """
    return BulkLogger(
        log=self.log,
        log_message=log_message,
        total=total,
        part_log_time_minutes=part_log_time_minutes)
Возвращает инстант логгера для обработки списокв данных :param log_message: То, что будет написано, когда время придет :param total: Общее кол-во объектов, если вы знаете его :param part_log_time_minutes: Раз в какое кол-во минут пытаться писать лог :return: BulkLogger
def db(self, db_alias, shard_key=None):
    """Return (and cache) a DbQueryService for the given DB alias/shard.

    :type db_alias: basestring -- DB alias from the meta configuration
    :param shard_key: identifier (any type) that helps meta locate the
        right shard; the accepted type depends on the receiving side
    :rtype: DbQueryService
    """
    key_part = '' if shard_key is None else shard_key
    cache_key = db_alias + '__' + str(key_part)
    if cache_key not in self.__db_list:
        options = {
            "db_alias": db_alias,
            "dbAlias": db_alias,
            "shard_find_key": key_part,
            "shardKey": key_part,
        }
        self.__db_list[cache_key] = DbQueryService(
            self, self.__default_headers, options)
    return self.__db_list[cache_key]
Получить экземпляр работы с БД :type db_alias: basestring Альяс БД из меты :type shard_key: Любой тип. Некоторый идентификатор, который поможет мете найти нужную шарду. Тип зависи от принимающей стороны :rtype: DbQueryService
def __read_developer_settings(self):
    """Load developer settings from the local machine / environment
    variables (the environment takes precedence inside
    read_developer_settings) and warn when none are configured."""
    self.developer_settings = read_developer_settings()
    if self.developer_settings:
        return
    self.log.warning("НЕ УСТАНОВЛЕНЫ настройки разработчика, это может приводить к проблемам в дальнейшей работе!")
Читает конфигурации разработчика с локальной машины или из переменных окружения При этом переменная окружения приоритетнее :return:
def api_call(self, service, method, data, options):
    """POST to the META adptools API and return the decoded
    ``data[method]`` payload.

    Retries up to 20 times, sleeping 15s, on connection problems or when
    the service reports partial unavailability.

    :param service: adptools service name (URL path component)
    :param method: service method name (URL path component and the key of
        the returned data)
    :param data: request payload; a ``self`` key is stripped if present
    :param options: extra key/values merged into the payload (may be falsy)
    :raises DbQueryError: when the API answers with an error object
    :raises UnexpectedError: when the answer has neither data nor error
    :raises ServerError: when all retries are exhausted
    """
    if 'self' in data:
        # may be absent when called directly from code rather than
        # through wrapper layers such as DbQueryService
        data.pop("self")

    if options:
        data.update(options)

    _headers = dict(self.__default_headers)
    if self.auth_user_id:
        _headers['X-META-AuthUserID'] = str(self.auth_user_id)

    request = {
        "url": self.meta_url + "/api/v1/adptools/" + service + "/" + method,
        "data": json.dumps(data),
        "headers": _headers,
        "timeout": (60, 1800)
    }

    for _try_idx in range(20):
        try:
            resp = requests.post(**request)
            if resp.status_code == 200:
                decoded_resp = json.loads(resp.text)
                if 'data' in decoded_resp:
                    return decoded_resp['data'][method]
                if 'error' in decoded_resp:
                    if 'details' in decoded_resp['error']:
                        eprint(decoded_resp['error']['details'])
                    raise DbQueryError(decoded_resp['error'])
                raise UnexpectedError()
            else:
                process_meta_api_error_code(resp.status_code, request, resp.text)
        except (requests.exceptions.ConnectionError, ConnectionError, TimeoutError) as e:
            # transient network failure: back off and retry
            self.log.warning('META API Connection Error. Sleep...', {"e": e})
            time.sleep(15)
        except Exception as e:
            # the service signalled partial/total unavailability in the
            # error text ("Служба частично или полностью недоступна"):
            # treat it like a transient failure, otherwise re-raise
            if 'Служба частично или полностью недоступна' in str(e):
                self.log.warning('META API Connection Error. Sleep...', {"e": e})
                time.sleep(15)
            else:
                raise e
    raise ServerError(request)
:type app: metasdk.MetaApp
async def get_json(self, url, timeout=30, astext=False, exceptions=False):
    """GET *url* and return the parsed JSON body (or raw text).

    :param timeout: overall request timeout in seconds
    :param astext: return the raw response text instead of parsed JSON
    :param exceptions: re-raise network/timeout errors instead of
        returning None
    :return: parsed JSON (or text when ``astext``); None on non-200
        responses, empty bodies, swallowed network errors, or
        (implicitly) undecodable non-empty bodies
    """
    try:
        with async_timeout.timeout(timeout):
            res = await self._aio_session.get(url)
            if res.status != 200:
                _LOGGER.error("QSUSB returned %s [%s]", res.status, url)
                return None
            res_text = await res.text()
    except (aiohttp.client_exceptions.ClientError,
            asyncio.TimeoutError) as exc:
        if exceptions:
            raise exc
        return None

    if astext:
        return res_text

    try:
        return json.loads(res_text)
    except json.decoder.JSONDecodeError:
        # an empty body is a valid "no data" reply, not a decode error
        if res_text.strip(" ") == "":
            return None
        _LOGGER.error("Could not decode %s [%s]", res_text, url)
Get URL and parse JSON from text.
def stop(self):
    """Stop the listen loop and cancel any in-flight backoff sleep."""
    self._running = False
    pending = self._sleep_task
    if pending:
        pending.cancel()
        self._sleep_task = None
Stop listening.
def version(self):
    """Get the QS Mobile version (raw text response)."""
    url = URL_VERSION.format(self._url)
    return self.get_json(url, astext=True)
Get the QS Mobile version.
def listen(self, callback=None):
    """Start the &listen long-poll loop in the background and return
    immediately.

    :param callback: invoked with each decoded command packet
    """
    self._running = True
    poll = self._async_listen(callback)
    self.loop.create_task(poll)
Start the &listen long poll and return immediately.
async def _async_listen(self, callback=None):
    """Long-poll the &listen endpoint until stop() clears ``_running``.

    Each decoded packet that contains a QS command is handed to
    *callback*; callback exceptions are logged, never propagated.
    Timeouts simply restart the poll; client errors back off for 30s
    (the sleep is kept in ``self._sleep_task`` so stop() can cancel it).
    """
    while True:
        if not self._running:
            return
        try:
            packet = await self.get_json(
                URL_LISTEN.format(self._url),
                timeout=30, exceptions=True)
        except asyncio.TimeoutError:
            # long poll expired without data: poll again immediately
            continue
        except aiohttp.client_exceptions.ClientError as exc:
            _LOGGER.warning("ClientError: %s", exc)
            # cancellable 30s backoff before retrying
            self._sleep_task = self.loop.create_task(asyncio.sleep(30))
            try:
                await self._sleep_task
            except asyncio.CancelledError:
                pass
            self._sleep_task = None
            continue

        if isinstance(packet, dict) and QS_CMD in packet:
            _LOGGER.debug("callback( %s )", packet)
            try:
                callback(packet)
            except Exception as err:  # pylint: disable=broad-except
                # never let a bad callback kill the listen loop
                _LOGGER.error("Exception in callback\nType: %s: %s",
                              type(err), err)
        else:
            _LOGGER.debug("unknown packet? %s", packet)
Listen loop.
def set_qs_value(self, qsid, val, success_cb):
    """Schedule an asynchronous push of *val* to QSUSB device *qsid*.

    :param success_cb: invoked once the device acknowledges the value
    """
    coro = self.async_set_qs_value(qsid, val, success_cb)
    self.loop.create_task(coro)
Push state to QSUSB, retry with backoff.
async def async_set_qs_value(self, qsid, val, success_cb=None):
    """Push *val* to QSUSB device *qsid*, retrying with linear backoff.

    Makes up to 5 attempts, sleeping ``0.01 * attempt`` seconds between
    them.

    :param success_cb: invoked once the device acknowledges the value
    :return: True when the value was accepted, False after all retries
    """
    set_url = URL_SET.format(self._url, qsid, val)
    for _repeat in range(1, 6):
        set_result = await self.get_json(set_url, 2)
        # a missing/placeholder 'data' field means the device did not
        # acknowledge the write
        if set_result and set_result.get('data', 'NO REPLY') != 'NO REPLY':
            if success_cb:
                success_cb()
            return True
        await asyncio.sleep(0.01 * _repeat)
    _LOGGER.error("Unable to set %s", set_url)
    return False
Push state to QSUSB, retry with backoff.
async def update_from_devices(self):
    """Retrieve the &devices list and update the local device registry.

    :return: True on success, False when no data came back
    """
    payload = await self.get_json(URL_DEVICES.format(self._url))
    if not payload:
        return False
    self.devices.update_devices(payload)
    return True
Retrieve a list of &devices and values.
def multi_ops(data_stream, *funcs):
    """Fork a generator through multiple operations/functions.

    With a single function, yields ``func(item)`` for each item; with
    several, yields a tuple of every function applied to the item.

    data_stream - an iterable data structure (list/generator/tuple)
    funcs - the functions applied to each element of data_stream
    """
    assert all(callable(func) for func in funcs), 'multi_ops can only apply functions to the first argument'
    assert len(funcs), 'multi_ops needs at least one function to apply to data_stream'
    if len(funcs) == 1:
        # single function: yield plain results, not 1-tuples
        only = funcs[0]
        for item in data_stream:
            yield only(item)
    else:
        for item in data_stream:
            yield tuple(fn(item) for fn in funcs)
fork a generator with multiple operations/functions data_stream - an iterable data structure (ie: list/generator/tuple) funcs - every function that will be applied to the data_stream
def attowiki_distro_path():
    """Return the absolute path of the directory containing attowiki.

    .. todo:: use pkg_resources ?
    """
    # os.path.dirname already covers the trailing-slash edge cases the
    # previous hand-rolled rfind() logic tried to handle
    return os.path.dirname(os.path.abspath(__file__))
return the absolute complete path where attowiki is located .. todo:: use pkg_resources ?
def build_command(self):
    """Assemble the shell command that appends this job to the user's
    crontab (schedule, CJOBID marker, muted MAILTO) and hand it to
    cron_utils.cronify."""
    schedule = "{} {} {} {} {}".format(
        self._minute,
        self._hour,
        self._day_of_month,
        self._month_of_year,
        self._day_of_week)
    crontab_line = "crontab -l | {{ cat; echo \"{} CJOBID='{}' MAILTO='' {}\"; }} | crontab - > /dev/null".format(
        schedule,
        self._jobid,
        self._command)
    return cron_utils.cronify(crontab_line)
Build out the crontab command
def read_environment_file(envfile=None):
    """Read a .env file into os.environ without overwriting existing keys.

    If not given a path to an envfile, does filthy magic stack
    backtracking to locate a '.env' file next to the calling module.

    :param envfile: path to the environment file (optional)
    """
    if envfile is None:
        # frame.f_back is the caller's frame; look for .env beside the
        # file that called us
        frame = sys._getframe()
        envfile = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
    if not os.path.exists(envfile):
        warnings.warn("not reading %s - it doesn't exist." % envfile)
        return
    for k, v in parse_environment_file(envfile):
        # setdefault: variables already present in the environment win
        os.environ.setdefault(k, v)
Read a .env file into os.environ. If not given a path to a envfile path, does filthy magic stack backtracking to find manage.py and then find the envfile.
def infer_format(filename:str) -> str:
    """Return the extension (dot included) identifying the format of
    *filename*; empty string when there is none."""
    return os.path.splitext(filename)[1]
Return extension identifying format of given filename
def reversed_graph(graph:dict) -> dict:
    """Return the given graph with every edge reversed."""
    rev = {}
    for pred, successors in graph.items():
        for succ in successors:
            rev.setdefault(succ, set()).add(pred)
    return rev
Return given graph reversed
def walk(start:list, graphs:iter) -> iter:
    """Walk the union of *graphs* starting at *start*, yielding every
    reachable node exactly once (including *start*).

    All graphs are understood as a single one, with merged keys and
    values.
    """
    walked = set([start])
    stack = [start]
    while len(stack) > 0:
        *stack, curr = stack
        yield curr
        succs = it.chain.from_iterable(graph.get(curr, ()) for graph in graphs)
        for succ in succs:
            if succ not in walked:
                # mark the successor itself (marking *curr* here, as the
                # code previously did, let nodes be pushed and yielded
                # multiple times)
                walked.add(succ)
                stack.append(succ)
walk on given graphs, beginning on start. Yield all found nodes, including start. All graph are understood as a single one, with merged keys and values.
def have_cycle(graph:dict) -> frozenset:
    """Detect cycles via topological sort.

    Returns the frozenset of unsortable nodes; a non-empty result means
    the given graph contains at least one cycle.
    """
    # every node appearing as a key or as a successor
    every_node = frozenset(it.chain(
        it.chain.from_iterable(graph.values()), graph.keys()))
    predecessors = reversed_graph(graph)  # succ -> set of preds
    sorted_out = set()
    progressed = True
    while progressed:
        progressed = False
        for node in every_node - sorted_out:
            # a node is sortable once all its predecessors are sorted
            if not (predecessors.get(node, set()) - sorted_out):
                sorted_out.add(node)
                progressed = True
    return frozenset(every_node - sorted_out)
Perform a topological sort to detect any cycle. Return the set of unsortable nodes. If it contains at least one item, then there is a cycle in the given graph.
def file_lines(bblfile:str) -> iter:
    """Yield the non-blank lines of the given file, with trailing
    whitespace removed."""
    with open(bblfile) as fd:
        for raw in fd:
            stripped = raw.rstrip()
            if stripped:
                yield stripped
Yield lines found in given file
def line_type(line:str) -> str:
    """Return the type of the input line, as defined in LINE_TYPES."""
    for pattern, kind in LINE_TYPES.items():
        if re.fullmatch(pattern, line):
            return kind
    raise ValueError("Input line \"{}\" is not bubble formatted".format(line))
Give type of input line, as defined in LINE_TYPES >>> line_type('IN\\ta\\tb') 'IN' >>> line_type('') 'EMPTY'
def line_data(line:str) -> tuple:
    """Return the regex groups captured from the input line (an empty
    tuple for patterns without capture groups)."""
    for pattern in LINE_TYPES:
        found = re.fullmatch(pattern, line)
        if found:
            return found.groups()
    raise ValueError("Input line \"{}\" is not bubble formatted".format(line))
Return groups found in given line >>> line_data('IN\\ta\\tb') ('IN', 'a', 'b') >>> line_data('') ()
def _catchCurrentViewContent(self): viewContent = None if self._buffer_color_mode != self._display_color_mode: viewContent = self._buffer.crop( self.View.rectToArray() ) .convert( self._display_color_mode ) else: viewContent = self._buffer.crop( self.View.rectToArray() ) # Rotate for display direction if self._display_direction == 0: return viewContent else: return viewContent.rotate( angle = self._display_direction, expand=True )
! \~english Catch the current view content @return: a PIL Image @note Automatically converts the cache color mode and at the same time rotates the captured image data according to the screen angle \~chinese 从缓存中抓取当前视图大小的数据 @return: PIL Image 对象 @note 自动转换缓存色彩模式,同时根据屏幕角度设定旋转所抓取的图像数据
def _initBuffer(self, bufferColorMode, bufferSize):
    """Create the PIL image buffer, its drawing canvas, and a view
    covering the whole display.

    @param bufferColorMode: "RGB" or "1"
    @param bufferSize: (width, height) of the buffer, or None to use
        the display size
    """
    self._buffer_color_mode = bufferColorMode
    # create screen image buffer and canvas
    size = self._display_size if bufferSize is None else bufferSize
    self._buffer = Image.new(bufferColorMode, size)
    self.Canvas = ImageDraw.Draw(self._buffer)
    # create screen view spanning the full display
    self.View = SSRect(0, 0, self._display_size[0], self._display_size[1])
! \~english Initialize the buffer object instance, use PIL Image as for buffer @param bufferColorMode: "RGB" or "1" @param bufferSize: (width, height) \~chinese 初始化缓冲区对象实例,使用PIL Image作为缓冲区 @param bufferColorMode: 色彩模式, 取值: "RGB" 或 "1" @param bufferSize: 缓存大小 (width, height),例如: (128, 64)
def clearCanvas(self, fillColor = 0 ):
    """Fill the whole canvas with *fillColor*.

    Valid fill values depend on the buffer color mode: 0/1 for "1"
    (mono), RGB values for "RGB".
    """
    width, height = self._display_size
    self.Canvas.rectangle((0, 0, width, height), outline=0, fill=fillColor)
! \~engliash Clear up canvas and fill color at same time @param fillColor: a color value @note The fillColor value range depends on the setting of _buffer_color_mode. * If it is SS_COLOR_MODE_MONO ("1") monochrome mode, it can only select 0: black and 1: white * If it is SS_COLOR_MODE_RGB ("RGB") color mode, RGB color values can be used \~chinese 清除画布并同时填充颜色 @param fillColor: 颜色值 @note fillColor 取值范围取决于 _buffer_color_mode 的设定。 * 如果是 SS_COLOR_MODE_MONO ("1") 单色模式,只能选择 0:黑色 和 1:白色 * 如果是 SS_COLOR_MODE_RGB ("RGB") 彩色模式,可以使用 RGB 色彩值
def clearView(self, fillColor = 0 ):
    """Fill the current view's rectangle on the canvas with *fillColor*.

    Valid fill values depend on the buffer color mode: 0/1 for "1"
    (mono), RGB values for "RGB".
    """
    box = self.View.rectToArray()
    self.Canvas.rectangle(box, outline=0, fill=fillColor)
! \~english Clear up canvas with view size @param fillColor: a color value @note The fillColor value range depends on the setting of _buffer_color_mode. * If it is SS_COLOR_MODE_MONO ("1") monochrome mode, it can only select 0: black and 1: white * If it is SS_COLOR_MODE_RGB ("RGB") color mode, RGB color values can be used \~chinese 清除画布中当前视图大小的区域同时填充颜色 @param fillColor: 颜色值 @note fillColor 取值范围取决于 _buffer_color_mode 的设定。 * 如果是 SS_COLOR_MODE_MONO ("1") 单色模式,只能选择 0:黑色 和 1:白色 * 如果是 SS_COLOR_MODE_RGB ("RGB") 彩色模式,可以使用 RGB 色彩值
def redefineBuffer(self, newBuffer ):
    """Replace the screen buffer with *newBuffer*.

    Accepted forms:
      * a PIL Image of the same type as the current buffer
        (adopted as-is, with a fresh canvas)
      * a PIL ImageFile (current buffer is resized, then the image
        pasted into it)
      * a dict like { "size": (w, h), "color_mode": "1" | "RGB" }
        (a fresh empty buffer is created)

    @return: True when the buffer was redefined; implicitly None for an
        unrecognized type
    """
    # Redefine Frame from an image object
    if type(self._buffer) == type(newBuffer):
        self._buffer = newBuffer
        self.Canvas = ImageDraw.Draw( self._buffer )
#            self.View.resize(newBuffer.width, newBuffer.height)
        return True

    # Redefine Frame from an <PIL.ImageFile.ImageFile>
    # (matched by class name, so any ImageFile subclass qualifies)
    if type(newBuffer).__name__.find(PIL.ImageFile.ImageFile.__name__) != -1:
        self._buffer = self._buffer.resize((newBuffer.width, newBuffer.height))
        self._buffer.paste( newBuffer, (0,0))
#            self.View.resize(newBuffer.width, newBuffer.height)
        return True

    # Recreated a new frame from dict of frame
    if isinstance(newBuffer, dict):
        self._buffer = Image.new( newBuffer["color_mode"] , newBuffer["size"] )
        self.Canvas = ImageDraw.Draw( self._buffer )
        return True
    pass
! \~english Redefine frame of Screen @param newFrame: a new fram data @note newFrame can be: * PIL Image * PIL ImageFile * Dictionary, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" } \~chinese 重新定义缓存数据 @param newFrame: 新缓存数据 \n newFrame 可以为下面值: * PIL Image * PIL ImageFile * 字典, eg. { "size":(width, height), "color_mode":"1" } or { "size":(width, height), "color_mode":"RGB" }
def resize(self, newWidth = 0, newHeight = 0):
    """Set the rectangle's width and height to the given values."""
    self.width = newWidth
    self.height = newHeight
! \~english Resize width and height of rectangles @param newWidth: new width value @param newHeight: new height value \~chinese 重新设定矩形高宽 @param newWidth: 新宽度 @param newHeight: 新高度
def adjuestSize(self, offsetWidth = 0, offsetHeight = 0):
    """Adjust the rectangle's width and height by the given offsets.

    Negative values shrink, positive values grow, 0 leaves the
    dimension unchanged.
    """
    self.width += offsetWidth
    self.height += offsetHeight
! \~english Adjuest width and height of rectangles @param offsetWidth: adjust the width. Negative numbers are smaller, Positive number is increased @param offsetHeight: adjust the height. Negative numbers are smaller, Positive number is increased @note The negative numbers are smaller, positive number is increased,0 remains unchanged. \~chinese 调整矩形高宽数据 @param offsetWidth: 调整宽度。 负数较小,正数增加 @param offsetHeight: 调整高度。 负数较小,正数增加 @note 负数较小,正数增加,0保持不变。
def moveTo(self, newX=0, newY=0):
    """Move the rectangle's vertex to the point (newX, newY)."""
    self.x, self.y = newX, newY
! \~english Move vertex of rectangles to new point (x,y) @param newX: Coordinated X value @param newY: Coordinated Y value \~chinese 移动矩形到新坐标点 (x,y) @param newX: 坐标 X @param newY: 坐标 Y
def moveOffset(self, offsetX=0, offsetY=0):
    """Shift the rectangle's vertex by (offsetX, offsetY).

    Negative values move left/up, positive values move right/down,
    0 leaves the coordinate unchanged.
    """
    self.x += offsetX
    self.y += offsetY
! \~english Offset vertex of rectangles to new point (x,y) @param offsetX: offset X value @param offsetY: offset Y value @note The negative numbers are left or up move , positive number is right or down move,0 remains unchanged. \~chinese 平移矩形指定的距离 (x,y) @param offsetX: 平移 X @param offsetY: 平移 Y @note 负数是左移( X )或上移( Y ),正数是右移( X )或下移( Y ),0 保持不变。
def swapWH(self):
    """Swap the rectangle's width and height."""
    self.width, self.height = self.height, self.width
! \~english Swap width and height of rectangles \~chinese 交换矩形高宽边数据
def rectToArray(self, swapWH = False):
    """Convert the rectangle to an [x1, y1, x2, y2] coordinate array.

    When swapWH is set, width and height are treated as swapped.
    """
    w, h = (self.width, self.height) if swapWH == False else (self.height, self.width)
    return [self.x, self.y, self.x + w, self.y + h]
! \~english Rectangles converted to array of coordinates @return: an array of rect points. eg. (x1,y1,x2,y2) \~chinese 矩形数据转换为矩形坐标数组 @return: 矩形座标数组, 例如: ( x1,y1,x2,y2 )
def _needSwapWH(self, oldDirection, newDirection ): if abs(newDirection - oldDirection) == 0: return False if abs(newDirection - oldDirection) % 180 == 0: return False if abs(newDirection - oldDirection) % 90 == 0: return True return False
! \~english return screen direction status @return Boolean @note No need to rotate if the screen orientation is 0 degrees and 180 degrees \~chinese 返回屏幕方向状态 @return 布尔值 @note 如果屏幕方向是0度和180度就不需要旋转