code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def get_return_line_item_by_id(cls, return_line_item_id, **kwargs):
    """Find ReturnLineItem.

    Return single instance of ReturnLineItem by its ID. The request is
    synchronous unless ``async=True`` is supplied in *kwargs*, in which
    case the request thread is returned instead of the data.

    :param str return_line_item_id: ID of returnLineItem to return (required)
    :return: ReturnLineItem, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the original returned the identical call expression,
    # so a single return covers the sync and async paths alike.
    return cls._get_return_line_item_by_id_with_http_info(return_line_item_id, **kwargs)
Find ReturnLineItem Return single instance of ReturnLineItem by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_return_line_item_by_id(return_line_item_id, async=True) >>> result = thread.get() :param async bool :param str return_line_item_id: ID of returnLineItem to return (required) :return: ReturnLineItem If the method is called asynchronously, returns the request thread.
def list_all_return_line_items(cls, **kwargs):
    """List ReturnLineItems.

    Return a list of ReturnLineItems. Synchronous unless ``async=True``
    is supplied in *kwargs* (then the request thread is returned).

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[ReturnLineItem], or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._list_all_return_line_items_with_http_info(**kwargs)
List ReturnLineItems Return a list of ReturnLineItems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_return_line_items(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ReturnLineItem] If the method is called asynchronously, returns the request thread.
def replace_return_line_item_by_id(cls, return_line_item_id, return_line_item, **kwargs):
    """Replace ReturnLineItem.

    Replace all attributes of ReturnLineItem. Synchronous unless
    ``async=True`` is supplied in *kwargs* (then the request thread is
    returned).

    :param str return_line_item_id: ID of returnLineItem to replace (required)
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to replace (required)
    :return: ReturnLineItem, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._replace_return_line_item_by_id_with_http_info(return_line_item_id, return_line_item, **kwargs)
Replace ReturnLineItem Replace all attributes of ReturnLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_return_line_item_by_id(return_line_item_id, return_line_item, async=True) >>> result = thread.get() :param async bool :param str return_line_item_id: ID of returnLineItem to replace (required) :param ReturnLineItem return_line_item: Attributes of returnLineItem to replace (required) :return: ReturnLineItem If the method is called asynchronously, returns the request thread.
def update_return_line_item_by_id(cls, return_line_item_id, return_line_item, **kwargs):
    """Update ReturnLineItem.

    Update attributes of ReturnLineItem. Synchronous unless ``async=True``
    is supplied in *kwargs* (then the request thread is returned).

    :param str return_line_item_id: ID of returnLineItem to update. (required)
    :param ReturnLineItem return_line_item: Attributes of returnLineItem to update. (required)
    :return: ReturnLineItem, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._update_return_line_item_by_id_with_http_info(return_line_item_id, return_line_item, **kwargs)
Update ReturnLineItem Update attributes of ReturnLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_return_line_item_by_id(return_line_item_id, return_line_item, async=True) >>> result = thread.get() :param async bool :param str return_line_item_id: ID of returnLineItem to update. (required) :param ReturnLineItem return_line_item: Attributes of returnLineItem to update. (required) :return: ReturnLineItem If the method is called asynchronously, returns the request thread.
def query_tracking_code(tracking_code, year=None):
    """Return a list of events related to the given tracking code.

    :param tracking_code: tracking code to query
    :param year: year of the shipment; defaults to the current year
    :return: list of tracking events (empty when nothing was found)
    """
    payload = {
        'Anio': year or datetime.now().year,
        'Tracking': tracking_code,
    }
    summary = _make_request(TRACKING_URL, payload)
    if not summary['d']:
        return []
    # The first summary row carries the destination needed by the detail call.
    first_entry = summary['d'][0]
    payload['Destino'] = first_entry['RetornoCadena6']
    detail = _make_request(TRACKING_DETAIL_URL, payload)
    return _process_detail(detail['d'])
Given a tracking_code, return a list of events related to the tracking code.
def comments_nb_counts():
    """Get number of comments for the record `recid`.

    Returns None when no recid is present in the request, 0 for the
    special recid 0, otherwise the count of visible, unstarred comments.
    """
    recid = request.view_args.get('recid')
    if recid is None:
        return
    if recid == 0:
        return 0
    return CmtRECORDCOMMENT.count(
        CmtRECORDCOMMENT.id_bibrec == recid,
        CmtRECORDCOMMENT.star_score == 0,
        CmtRECORDCOMMENT.status.notin_(['dm', 'da']),
    )
Get number of comments for the record `recid`.
def decide_k(airport_code):
    """Decide if a leading 'K' is throwing off an airport match and return the correct code."""
    has_leading_k = airport_code[:1].upper() == 'K'
    if not has_leading_k:
        return airport_code
    stripped = airport_code[1:]
    try:
        # If there's a match without the K, that's likely what it is.
        match = Airport.objects.get(location_identifier__iexact=stripped)
    except Airport.DoesNotExist:
        return airport_code
    return match.location_identifier
A function to decide if a leading 'K' is throwing off an airport match and return the correct code.
def parse_date(datestring):
    """Attempt to parse an ISO8601 formatted ``datestring``.

    Falls through to :func:`parse_time` when no date pattern matches.

    :param datestring: candidate ISO8601 date string
    :return: a ``datetime.datetime`` object
    :raise ParseError: if the string does not start with a digit
    """
    datestring = str(datestring).strip()
    if not datestring[0].isdigit():
        raise ParseError()
    if 'W' in datestring.upper():
        # Shift the trailing week-day digit down by one -- presumably to map
        # ISO's 1-based day numbers onto a 0-based directive; TODO confirm.
        try:
            datestring = datestring[:-1] + str(int(datestring[-1:]) - 1)
        except:
            pass
    for regex, pattern in DATE_FORMATS:
        if regex.match(datestring):
            found = regex.search(datestring).groupdict()
            # NOTE(review): strptime is a classmethod; the utcnow() instance
            # it is called on is discarded.
            dt = datetime.utcnow().strptime(found['matched'], pattern)
            if 'fraction' in found and found['fraction'] is not None:
                # found['fraction'] includes the leading separator; skip it.
                dt = dt.replace(microsecond=int(found['fraction'][1:]))
            if 'timezone' in found and found['timezone'] is not None:
                dt = dt.replace(tzinfo=Timezone(found.get('timezone', '')))
            return dt
    return parse_time(datestring)
Attempts to parse an ISO8601 formatted ``datestring``. Returns a ``datetime.datetime`` object.
def parse_time(timestring):
    """Attempt to parse an ISO8601 formatted ``timestring``.

    :param timestring: candidate ISO8601 time string
    :return: a ``datetime.datetime`` object (today's date combined with the
        parsed time)
    :raise ParseError: if no time pattern matches
    """
    timestring = str(timestring).strip()
    for regex, pattern in TIME_FORMATS:
        if regex.match(timestring):
            found = regex.search(timestring).groupdict()
            # NOTE(review): strptime is a classmethod; the utcnow() instance
            # it is called on is discarded.
            dt = datetime.utcnow().strptime(found['matched'], pattern)
            # Anchor the parsed time onto today's date.
            dt = datetime.combine(date.today(), dt.time())
            if 'fraction' in found and found['fraction'] is not None:
                # found['fraction'] includes the leading separator; skip it.
                dt = dt.replace(microsecond=int(found['fraction'][1:]))
            if 'timezone' in found and found['timezone'] is not None:
                dt = dt.replace(tzinfo=Timezone(found.get('timezone', '')))
            return dt
    raise ParseError()
Attempts to parse an ISO8601 formatted ``timestring``. Returns a ``datetime.datetime`` object.
def config_param(self, conf_alias, param):
    """Fetch a config from the server (cached locally) and return one flattened parameter.

    :param conf_alias: unique alias of the config
    :param param: dot-separated path of the parameter inside the config
    :return: the parameter value
    :raise KeyError: if ``param`` is not present in the flattened config
    """
    data = self.data_get(conf_alias)
    flat_cache = self.__data_get_flatten_cache.get(conf_alias)
    if flat_cache is None:
        flat_cache = self.__flatten_dict(data, '', '.')
        self.__data_get_flatten_cache[conf_alias] = flat_cache
    if param not in flat_cache:
        # Bug fix: the old message only named the alias, hiding which
        # parameter was actually missing.
        raise KeyError("Key not found: " + conf_alias + "." + param)
    return flat_cache.get(param)
Получает настройки с сервера, кеширует локально и дает простой интерфейс их получения :param conf_alias: :param param: :return:
def data_get(self, conf_alias, data_only=True, use_cache=True):
    """Request configuration data by alias from the settings API.

    :param conf_alias: unique alias of the config
    :param data_only: return only the form data, without meta information
    :param use_cache: fetch once, then serve the in-memory cached copy
    :return: parsed config data (dict)
    """
    data = self.__data_get_cache.get(conf_alias)
    if not use_cache or data is None:
        response = self.__app.native_api_call('settings', 'data/get/' + conf_alias, {}, self.__options,
                                              False, None, False, http_path="/api/meta/v1/", http_method="GET")
        data = json.loads(response.text)
        # A fresh fetch also refreshes the cache for later cached calls.
        self.__data_get_cache[conf_alias] = data
    if data_only:
        return data.get("form_data")
    else:
        return data
Запрашивает данные по настройке :param data_only: Вернуть только данные без метаинформации :param conf_alias: Уникальный альяс конфига :param use_cache: Запросить один раз и далее работать с закешированной в памяти копией :return:
def connect(self):
    """Connect to the database, optionally through an SSH tunnel.

    **Return:**
        - ``dbConn`` -- the database connection
    """
    self.log.debug('starting the ``get`` method')
    dbSettings = self.dbSettings
    port = False
    if "tunnel" in dbSettings and dbSettings["tunnel"]:
        # Route the connection through the locally-forwarded tunnel port.
        port = self._setup_tunnel(
            tunnelParameters=dbSettings["tunnel"]
        )
    # SETUP A DATABASE CONNECTION
    host = dbSettings["host"]
    user = dbSettings["user"]
    passwd = dbSettings["password"]
    dbName = dbSettings["db"]
    dbConn = ms.connect(
        host=host,
        user=user,
        passwd=passwd,
        db=dbName,
        port=port,
        use_unicode=True,
        charset='utf8',
        local_infile=1,
        client_flag=ms.constants.CLIENT.MULTI_STATEMENTS,
        connect_timeout=36000,
        max_allowed_packet=51200000
    )
    if self.autocommit:
        dbConn.autocommit(True)
    self.log.debug('completed the ``get`` method')
    return dbConn
connect to the database **Return:** - ``dbConn`` -- the database connection See the class docstring for usage
def map_(cache: Mapping[Domain, Range]) -> Operator[Map[Domain, Range]]:
    """Return a decorator that consults *cache* before calling the wrapped function.

    The wrapped function is only invoked when nothing is found in the cache
    for its argument. Wrapped function arguments should be hashable.
    """
    def decorator(function: Map[Domain, Range]) -> Map[Domain, Range]:
        @wraps(function)
        def cached_call(argument: Domain) -> Range:
            try:
                return cache[argument]
            except KeyError:
                return function(argument)
        return cached_call
    return decorator
Returns decorator that calls wrapped function if nothing was found in cache for its argument. Wrapped function arguments should be hashable.
def updatable_map(cache: MutableMapping[Domain, Range]) -> Operator[Map]:
    """Return a memoizing decorator backed by *cache*.

    The wrapped function is called only when nothing is found in the cache
    for its argument; the result is stored and reused afterwards. Wrapped
    function arguments should be hashable.
    """
    def decorator(function: Map[Domain, Range]) -> Map[Domain, Range]:
        @wraps(function)
        def memoized(argument: Domain) -> Range:
            try:
                return cache[argument]
            except KeyError:
                cache[argument] = computed = function(argument)
                return computed
        return memoized
    return decorator
Returns decorator that calls wrapped function if nothing was found in cache for its argument and reuses result afterwards. Wrapped function arguments should be hashable.
def property_(getter: Map[Domain, Range]) -> property:
    """Return a property that calls *getter* on first access and reuses the result.

    Class instances should be hashable and weak referenceable.
    """
    cached_getter = map_(WeakKeyDictionary())(getter)
    return property(cached_getter)
Returns property that calls given getter on the first access and reuses result afterwards. Class instances should be hashable and weak referenceable.
def get_context_data(self, **kwargs):
    """Test cookie support and warn the user when cookies are disabled."""
    self.request.session.set_test_cookie()
    if not self.request.session.test_cookie_worked():
        messages.add_message(
            self.request, messages.ERROR, "Please enable cookies.")
        self.request.session.delete_test_cookie()
    return super().get_context_data(**kwargs)
Tests cookies.
def traverse(root, callback, *args, **kwargs):
    # type: (Nonterminal, Callable[[Any, Callable, Any, Any], Generator], Any, Any) -> Generator
    """Traverse an AST iteratively, driven by *callback*.

    The callback receives the current node, a recursion callback and the
    parent's parameters, and must yield individual values; yielding the
    recursion callback's *call* on a child descends into it without using
    the Python call stack.

    :param root: Root element of the parsed tree.
    :param callback: Generator function driving the traversal.
    :return: Sequence of nodes to traverse.
    """
    class MyGenerator:
        # Marker wrapper: distinguishes "descend into this child generator"
        # from a plain value to yield.
        def __init__(self, gen):
            self._gen = gen
        def __next__(self):
            return next(self._gen)
    def inner_callback(item, *args, **kwargs):
        # Wrap the user callback so the trampoline below can recognise
        # recursion requests.
        return MyGenerator(callback(item, inner_callback, *args, **kwargs))
    # Explicit stack of active generators (trampoline instead of recursion).
    to_call = list()
    to_call.append(inner_callback(root, *args, **kwargs))
    while len(to_call) > 0:
        current = to_call.pop()
        try:
            el = next(current)
            # Re-push the current generator so it resumes after the child.
            to_call.append(current)
            if isinstance(el, MyGenerator):
                # Recursion requested: descend depth-first into the child.
                to_call.append(el)
            else:
                yield el
        except StopIteration:
            continue
Traverse AST based on callback. :param root: Root element of the parsed tree. :param callback: Function that accepts current node, callback `c_2` and parameters from the parent. Function must yield individual values. Its possible to yield callback c_2 **call** on any node to call the recursion. The callback can accept parameters from the parent call. The root will receive parameters from the `traverse` call. Example of pre-order traversing: def traverse_func(item, callback): if isinstance(item, Rule): yield item for el in item.to_symbols: yield callback(el) elif isinstance(item, Nonterminal): yield item yield callback(item.to_rule) else: yield item :return: Sequence of nodes to traverse.
def traverse_separated(root, callbackRules, callbackNonterminals, callbackTerminals, *args, **kwargs):
    # type: (Nonterminal, Callable[[Rule, Callable, Any, Any], Generator], Callable[[Nonterminal, Callable, Any, Any], Generator], Callable[[Terminal, Callable, Any, Any], Generator], Any, Any) -> Generator
    """Same as ``traverse``, but with separate callbacks per node kind.

    :param root: Root node of the parsed tree.
    :param callbackRules: Function to call for every rule.
    :param callbackNonterminals: Function to call for every nonterminal.
    :param callbackTerminals: Function to call for every terminal.
    :return: Sequence of nodes to traverse.
    """
    dispatch_table = ((Rule, callbackRules),
                      (Nonterminal, callbackNonterminals),
                      (Terminal, callbackTerminals))

    def route(item, callback, *args, **kwargs):
        # First matching type wins; unmatched items fall through to None,
        # same as the original chain of if-statements.
        for node_type, handler in dispatch_table:
            if isinstance(item, node_type):
                return handler(item, callback, *args, **kwargs)

    return Traversing.traverse(root, route, *args, **kwargs)
Same as traverse method, but have different callbacks for rules, nonterminals and terminals. Functions accepts current node, callback `c_2` and parameters from the parent. Functions must yield individual values. Its possible to yield callback c_2 **call** on any node to call the recursion. The callback can accept parameters from the parent call. The root will receive parameters from the `traverse_separated` call. :param root: Root node of the parsed tree. :param callbackRules: Function to call for every rule. :param callbackNonterminals: Function to call for every nonterminal. :param callbackTerminals: Function to call for every terminal. :return: Sequence of nodes to traverse.
def pre_order(root):
    # type: (Nonterminal) -> Generator
    """Perform pre-order traversing in DFS fashion; expects a tree-like structure.

    :param root: Root of the parsed tree.
    :return: Sequence of nodes to traverse.
    """
    def visit_rule(rule, descend):
        yield rule
        for symbol in rule.to_symbols:
            yield descend(symbol)

    def visit_nonterminal(nonterminal, descend):
        yield nonterminal
        yield descend(nonterminal.to_rule)

    def visit_terminal(terminal, descend):
        yield terminal

    return Traversing.traverse_separated(root, visit_rule, visit_nonterminal, visit_terminal)
Perform pre-order traversing. Expects tree like structure. Traverse in DFS fashion. :param root: Root tree of the parsed tree. :return: Sequence of nodes to traverse.
def corr_matrix(df,method = 'pearson'):
    """Return a correlation matrix across numeric, boolean and categorical columns.

    Categorical columns are expanded to one-hot indicator columns and booleans
    are cast to ints before ``DataFrame.corr`` is applied; any index set on
    the frame is dropped first.

    :param df: DataFrame to analyze
    :param method: 'pearson', 'kendall' or 'spearman'
    :return: correlation DataFrame, or None when no usable columns exist
    """
    # Remove all but categoricals,booleans, and numerics
    df = df.reset_index(drop = True)
    cat_cols = df.select_dtypes(include = 'category')
    bool_cols = df.select_dtypes(include = 'bool')
    df = df.select_dtypes(include = 'number')
    if not cols(df) + cols(bool_cols) + cols(cat_cols):
        return None # quit if there's none of the possible datatypes present
    #Convert categoricals to boolean columns
    insert = np.ones(rows(df))
    for col_name in cat_cols:
        # Add a column of ones as values for the pivot
        cat_df = pd.concat([cat_cols[[col_name]],pd.Series(insert)],axis = 1)
        # Pivot spreads each category value into its own indicator column.
        cat_ptable = cat_df.pivot(columns = col_name).reset_index(drop = True)
        cat_ptable.columns = [col_name+ "_{}".format(value) for value in cat_ptable.columns.get_level_values(col_name)]
        df = pd.concat([df,cat_ptable.fillna(0)],axis = 1)
    # Booleans become 0/1 via multiplication.
    df = pd.concat([df,bool_cols * 1], axis = 1)
    return df.corr(method,0)
Returns a matrix of correlations between columns of a DataFrame. For categorical columns, it first changes those to a set of dummy variable columns. Booleans are converted to numerical as well. Also ignores any indexes set on the DataFrame Parameters: df - DataFrame DataFrame to analyze method - {'pearson', 'kendall', 'spearman'} * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation
def get_colours(color_group, color_name, reverse=False):
    """Return the list of hex colours for the named colormap.

    NOTE(review): ``reverse`` is forwarded to ``get_map``; the resulting
    list is not reversed again here -- confirm get_map handles it.
    """
    cmap = get_map(color_group.lower(), color_name, reverse=reverse)
    return cmap.hex_colors
Return the list of hex colours of the requested colormap; when ``reverse`` is True the colormap is built reversed.
def create_discount_coupon(cls, discount_coupon, **kwargs):
    """Create DiscountCoupon.

    Create a new DiscountCoupon. Synchronous unless ``async=True`` is
    supplied in *kwargs* (then the request thread is returned).

    :param DiscountCoupon discount_coupon: Attributes of discountCoupon to create (required)
    :return: DiscountCoupon, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._create_discount_coupon_with_http_info(discount_coupon, **kwargs)
Create DiscountCoupon Create a new DiscountCoupon This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_discount_coupon(discount_coupon, async=True) >>> result = thread.get() :param async bool :param DiscountCoupon discount_coupon: Attributes of discountCoupon to create (required) :return: DiscountCoupon If the method is called asynchronously, returns the request thread.
def delete_discount_coupon_by_id(cls, discount_coupon_id, **kwargs):
    """Delete DiscountCoupon.

    Delete an instance of DiscountCoupon by its ID. Synchronous unless
    ``async=True`` is supplied in *kwargs* (then the request thread is
    returned).

    :param str discount_coupon_id: ID of discountCoupon to delete. (required)
    :return: None, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._delete_discount_coupon_by_id_with_http_info(discount_coupon_id, **kwargs)
Delete DiscountCoupon Delete an instance of DiscountCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_discount_coupon_by_id(discount_coupon_id, async=True) >>> result = thread.get() :param async bool :param str discount_coupon_id: ID of discountCoupon to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_discount_coupon_by_id(cls, discount_coupon_id, **kwargs):
    """Find DiscountCoupon.

    Return single instance of DiscountCoupon by its ID. Synchronous unless
    ``async=True`` is supplied in *kwargs* (then the request thread is
    returned).

    :param str discount_coupon_id: ID of discountCoupon to return (required)
    :return: DiscountCoupon, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._get_discount_coupon_by_id_with_http_info(discount_coupon_id, **kwargs)
Find DiscountCoupon Return single instance of DiscountCoupon by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_discount_coupon_by_id(discount_coupon_id, async=True) >>> result = thread.get() :param async bool :param str discount_coupon_id: ID of discountCoupon to return (required) :return: DiscountCoupon If the method is called asynchronously, returns the request thread.
def list_all_discount_coupons(cls, **kwargs):
    """List DiscountCoupons.

    Return a list of DiscountCoupons. Synchronous unless ``async=True``
    is supplied in *kwargs* (then the request thread is returned).

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[DiscountCoupon], or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._list_all_discount_coupons_with_http_info(**kwargs)
List DiscountCoupons Return a list of DiscountCoupons This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_discount_coupons(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[DiscountCoupon] If the method is called asynchronously, returns the request thread.
def replace_discount_coupon_by_id(cls, discount_coupon_id, discount_coupon, **kwargs):
    """Replace DiscountCoupon.

    Replace all attributes of DiscountCoupon. Synchronous unless
    ``async=True`` is supplied in *kwargs* (then the request thread is
    returned).

    :param str discount_coupon_id: ID of discountCoupon to replace (required)
    :param DiscountCoupon discount_coupon: Attributes of discountCoupon to replace (required)
    :return: DiscountCoupon, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._replace_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
Replace DiscountCoupon Replace all attributes of DiscountCoupon This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True) >>> result = thread.get() :param async bool :param str discount_coupon_id: ID of discountCoupon to replace (required) :param DiscountCoupon discount_coupon: Attributes of discountCoupon to replace (required) :return: DiscountCoupon If the method is called asynchronously, returns the request thread.
def update_discount_coupon_by_id(cls, discount_coupon_id, discount_coupon, **kwargs):
    """Update DiscountCoupon.

    Update attributes of DiscountCoupon. Synchronous unless ``async=True``
    is supplied in *kwargs* (then the request thread is returned).

    :param str discount_coupon_id: ID of discountCoupon to update. (required)
    :param DiscountCoupon discount_coupon: Attributes of discountCoupon to update. (required)
    :return: DiscountCoupon, or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async branches of the original returned the same expression.
    return cls._update_discount_coupon_by_id_with_http_info(discount_coupon_id, discount_coupon, **kwargs)
Update DiscountCoupon Update attributes of DiscountCoupon This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_discount_coupon_by_id(discount_coupon_id, discount_coupon, async=True) >>> result = thread.get() :param async bool :param str discount_coupon_id: ID of discountCoupon to update. (required) :param DiscountCoupon discount_coupon: Attributes of discountCoupon to update. (required) :return: DiscountCoupon If the method is called asynchronously, returns the request thread.
def get_filter(self):
    """Get FilterForm instance bound to the current request's GET data."""
    form_cls = self.filter_form_cls
    return form_cls(
        self.request.GET,
        runtime_context=self.get_runtime_context(),
        use_filter_chaining=self.use_filter_chaining,
    )
Get FilterForm instance.
def get_queryset(self):
    """Return queryset with filtering applied (if the filter form validates)."""
    queryset = super(FilterFormMixin, self).get_queryset()
    form = self.get_filter()
    if not form.is_valid():
        return queryset
    return form.filter(queryset).distinct()
Return queryset with filtering applied (if filter form passes validation).
def get_context_data(self, **kwargs):
    """Add the filter form to the template context.

    TODO: the filter form object is constructed twice -- in get_queryset
    and here. Need a good way to eliminate the extra initialization.
    """
    ctx = super(FilterFormMixin, self).get_context_data(**kwargs)
    ctx[self.context_filterform_name] = self.get_filter()
    return ctx
Add filter form to the context. TODO: Currently we construct the filter form object twice - in get_queryset and here, in get_context_data. Will need to figure out a good way to eliminate extra initialization.
def last(pipe, items=1):
    """Return the last item (or last *items* items) of an iterable.

    The source text of this function was corrupted (duplicated mid-token);
    this is the reconstructed, de-duplicated implementation.

    :param pipe: any iterable
    :param items: number of trailing items; 1 returns the bare item
    :return: the final item when ``items == 1`` (None for an empty iterable),
        otherwise a tuple of the last ``items`` elements
    """
    if items == 1:
        tail = None
        for tail in pipe:
            pass
        return tail
    # deque with maxlen keeps only the trailing window in O(items) memory.
    return tuple(deque(pipe, maxlen=items))
this function simply returns the last item in an iterable
def print_help(filename, table, dest=sys.stdout):
    """Print command-line usage help to the given destination file object.

    :param filename: path of the executable (only its basename is shown)
    :param table: mapping of command name -> handler signature
    :param dest: writable file object (defaults to stdout)
    """
    cmds = '|'.join(sorted(table.keys()))
    # Python 3 print() with file= replaces the Python 2 ``print >> dest``
    # statement, which is a syntax error on Python 3.
    print("Syntax: %s %s [args]" % (path.basename(filename), cmds), file=dest)
Print help to the given destination file object.
def dispatch(table, args):
    """Dispatches to a function based on the contents of `args`."""
    prog = args[0]
    # No arguments: print help and exit cleanly.
    if len(args) == 1:
        print_help(prog, table)
        sys.exit(0)
    command = args[1]
    # Unknown command or wrong arity: help goes to stderr, non-zero exit.
    if command not in table or len(args) != len(table[command]) + 1:
        print_help(prog, table, dest=sys.stderr)
        sys.exit(1)
    sig = table[command]
    # Cast each argument with its declared type to validate and sanitize it.
    try:
        fixed_args = [type_(raw) for raw, type_ in zip(args[2:], sig[1:])]
    except TypeError:
        # If any are wrong, complain to stderr.
        print_help(prog, table, dest=sys.stderr)
        sys.exit(1)
    sig[0](*fixed_args)
Dispatches to a function based on the contents of `args`.
def tree_to_file(tree:'BubbleTree', outfile:str):
    """Compute the gexf representation of given power graph and write it into *outfile*."""
    gexf = tree_to_gexf(tree)
    with open(outfile, 'w') as handle:
        handle.write(gexf)
Compute the gexf representation of given power graph, and push it into given file.
def tree_to_gexf(tree:'BubbleTree') -> str:
    """Compute the gexf representation of given power graph.

    See https://gephi.org/gexf/format/index.html for format doc.

    :param tree: BubbleTree holding roots, inclusions and edges
    :return: the gexf document as a string
    """
    output_nodes, output_edges = '', ''
    def build_node(node:str) -> str:
        """Yield strings describing given node, recursively"""
        if tree.inclusions[node]:  # it's a powernode
            yield '<node id="{}" label="{}">'.format(node, node)
            yield '<nodes>'
            for sub in tree.inclusions[node]:
                yield from build_node(sub)
            yield '</nodes>'
            yield '</node>'
        else:  # it's a regular node
            yield '<node id="{}" label="{}"/>'.format(node, node)
    # build full hierarchy from the roots
    output_nodes += '\n'.join('\n'.join(build_node(root)) for root in tree.roots)
    # # add the edges to the final graph
    for idx, (source, targets) in enumerate(tree.edges.items()):
        for target in targets:
            if source <= target:  # edges dict is complete. This avoid multiple edges.
                output_edges += '<edge id="{}" source="{}" target="{}" />\n'.format(idx, source, target)
    return GEXF_TEMPLATE.format(
        'directed' if tree.oriented else 'undirected',
        output_nodes, output_edges
    )
Compute the gexf representation of given power graph, and push it into given file. See https://gephi.org/gexf/format/index.html for format doc.
def find_all(s, sub, start=0, end=0, limit=-1, reverse=False):
    """Find all indexes of sub in s.

    :param s: the string to search
    :param sub: the string to search for
    :param start: index in s at which to begin the search (as in ''.find)
    :param end: index in s at which to stop searching (as in ''.find)
    :param limit: maximum number of matches to collect (negative = no limit)
    :param reverse: search backwards (indexes in descending order) when True
    :return: list of all occurrence indexes of sub in s
    """
    matches = []
    if not (s and sub):
        return matches
    length = len(s)
    if length <= start or length < len(sub) or limit == 0:
        return matches
    if limit < 0:
        limit = length
    end = min(end, length) or length
    step = len(sub)
    if reverse:
        pos = s.rfind(sub, start, end)
        while pos != -1 and len(matches) < limit:
            matches.append(pos)
            # Negative end index == pos, so the next match must finish
            # before the previous one began (non-overlapping).
            pos = s.rfind(sub, start, pos - length)
    else:
        pos = s.find(sub, start, end)
        while pos != -1 and len(matches) < limit:
            matches.append(pos)
            pos = s.find(sub, pos + step, end)
    return matches
Find all indexes of sub in s. :param s: the string to search :param sub: the string to search for :param start: the index in s at which to begin the search (same as in ''.find) :param end: the index in s at which to stop searching (same as in ''.find) :param limit: the maximum number of matches to find :param reverse: if False search s forwards; otherwise search backwards :return: all occurrences of substring sub in string s
def _sync_string_to(bin_or_str, string):
    """Python 3 compliance: coerce *string* to the same type (unicode or binary) as *bin_or_str*."""
    target_type = type(bin_or_str)
    if isinstance(string, target_type):
        return string
    if isinstance(string, binary_type):
        return string.decode(DEFAULT_ENCODING)
    return string.encode(DEFAULT_ENCODING)
Python 3 compliance: ensure two strings are the same type (unicode or binary)
def to_ascii_equivalent(text):
    """Convert any non-ASCII characters (accents, etc.) to their best-fit ASCII equivalents."""
    if text is None:
        return None
    if isinstance(text, binary_type):
        text = text.decode(DEFAULT_ENCODING)
    elif not isinstance(text, text_type):
        text = text_type(text)
    # First substitute punctuation via the explicit map, then strip the
    # combining marks produced by NFD decomposition.
    mapped = EMPTY_STR.join(_ASCII_PUNCTUATION_MAP.get(ch, ch) for ch in text)
    decomposed = unicodedata.normalize('NFD', mapped)
    return EMPTY_STR.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
Converts any non-ASCII characters (accents, etc.) to their best-fit ASCII equivalents
def _validate_rule(self, rule):
    # type: (Type[Rule]) -> None
    """Validate that *rule* is a class inheriting from Rule with valid syntax.

    :param rule: Rule to validate.
    :raise NotRuleException: If the parameter doesn't inherit from Rule.
    """
    is_rule_class = inspect.isclass(rule) and issubclass(rule, Rule)
    if not is_rule_class:
        raise NotRuleException(rule)
    rule.validate(self._grammar)
Validate rule. Valid rule must inherit from Rule and have valid syntax. :param rule: Rule to validate. :raise NotRuleException: If the parameter doesn't inherit from Rule.
def _split_rules(self, original_rule):
    # type: (Type[Rule]) -> Iterable[Type[Rule]]
    """Split a Rule class with multiple rules into separate SplitRule classes.

    A class defining only a single rule is not split.

    :param original_rule: Rule to split.
    :return: Iterable of rules derived from the parameter.
    """
    if original_rule.count == 1:
        return [original_rule]
    # Lazily create one SplitRule subclass per defined rule.
    return (SplitRule._create_class(original_rule, index)
            for index in range(original_rule.count))
Splits Rule class with multiple rules into separate classes, each with only one rule defined. The created rules inherits from SplitRule. If parameter define only one rule, its not split. :param original_rule: Rule to split. :return: Iterable of rules derived from the parameter.
def _add(self, *rules):
    # type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
    """Add rules into the set, validating and splitting them as needed.

    Each split rule is also indexed under every symbol it mentions so that
    it can later be deleted together with terminals or nonterminals.

    :param rules: Rules to insert.
    :return: Generator of the inserted (split) rules.
    :raise NotRuleException: If a parameter doesn't inherit from Rule.
    :raise RuleException: If the syntax of a rule is invalid.
    """
    # Validate everything up front so nothing is inserted when a later
    # rule turns out to be invalid.
    for rule in rules:
        if rule in self:
            continue
        self._validate_rule(rule)
    for rule in rules:
        for r in self._split_rules(rule):
            # Index the split rule under every symbol on either side.
            for side in r.rule:
                for s in side:
                    self._assign_map[s].add(r)
            super().add(r)
            yield r
Add rules into the set. Each rule is validated and split if needed. The method add the rules into dictionary, so the rule can be deleted with terminals or nonterminals. :param rules: Rules to insert. :return: Inserted rules. :raise NotRuleException: If the parameter doesn't inherit from Rule. :raise RuleException: If the syntax of the rule is invalid.
def remove(self, *rules, _validate=True):
    # type: (Iterable[Type[Rule]], bool) -> None
    """Remove rules from the set, including their symbol-index entries.

    :param rules: Rules to remove.
    :param _validate: True if the rule should be validated before deleting.
        This parameter is only for internal use.
    :raise NotRuleException: If the parameter doesn't inherit from Rule.
    :raise RuleException: If the syntax of the rule is invalid.
    :raise KeyError: If the rule is not in the grammar.
    """
    all_rules = set()
    for rule in rules:
        if _validate:
            self._validate_rule(rule)
        for r in self._split_rules(rule):
            # NOTE(review): the membership test checks the original ``rule``,
            # not the split ``r`` -- confirm __contains__ handles unsplit
            # classes; otherwise this should probably test ``r``.
            if not self.__contains__(rule, _validate=False):
                raise KeyError('Rule ' + rule.__name__ + ' is not inside')
            all_rules.add(r)
    for rule in all_rules:
        # Drop the rule from every symbol bucket before removing it.
        for side in rule.rule:
            for s in side:
                self._assign_map[s].discard(rule)
        super().remove(rule)
Remove rules from the set. :param rules: Rules to remove. :param _validate: True if the rule should be validated before deleting. This parameter is only for internal use. :raise NotRuleException: If the parameter doesn't inherit from Rule. :raise RuleException: If the syntax of the rule is invalid. :raise KeyError: If the rule is not in the grammar.
def _get(self, *rules):
    # type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
    """Yield the stored representation of each rule in *rules*.

    The yielded rules can differ from the parameters when a parameter
    class defines multiple rules at once.

    :param rules: For which rules get the representation.
    :return: Generator of rules representing the parameters.
    :raise NotRuleException: If a parameter doesn't inherit from Rule.
    :raise RuleException: If the syntax of the rule is invalid.
    """
    for candidate in rules:
        if not (inspect.isclass(candidate) and issubclass(candidate, Rule)):
            raise NotRuleException(candidate)
        for split in self._split_rules(candidate):
            yield self._find_rule(split)
Get rules representing parameters. The return rules can be different from parameters, in case parameter define multiple rules in one class. :param rules: For which rules get the representation. :return: List of rules representing parameters. :raise NotRuleException: If the parameter doesn't inherit from Rule. :raise RuleException: If the syntax of the rule is invalid.
def get_substructure(data, path):
    """Tries to retrieve a sub-structure within some data.

    If the path does not match any sub-structure, returns None.

    >>> data = {'a': 5, 'b': {'c': [1, 2, [{'f': [57]}], 4], 'd': 'test'}}
    >>> get_substructure(data, ['b', 'c', 2, 0, 'f', 0])
    57

    @param data: a container (str|dict|list|any indexable container)
    @param path: location of the data (list|str of successive keys/indexes)
    """
    node = data
    for key in path:
        try:
            node = node[key]
        except (TypeError, IndexError, KeyError):
            return None
    return node
Tries to retrieve a sub-structure within some data. If the path does not match any sub-structure, returns None. >>> data = {'a': 5, 'b': {'c': [1, 2, [{'f': [57]}], 4], 'd': 'test'}} >>> get_substructure(data, "bc") [1, 2, [{'f': [57]}], 4] >>> get_substructure(data, ['b', 'c']) [1, 2, [{'f': [57]}], 4] >>> get_substructure(data, ['b', 'c', 2, 0, 'f', 0]) 57 >>> get_substructure(data, ['b', 'c', 2, 0, 'f', 'd']) None @param data: a container @type data: str|dict|list|(an indexable container) @param path: location of the data @type path: list|str @rtype: *
def iterable(target):
    """Return True if the given argument is iterable, otherwise False.

    The source text of this function was corrupted (duplicated mid-token);
    this is the reconstructed implementation, with the bare ``except``
    narrowed to TypeError (what ``iter`` raises for non-iterables).
    """
    if any(i in ('next', '__next__', '__iter__') for i in dir(target)):
        return True
    try:
        iter(target)
        return True
    except TypeError:
        return False
returns true if the given argument is iterable
def _thread_worker(self):
    """Process callbacks from the queue populated by &listen."""
    while self._running:
        # Retrieve next cmd, or block
        packet = self._queue.get(True)
        # Only dict packets carrying a QS_CMD key are dispatched.
        if isinstance(packet, dict) and QS_CMD in packet:
            try:
                self._callback_listen(packet)
            except Exception as err:  # pylint: disable=broad-except
                # Keep the worker alive even when a callback misbehaves.
                _LOGGER.error("Exception in callback\nType: %s: %s", type(err), err)
        self._queue.task_done()
Process callbacks from the queue populated by &listen.
def _thread_listen(self):
    """The main &listen long-poll loop feeding the worker queue."""
    while self._running:
        try:
            rest = requests.get(URL_LISTEN.format(self._url),
                                timeout=self._timeout)
            if rest.status_code == 200:
                self._queue.put(rest.json())
            else:
                _LOGGER.error('QSUSB response code %s', rest.status_code)
                sleep(30)
        # Received for "Read timed out" and "Connection refused"
        except requests.exceptions.ConnectionError as err:
            if str(err).find('timed') > 0:
                # "Read timed out": benign; queue a device refresh instead.
                self._queue.put({QS_CMD: CMD_UPDATE})
            else:
                # "Connection refused": QSUSB down -- back off for a while.
                _LOGGER.error(str(err))
                sleep(60)
        except Exception as err:  # pylint: disable=broad-except
            _LOGGER.error("%s - %s", str(type(err)), str(err))
            sleep(5)
    # Unblock the worker thread waiting on the queue when shutting down.
    self._queue.put({})
The main &listen loop.
def version(self):
    """Get the QS Mobile version.

    :return: decoded response body of the version endpoint
    """
    # requests.get destroys the ?
    import urllib
    with urllib.request.urlopen(URL_VERSION.format(self._url)) as response:
        return response.read().decode('utf-8')
    # NOTE(review): unreachable -- the with-block above always returns or
    # raises; presumably intended as an error fallback.
    return False
Get the QS Mobile version.
def listen(self, callback=None, timeout=(5, 300)):
    """Start the &listen long poll and return immediately.

    :param callback: callable invoked for every packet received
    :param timeout: (connect, read) timeout tuple for the long poll
    :return: False when already running, True once the threads are started
    """
    if self._running:
        return False
    self._queue = Queue()
    self._running = True
    self._timeout = timeout
    self._callback_listen = callback
    # One thread long-polls QSUSB, the other drains the queue.
    for worker in (self._thread_listen, self._thread_worker):
        threading.Thread(target=worker, args=()).start()
    return True
Start the &listen long poll and return immediately.
def _callback_set_qs_value(self, key, val, success):
    """Push a new state to QSUSB, retrying with increasing backoff.

    :param key: device key to set
    :param val: value to push
    :param success: zero-argument callable invoked once the set is confirmed
    :return: True when QSUSB acknowledged the value, False after 5 attempts
    """
    set_url = URL_SET.format(self._url, key, val)
    # Serialize sets so concurrent callers don't interleave retries.
    with self._lock:
        for _repeat in range(1, 6):
            set_result = requests.get(set_url)
            if set_result.status_code == 200:
                set_result = set_result.json()
                if set_result.get('data', 'NO REPLY') != 'NO REPLY':
                    # self.devices._set_qs_value(key, set_result['data'])
                    success()
                    return True
            # Back off a little longer on every retry (10ms, 20ms, ...).
            sleep(0.01 * _repeat)
        _LOGGER.error("Unable to set %s", set_url)
        return False
Push state to QSUSB, retry with backoff.
def update_from_devices(self):
    """Retrieve the full &devices list and update local device values.

    :return: True on success, False on a non-200 response, None when an
        exception was caught and logged.
    """
    # _LOGGER.warning("update from devices")
    try:
        rest = requests.get(URL_DEVICES.format(self._url))
        if rest.status_code != 200:
            _LOGGER.error("Devices returned %s", rest.status_code)
            return False
        self.devices.update_devices(rest.json())
        return True
    except requests.exceptions.ConnectionError as conn_err:
        _LOGGER.error("Could not connect: %s", conn_err)
    except Exception as err:  # pylint: disable=broad-except
        # Best-effort: failures are logged, not raised.
        _LOGGER.error(err)
Retrieve a list of &devices and values.
def color_to_tuple(color, opacity=1):
    """Convert any supported colour spec to a standard channel tuple.

    "#ffffff"  -> (255, 255, 255, alpha)
    "red"      -> looked up (case-insensitively) in ``color_dict``
    (r, g, b)  -> padded with the alpha channel to length 4

    :param color: hex string, named colour, or tuple of channel values
    :param opacity: 0..1 alpha used for any missing channels
    :return: tuple of channel ints
    """
    if isinstance(color, str) and color.startswith("#"):
        color = hex_color_to_tuple(color)
    elif isinstance(color, str):
        # Bug fix: the membership test used ``color`` while the lookup used
        # ``color.lower()``, so mixed-case names like "Red" never resolved.
        key = color.lower()
        if key in color_dict:
            color = color_dict[key]
        else:
            print("无法解析颜色:" + color)
            color = (255, 125, 0, int(255 * opacity))
    while len(color) < 4:
        color += (int(255 * opacity),)
    return color
convert any color to standard () "red" -> 'c3B', (255, 125, 0) "#ffffff" -> 'c3B', (255, 255, 255) "#ffffffff" -> 'c4B', (255, 255, 255, 255)
def hex_color_to_tuple(hex):
    """Convert a hex color string to a tuple of ints.

    "#ffffff"   -> (255, 255, 255)
    "#ffff00ff" -> (255, 255, 0, 255)
    """
    digits = hex[1:]
    pairs = (digits[2 * i:2 * i + 2] for i in range(len(digits) // 2))
    return tuple(int(pair, 16) for pair in pairs)
convent hex color to tuple "#ffffff" -> (255, 255, 255) "#ffff00ff" -> (255, 255, 0, 255)
def hsla_to_rgba(h, s, l, a):
    """Convert an HSLA color to an RGBA byte tuple.

    :param h: hue in degrees (wrapped into [0, 360))
    :param s: saturation in [0, 1] (clamped)
    :param l: lightness in [0, 1] (clamped)
    :param a: alpha in [0, 1] (clamped)
    :return: (r, g, b, a) with components in 0..255
    """
    h = h % 360
    s = max(0, min(1, s))
    l = max(0, min(1, l))
    a = max(0, min(1, a))
    chroma = (1 - abs(2 * l - 1)) * s
    second = chroma * (1 - abs(h / 60 % 2 - 1))
    offset = l - chroma / 2
    # One (r, g, b) ordering per 60-degree hue sector.
    sector_rgb = [
        (chroma, second, 0),
        (second, chroma, 0),
        (0, chroma, second),
        (0, second, chroma),
        (second, 0, chroma),
        (chroma, 0, second),
    ]
    r, g, b = sector_rgb[int(h // 60)]
    return (int((r + offset) * 255), int((g + offset) * 255),
            int((b + offset) * 255), int(a * 255))
0 <= h < 360 (wrapped), 0 <= s, l, a <= 1 (clamped)
def dir_freq(directory):
    """Return a list of (word, number-of-files-it-occurs-in) tuples.

    For every word appearing in any file of *directory*, count how many
    files contain it at least once (eliminate_repeats de-duplicates words
    within a single file first).

    Fixes: the original text was a corrupted duplicated definition and
    used the Python-2-only ``dict.has_key``.
    """
    freqdict = {}
    for filename in dir_list(directory):
        filewords = eliminate_repeats(read_file(directory + '/' + filename))
        for word in filewords:
            freqdict[word] = freqdict.get(word, 0) + 1
    return list(freqdict.items())
Returns a list of tuples of (word,# of directories it occurs)
def dir_list(directory):
    """Return the list of all entries in *directory*.

    On an OS error (e.g. missing directory) the error is printed and None
    is returned, preserving the original best-effort behaviour.

    Fixes: corrupted duplicated definition; ``WindowsError`` raises a
    NameError on non-Windows platforms — ``OSError`` is the portable
    spelling (WindowsError is an alias of it on Windows).
    """
    import os
    try:
        return os.listdir(directory)
    except OSError as winErr:
        print("Directory error: " + str(winErr))
Returns the list of all files in the directory.
def read_dir(directory):
    """Return the concatenated text of all files in *directory*.

    Each file's contents is followed by a single space, as in the
    original.  Fixes the corrupted duplicated definition and replaces
    quadratic string += concatenation with a join.
    """
    return ''.join(
        read_file(directory + '/' + filename) + ' '
        for filename in dir_list(directory)
    )
Returns the text of all files in a directory.
def assign_colors(dir_counts):
    """Map each word to a display color for the cloud.

    *dir_counts* is a list of (word, occurrences) tuples; the more files a
    word occurs in, the redder it appears (see colorize()).

    Fixes: corrupted duplicated definition, and a Python 3 bug — the
    original stored ``map()`` iterators and exhausted them with ``max()``
    before calling ``min()``/``zip()`` on the same iterator.
    """
    words = [word for word, _ in dir_counts]
    frequencies = [count for _, count in dir_counts]
    maxoccur = max(frequencies)
    minoccur = min(frequencies)
    return {word: colorize(count, maxoccur, minoccur)
            for word, count in zip(words, frequencies)}
Defines the color of a word in the cloud. Counts is a list of tuples in the form (word,occurences) The more files a word occurs in, the more red it appears in the cloud.
def colorize(occurence, maxoccurence, minoccurence):
    """A formula for determining colors.

    The most frequent word is pure red, the least frequent pure blue;
    everything else is interpolated between the two.

    Fixes the corrupted duplicated definition; logic is unchanged.
    """
    if occurence == maxoccurence:
        return (255, 0, 0)
    if occurence == minoccurence:
        return (0, 0, 255)
    red = int(float(occurence) / maxoccurence * 255)
    blue = int(float(minoccurence) / occurence * 255)
    return (red, 0, blue)
A formula for determining colors.
def assign_fonts(counts, maxsize, minsize, exclude_words):
    """Map each word to a font size for the cloud.

    *counts* is a list of (word, count) tuples.  When *exclude_words* is
    truthy, words occurring exactly once are dropped first.

    Fixes: corrupted duplicated definition, and a Python 3 bug — the
    original exhausted a ``map()`` iterator with ``max()`` and then mapped
    over the empty iterator, producing no font sizes.
    """
    if exclude_words:
        valid_counts = [pair for pair in counts if pair[1] != 1]
    else:
        valid_counts = counts
    frequencies = [count for _, count in valid_counts]
    maxcount = max(frequencies)
    return {word: fontsize(count, maxsize, minsize, maxcount)
            for word, count in valid_counts}
Defines the font size of a word in the cloud. Counts is a list of tuples in the form (word,count)
def fontsize(count, maxsize, minsize, maxcount):
    """A formula for determining font sizes.

    Scales linearly with *count* up to *maxsize*, with *minsize* as a
    floor.  Fixes the corrupted duplicated definition; logic unchanged.
    """
    size = int(maxsize - maxsize * (float(maxcount - count) / maxcount))
    return max(size, minsize)
A formula for determining font sizes.
def search(self, query, index='default', **kwargs):
    """Run *query* against the configured elasticsearch *index*.

    Supported kwargs are the elasticsearch request-body parameters
    (timeout, from, size, search_type); pyelasticsearch requires each key
    to be prefixed with "es_".  The raw backend response is stored on
    ``self.raw_results`` and ``self`` is returned for chaining.

    :param query: a Query instance (stringified) or a raw query string
    :param index: name of a configured index
    :raises: via self.raise_improperly_configured for an unknown index
    """
    # Looking up the index
    if index not in self.conf.indexes:
        self.raise_improperly_configured(index=index)

    # Calling the backend search method
    esurl = self.conf.connections[index]['URL']
    esinst = pyelasticsearch.ElasticSearch(esurl)
    # Accept either a Query object or an already-built query string.
    query = isinstance(query, Query) and str(query) or query
    self.raw_results = esinst.search(query, index=index, **kwargs)
    return self
kwargs supported are the parameters listed at: http://www.elasticsearch.org/guide/reference/api/search/request-body/ Namely: timeout, from, size and search_type. IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this
def _command(self, commands):
    """Send a command to the SSD1306; the DC pin is driven LOW.

    :param commands: a byte or array of bytes
    :raises RuntimeError: if the SPI bus has not been configured

    Fix: ``raise "string"`` is a TypeError in Python 3 — use a real
    exception (message preserved); ``== None`` replaced with ``is None``.
    """
    if self._spi is None:
        raise RuntimeError("Do not setting SPI")
    GPIO.output(self._spi_dc, 0)
    self._spi.writebytes(commands)
! \~english Send command to ssd1306, DC pin need set to LOW @param commands: an byte or array of bytes \~chinese 发送命令给 SSD1306,DC 需要设定为低电平 LOW @param commands: 一个字节或字节数组
def _data(self, data):
    """Send display data to the SSD1306; the DC pin is driven HIGH.

    :param data: a byte or array of bytes for the display chip
    :raises RuntimeError: if the SPI bus has not been configured

    Fix: ``raise "string"`` is a TypeError in Python 3 — use a real
    exception (message preserved); ``== None`` replaced with ``is None``.
    """
    if self._spi is None:
        raise RuntimeError("Do not setting SPI")
    GPIO.output(self._spi_dc, 1)
    self._spi.writebytes(data)
! \~english Send data to ssd1306, DC pin need set to HIGH @param data: sent to display chip of data. it can be an byte or array of bytes \~chinese 发送数据给 SSD1306, DC 需要设定为高电平 HIGH @param data: 送到显示芯片的数据。 可以是一个字节或字节数组
def _display_buffer(self, buffer):
    """Send *buffer* to the physical display.

    Addresses the full display window (all columns, all memory pages)
    and then streams the buffer bytes.

    :param buffer: byte data covering the whole display memory
    """
    # Window = columns 0..width-1 and pages 0.._mem_pages-1.
    self._command([
        self.CMD_SSD1306_SET_COLUMN_ADDR, 0, self.width-1,
        self.CMD_SSD1306_SET_PAGE_ADDR, 0, self._mem_pages - 1
    ])
    self._data(buffer)
! \~english Send buffer data to physical display. @param buffer: sent to display chip of data. \~chinese 将缓冲区数据发送到物理显示。 @param buffer: 送到显示芯片的数据。
def clear(self, fill=0x00):
    """Reset the in-memory frame buffer, filling every byte with *fill*.

    The SSD1306 is monochrome (0 = black, 1 = white per pixel bit).  Only
    the local buffer is touched; use display() to push it to the panel.

    :param fill: byte value written into every buffer cell
    """
    size = self.width * self._mem_pages
    self._buffer = [fill for _ in range(size)]
! \~english Clear buffer data and fill color into buffer @param fill: a color value, it will fill into buffer.<br> The SSD1306 only chosen two colors: <br> 0 (0x0): black <br> 1 (0x1): white <br> \~chinese 清除缓冲区数据并在缓冲区中填充颜色 @param fill: 一个颜色值,它会填充到缓冲区中 <br>                      SSD1306只能选择两种颜色: <br>                         0(0x0):黑色 <br>                         1(0x1):白色 <br>
def reset(self):
    """Hardware-reset the display by pulsing the reset GPIO pin.

    No-op when no reset pin was configured.  Pulses high-low-high with
    short delays between transitions.

    Fix: ``== None`` replaced with the idiomatic ``is None``.
    """
    if self._spi_reset is None:
        return
    GPIO.output(self._spi_reset, 1)
    time.sleep(0.002)
    GPIO.output(self._spi_reset, 0)
    time.sleep(0.015)
    GPIO.output(self._spi_reset, 1)
! \~english Reset display \~chinese 复位显示屏
def display(self, buffer=None):
    """Write a buffer to the physical display.

    :param buffer: data to show; when None the internal ``self._buffer``
        is used instead.

    Fix: ``!= None`` replaced with the idiomatic ``is not None``.
    """
    if buffer is not None:
        self._display_buffer(buffer)
    else:
        self._display_buffer(self._buffer)
! \~english Write buffer to physical display. @param buffer: Data to display,If <b>None</b> mean will use self._buffer data to display \~chinese 将缓冲区写入物理显示屏。 @param buffer: 要显示的数据,如果是 <b>None</b>(默认) 将把 self._buffer 数据写入物理显示屏
def setImage(self, image):
    """Copy a PIL image into the internal display buffer.

    The image must be mode "1" (1-bit) and exactly width x height of the
    display.  Pixels are packed 8-at-a-time vertically into page bytes,
    matching the SSD1306 memory layout.  Call display() afterwards to
    push the buffer to the panel.

    :param image: a PIL Image object in mode "1"
    :raises ValueError: wrong color mode or wrong dimensions
    """
    if image.mode != '1':
        raise ValueError('The image color must be in mode \"1\".')
    imgWidth, imgHeight = image.size
    if imgWidth != self.width or imgHeight != self.height:
        raise ValueError('The image must be same dimensions as display ( {0} x {1} ).' \
            .format(self.width, self.height))
    # Bit mask for each of the 8 pixels in a page byte (LSB = top pixel).
    pixByte = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80]
    bi = 0
    pixs = image.load()
    for x in range(0, self.width):
        for y in range(0, self.height, 8):
            pixBits = 0x00
            # Pack 8 vertically adjacent pixels into one buffer byte.
            for py in [0, 1, 2, 3, 4, 5, 6, 7]:
                pixBits |= (0x00 if pixs[x, y+py] == 0 else pixByte[py])
            self._buffer[bi] = pixBits
            bi += 1
! \~english Convert image to the buffer, The image mode must be 1 and image size equal to the display size image type is Python Imaging Library image. @param image: a PIL image object \~chinese 将图像转换为缓冲区,这个图像的色彩模式必须为 1 同时图像大小必须等于显存大小, 图像类型: PIL Image (Python Imaging Library) @param image: PIL图像对象 \n \~ @note <pre> ssd1306.setImage( aPILImage ) ssd1306.display() </pre>
def run(self, schedule_id, auth_token, endpoint, payload, **kwargs):
    """Run one instance of a scheduled task by POSTing *payload* to *endpoint*.

    Retries with an increasing countdown on connection errors, HTTP
    errors and timeouts, firing a metric for each failure class.

    :param schedule_id: identifier of the schedule being executed
    :param auth_token: optional token sent as an Authorization header
    :param endpoint: URL that receives the JSON payload
    :param payload: JSON-serialisable request body
    :return: True (when no retry was triggered)
    """
    log = self.get_logger(**kwargs)
    log.info("Running instance of <%s>" % (schedule_id,))
    # Grow the delay with each retry; the first attempt uses the default.
    if self.request.retries > 0:
        retry_delay = utils.calculate_retry_delay(self.request.retries)
    else:
        retry_delay = self.default_retry_delay
    headers = {"Content-Type": "application/json"}
    if auth_token is not None:
        headers["Authorization"] = "Token %s" % auth_token
    try:
        response = requests.post(
            url=endpoint,
            data=json.dumps(payload),
            headers=headers,
            timeout=settings.DEFAULT_REQUEST_TIMEOUT,
        )
        # Expecting a 201, raise for errors.
        response.raise_for_status()
    except requests_exceptions.ConnectionError as exc:
        log.info("Connection Error to endpoint: %s" % endpoint)
        fire_metric.delay("scheduler.deliver_task.connection_error.sum", 1)
        self.retry(exc=exc, countdown=retry_delay)
    except requests_exceptions.HTTPError as exc:
        # Recoverable HTTP errors: 500, 401
        log.info("Request failed due to status: %s" % exc.response.status_code)
        metric_name = (
            "scheduler.deliver_task.http_error.%s.sum" % exc.response.status_code
        )
        fire_metric.delay(metric_name, 1)
        self.retry(exc=exc, countdown=retry_delay)
    except requests_exceptions.Timeout as exc:
        log.info("Request failed due to timeout")
        fire_metric.delay("scheduler.deliver_task.timeout.sum", 1)
        self.retry(exc=exc, countdown=retry_delay)
    return True
Runs an instance of a scheduled task
def run(self, **kwargs):
    """Requeue every recorded schedule failure as a fresh DeliverTask.

    For each ScheduleFailure row, the matching Schedule's delivery fields
    are fetched, the failure row is deleted, and a new DeliverTask is
    submitted with those fields.
    """
    log = self.get_logger(**kwargs)
    failures = ScheduleFailure.objects
    log.info("Attempting to requeue <%s> failed schedules" % failures.count())
    for failure in failures.iterator():
        schedule = Schedule.objects.values(
            "id", "auth_token", "endpoint", "payload"
        )
        schedule = schedule.get(id=failure.schedule_id)
        # DeliverTask expects the key "schedule_id" as a string.
        schedule["schedule_id"] = str(schedule.pop("id"))
        # Cleanup the failure before requeueing it.
        failure.delete()
        DeliverTask.apply_async(kwargs=schedule)
Requeues failed schedules: re-submits each recorded ScheduleFailure as a new DeliverTask and deletes the failure record.
def get_now_sql_datetime():
    """Return the current timestamp in MySQL format: ``YYYY-MM-DDTHH:MM:SS``.

    :return: the formatted current date-time string
    """
    from datetime import datetime
    return datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
*A datetime stamp in MySQL format: ``YYYY-MM-DDTHH:MM:SS``* **Return:** - ``now`` -- current time and date in MySQL format **Usage:** .. code-block:: python from fundamentals import times now = times.get_now_sql_datetime() print now # OUT: 2016-03-18T11:08:23
def calculate_time_difference(startDate, endDate):
    """Return the difference between two dates as a compact string.

    :param startDate: first date, "YYYY-MM-DDTHH:MM:SS" (a space separator
        is also accepted and normalised to "T")
    :param endDate: final date, same format
    :return: e.g. "2yrs 22dys 6h 44m 53s"; "0s" when the dates are equal
    """
    from datetime import datetime
    from dateutil import relativedelta
    # Normalise "YYYY-MM-DD HH:MM:SS" to the ISO "T" separator.
    if "T" not in startDate:
        startDate = startDate.strip().replace(" ", "T")
    if "T" not in endDate:
        endDate = endDate.strip().replace(" ", "T")
    startDate = datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S')
    endDate = datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S')
    d = relativedelta.relativedelta(endDate, startDate)
    # Build the string from the largest non-zero component downwards;
    # zero components are skipped entirely.
    relTime = ""
    if d.years > 0:
        relTime += str(d.years) + "yrs "
    if d.months > 0:
        relTime += str(d.months) + "mths "
    if d.days > 0:
        relTime += str(d.days) + "dys "
    if d.hours > 0:
        relTime += str(d.hours) + "h "
    if d.minutes > 0:
        relTime += str(d.minutes) + "m "
    if d.seconds > 0:
        relTime += str(d.seconds) + "s"
    if relTime == "":
        relTime = "0s"
    return relTime
*Return the time difference between two dates as a string* **Key Arguments:** - ``startDate`` -- the first date in YYYY-MM-DDTHH:MM:SS format - ``endDate`` -- the final date YYYY-MM-DDTHH:MM:SS format **Return:** - ``relTime`` -- the difference between the two dates in Y,M,D,h,m,s (string) **Usage:** .. code-block:: python from fundamentals import times diff = times.calculate_time_difference(startDate="2015-10-13 10:02:12", endDate="2017-11-04 16:47:05") print diff # OUT: 2yrs 22dys 6h 44m 53s
def bind(renderer, to):
    """Bind *renderer* to the callable *to* by constructing a rendering view.

    The returned view calls *to*, renders its result via
    ``renderer.render`` and, when the renderer defines them, delegates
    failures to ``renderer.view_error`` / ``renderer.render_error``;
    otherwise the original exception propagates.

    :param renderer: object with a ``render(request, value)`` method and
        optional ``view_error`` / ``render_error`` hooks
    :param to: the wrapped view callable
    :return: the new view function (metadata copied from *to* via wraps)
    """
    @wraps(to)
    def view(request, **kwargs):
        try:
            returned = to(request, **kwargs)
        except Exception as error:
            # Let the renderer translate view-level failures if it can.
            view_error = getattr(renderer, "view_error", None)
            if view_error is None:
                raise
            return view_error(request, error)
        try:
            return renderer.render(request, returned)
        except Exception as error:
            # Rendering failed; fall back to the renderer's error hook.
            render_error = getattr(renderer, "render_error", None)
            if render_error is None:
                raise
            return render_error(request, returned, error)
    return view
Bind a renderer to the given callable by constructing a new rendering view.
def get_perm_model():
    """Return the Perm model class that is active in this project.

    Resolves ``settings.PERM_MODEL`` ("app_label.model_name") through the
    Django app registry.

    :raises ImproperlyConfigured: when the setting is malformed or refers
        to a model that is not installed
    """
    try:
        return django_apps.get_model(settings.PERM_MODEL, require_ready=False)
    except ValueError:
        raise ImproperlyConfigured("PERM_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "PERM_MODEL refers to model '{}' that has not been installed".format(settings.PERM_MODEL)
        )
Returns the Perm model that is active in this project.
def _load_yaml_config(cls, config_data, filename="(unknown)"):
    """Parse *config_data* as YAML and return the resulting object.

    :param config_data: YAML text (string or stream)
    :param filename: only used to build error messages
    :raises yaml.YAMLError: re-raised after logging a located message
    """
    try:
        config = yaml.safe_load(config_data)
    except yaml.YAMLError as err:
        # problem_mark carries the line/column of the syntax error, if known.
        if hasattr(err, 'problem_mark'):
            mark = err.problem_mark
            errmsg = ("Invalid YAML syntax in Configuration file "
                      "%(file)s at line: %(line)s, column: %(column)s."
                      % dict(file=filename, line=mark.line + 1,
                             column=mark.column + 1))
        else:
            errmsg = ("YAML error reading Configuration file "
                      "%(file)s" % dict(file=filename))
        logger.error(errmsg)
        raise
    logger.info("Configuration: %s", config)
    return config
Load a yaml config file.
def create_payment_transaction(cls, payment_transaction, **kwargs):
    """Create a new PaymentTransaction.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the unwrapped data.

    :param PaymentTransaction payment_transaction: attributes to create (required)
    :return: PaymentTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # The sync and async branches of the original made the identical call;
    # the helper itself handles both modes, so a single return suffices.
    return cls._create_payment_transaction_with_http_info(payment_transaction, **kwargs)
Create PaymentTransaction Create a new PaymentTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_payment_transaction(payment_transaction, async=True) >>> result = thread.get() :param async bool :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to create (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread.
def delete_payment_transaction_by_id(cls, payment_transaction_id, **kwargs):
    """Delete an instance of PaymentTransaction by its ID.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the unwrapped data.

    :param str payment_transaction_id: ID of paymentTransaction to delete (required)
    :return: None, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate identically to the helper.
    return cls._delete_payment_transaction_by_id_with_http_info(payment_transaction_id, **kwargs)
Delete PaymentTransaction Delete an instance of PaymentTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_payment_transaction_by_id(payment_transaction_id, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_payment_transaction_by_id(cls, payment_transaction_id, **kwargs):
    """Return a single PaymentTransaction by its ID.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the unwrapped data.

    :param str payment_transaction_id: ID of paymentTransaction to return (required)
    :return: PaymentTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate identically to the helper.
    return cls._get_payment_transaction_by_id_with_http_info(payment_transaction_id, **kwargs)
Find PaymentTransaction Return single instance of PaymentTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_payment_transaction_by_id(payment_transaction_id, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to return (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread.
def list_all_payment_transactions(cls, **kwargs):
    """Return a page of PaymentTransactions.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the unwrapped data.

    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[PaymentTransaction], or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate identically to the helper.
    return cls._list_all_payment_transactions_with_http_info(**kwargs)
List PaymentTransactions Return a list of PaymentTransactions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_payment_transactions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[PaymentTransaction] If the method is called asynchronously, returns the request thread.
def replace_payment_transaction_by_id(cls, payment_transaction_id, payment_transaction, **kwargs):
    """Replace all attributes of a PaymentTransaction.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the unwrapped data.

    :param str payment_transaction_id: ID of paymentTransaction to replace (required)
    :param PaymentTransaction payment_transaction: replacement attributes (required)
    :return: PaymentTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate identically to the helper.
    return cls._replace_payment_transaction_by_id_with_http_info(
        payment_transaction_id, payment_transaction, **kwargs)
Replace PaymentTransaction Replace all attributes of PaymentTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to replace (required) :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to replace (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread.
def update_payment_transaction_by_id(cls, payment_transaction_id, payment_transaction, **kwargs):
    """Update attributes of a PaymentTransaction.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the unwrapped data.

    :param str payment_transaction_id: ID of paymentTransaction to update (required)
    :param PaymentTransaction payment_transaction: attributes to update (required)
    :return: PaymentTransaction, or the request thread when async=True
    """
    kwargs['_return_http_data_only'] = True
    # Both modes delegate identically to the helper.
    return cls._update_payment_transaction_by_id_with_http_info(
        payment_transaction_id, payment_transaction, **kwargs)
Update PaymentTransaction Update attributes of PaymentTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_payment_transaction_by_id(payment_transaction_id, payment_transaction, async=True) >>> result = thread.get() :param async bool :param str payment_transaction_id: ID of paymentTransaction to update. (required) :param PaymentTransaction payment_transaction: Attributes of paymentTransaction to update. (required) :return: PaymentTransaction If the method is called asynchronously, returns the request thread.
def use(self, obj, parent_form=None):
    """Attach *obj* to this handler.

    When ``self.title`` is None it is replaced with ``'file: ' + obj.filename``.

    :param obj: object to handle; must be an instance of self.input_classes
    :param parent_form: stored on self.parent_form
    :raises RuntimeError: when *obj* is of an unsupported class
    """
    if not isinstance(obj, self.input_classes):
        message = '{0!s} cannot handle a {1!s}'.format(
            self.__class__.__name__, obj.__class__.__name__)
        raise RuntimeError(message)
    self.parent_form = parent_form
    if self.title is None:
        self.title = 'file: ' + obj.filename
    self._do_use(obj)
Note: if title is None, will be replaced with obj.filename
def sround(x, precision=0):
    """Stochastically round *x* using a default non-deterministic generator.

    :param x: value to round
    :param precision: decimal places to round to
    """
    rounder = StochasticRound(precision=precision)
    return rounder.round(x)
Round a single number using default non-deterministic generator. @param x: to round. @param precision: decimal places to round.
def round(self, x):
    """Stochastically round *x* to the configured precision.

    Fractions within ``minimum_stochastic_distance`` of 0 or 1 are
    rounded deterministically; otherwise the direction is random,
    weighted by the fractional part.  The roundoff error is recorded.

    :param x: numeric value to round
    :return: the rounded value
    """
    fraction, scaled_x, scale = self._get_fraction(x)
    # Too close to an integer boundary: deterministic rounding.
    if fraction < self.minimum_stochastic_distance or 1-fraction < self.minimum_stochastic_distance:
        result = round(x, self.precision)
    else:
        # Round down with probability (1 - fraction), up otherwise.
        rounddown = fraction < self.random_generator.random()
        if rounddown:
            result = math.floor(scaled_x) / scale
        else:
            result = math.ceil(scaled_x) / scale
    self._record_roundoff_error(x, result)
    return result
Round the given value. @param x: to round @type x: numeric
def submit_mail(self, send_from, send_to, subject, body, unique_id=None): self.__metadb.update(""" INSERT INTO meta.mail("template", "from", "to", "subject", "body", "attachments", "unique_id") VALUES ('meta', :send_from, :send_to, :subject, :body, null, :unique_id) ON CONFLICT (unique_id) DO NOTHING """, { "send_from": send_from, "send_to": send_to, "subject": subject, "body": body, "unique_id": unique_id })
Добавляем письмо в очередь на отправку :param send_from: Отправитель :param send_to: Получатель :param subject: Тема письма :param body: Тело письма. Можно с HTML :param unique_id: Уникальный идентификатор письма. Обычно что-то вроде md5 + человекочитаемый префикс подходят лучше всего. Письмо с одинаковым unique_id не будет добавлено
def _parse_chord_line(line):
    """Parse a chord line into a `ChordLineData` object.

    Fixes the corrupted duplicated definition; logic is unchanged.
    """
    chords = [
        TabChord(position=position, chord=chord)
        for chord, position in Chord.extract_chordpos(line)
    ]
    return ChordLineData(chords=chords)
Parse a chord line into a `ChordLineData` object.
def _get_line_type(line):
    """Decide a line's type ('empty', 'chord' or 'lyric') from its contents.

    A line counts as a chord line when stripping every chord token leaves
    less than half of the whitespace-normalised text behind.

    Fixes the corrupted duplicated definition; logic is unchanged.
    """
    stripped = line.strip()
    if not stripped:
        return 'empty'
    remainder = re.sub(r"\s+", " ", re.sub(CHORD_RE, "", stripped))
    if len(remainder) * 2 < len(re.sub(r"\s+", " ", stripped)):
        return 'chord'
    return 'lyric'
Decide the line type in function of its contents
def parse_line(line):
    """Parse a single text line into a `TabLine` object.

    Fixes the corrupted duplicated definition; logic is unchanged.
    """
    line = line.rstrip()
    line_type = _get_line_type(line)
    return TabLine(
        type=line_type,
        data=_DATA_PARSERS[line_type](line),
        original=line,
    )
Parse a line into a `TabLine` object.
def parse_tablature(lines):
    """Parse an iterable of text lines into a `Tablature`.

    Fixes the corrupted duplicated definition; logic is unchanged.
    """
    parsed = [parse_line(line) for line in lines]
    return Tablature(lines=parsed)
Parse a list of lines into a `Tablature`.
def preview(df, preview_rows=20):
    """Return a preview of *df*: head rows, a random middle sample, tail rows.

    At least 4 rows are shown (capped at the frame's length); a quarter
    each come from the head and tail, the rest are sampled from between.

    :param df: the DataFrame to preview
    :param preview_rows: total number of rows in the preview
    """
    preview_rows = max(preview_rows, 4)
    preview_rows = min(preview_rows, df.shape[0])
    edge = math.floor(preview_rows / 4)
    middle = df[edge:-edge].sample(preview_rows - 2 * edge)
    return pd.concat([df.head(edge), middle, df.tail(edge)])
Returns a preview of a dataframe, which contains both header rows and tail rows.
def get_info(df, verbose=None, max_cols=None, memory_usage=None, null_counts=None):
    """Return the output of ``df.info()`` as a string instead of printing it.

    The optional arguments are forwarded positionally to DataFrame.info.
    """
    assert type(df) is pd.DataFrame
    with io.StringIO() as sink:
        df.info(verbose, sink, max_cols, memory_usage, null_counts)
        return sink.getvalue()
Returns the .info() output of a dataframe
def title_line(text):
    """Return *text* as a title blurb: centred between full-width '=' rules."""
    width = shutil.get_terminal_size()[0]
    rule = '=' * width
    padding = ' ' * (width // 2 - len(text) // 2)
    return '\n\n'.join([rule, padding + str(text), rule]) + '\n'
Returns a string that represents the text as a title blurb
def PhaseScreens(numTelescope, r0, pupilSize, screenSize=1024, numRealisation=-1):
    """Yield atmospheric wavefront perturbations across *numTelescope* pupils.

    Each telescope has pupil size *pupilSize* and Fried parameter *r0*;
    perturbations are uncorrelated between telescopes.  A negative
    *numRealisation* produces an infinite sequence of realisations.
    """
    generators = [ScreenGenerator(screenSize, r0, pupilSize, pupilSize)
                  for _ in range(numTelescope)]
    produced = 0
    while numRealisation < 0 or produced < numRealisation:
        produced += 1
        yield np.array([next(generator) for generator in generators])
Return a generator for atmospheric wavefront perturbations across a set of *numTelescope* telescopes, each of size *pupilSize* and with Fried parameter *r0*. The perturbations are modelled as being uncorrelated between telescopes. The number of realisations is given by *numRealisation*, but if *numRealisation* is negative then an infinite sequence of realisations is generated.
def RadiusGrid(gridSize):
    """Return a gridSize x gridSize array of distances from the grid centre."""
    centre = (gridSize - 1.0) / 2.0
    x, y = np.mgrid[0:gridSize, 0:gridSize]
    return np.hypot(x - centre, y - centre)
Return a square grid with values of the distance from the centre of the grid to each gridpoint
def CircularMaskGrid(gridSize, diameter=None):
    """Return a square grid: ones inside a centred circle, zeros outside.

    :param diameter: circle diameter; defaults to the full grid size
    """
    if diameter is None:
        diameter = gridSize
    return RadiusGrid(gridSize) <= diameter / 2.0
Return a square grid with ones inside and zeros outside a given diameter circle
def AdaptiveOpticsCorrect(pupils, diameter, maxRadial, numRemove=None):
    """Correct wavefronts by subtracting Zernike modes up to *maxRadial* order.

    Operates on one or more pupil screens in parallel.  The piston mode is
    removed too — an extra final pass on mode 0 cleans up residual piston.

    :param pupils: array of pupil phase screens; last axis is the grid size
    :param diameter: aperture diameter used to build the Zernike modes
    :param maxRadial: maximum radial order of the modes to remove
    :param numRemove: number of modes to remove (default: all generated)
    :return: the corrected screens, same shape as *pupils*
    """
    gridSize = pupils.shape[-1]
    pupilsVector = np.reshape(pupils, (-1, gridSize**2))
    zernikes = np.reshape(ZernikeGrid(gridSize, maxRadial, diameter), (-1, gridSize**2))
    if numRemove is None:
        numRemove = zernikes.shape[0]
    numScreen = pupilsVector.shape[0]
    # Normalise projections by the piston-mode sum (the aperture area).
    normalisation = 1.0/np.sum(zernikes[0])
    # Note extra iteration to remove residual piston
    for i in list(range(numRemove))+[0, ]:
        # Project each screen onto mode i and subtract the fitted component.
        amplitudes = np.inner(zernikes[i], pupilsVector)*normalisation
        pupilsVector = pupilsVector-zernikes[i]*amplitudes[:, np.newaxis]
    return np.reshape(pupilsVector, pupils.shape)
Correct a wavefront using Zernike rejection up to some maximal order. Can operate on multiple telescopes in parallel. Note that this version removes the piston mode as well
def FibreMode(gridSize, modeDiameter):
    """Return a pupil-plane Gaussian mode with 1/e diameter *modeDiameter*,
    normalised so the integrated power over the mode is unity."""
    waist = modeDiameter / 2
    norm = np.sqrt(np.pi / 2) * waist
    return np.exp(-(RadiusGrid(gridSize) / waist) ** 2) / norm
Return a pupil-plane Gaussian mode with 1/e diameter given by *modeDiameter*, normalised so that integral power over the mode is unity
def FibreCouple(pupils, modeDiameter):
    """Return the complex amplitudes coupled into a set of fibres.

    :param pupils: pupil fields; last axis is the grid size
    :param modeDiameter: 1/e diameter of the fibre mode
    """
    gridSize = pupils.shape[-1]
    flattened = np.reshape(pupils, (-1, gridSize ** 2))
    mode = np.reshape(FibreMode(gridSize, modeDiameter), (gridSize ** 2,))
    return np.inner(flattened, mode)
Return the complex amplitudes coupled into a set of fibers
def SingleModeCombine(pupils, modeDiameter=None):
    """Return (photometric fluxes, coherent fluxes) for a multiway
    single-mode fibre combiner.

    :param modeDiameter: fibre mode 1/e diameter; defaults to 90% of the
        pupil grid size
    """
    if modeDiameter is None:
        modeDiameter = 0.9 * pupils.shape[-1]
    amplitudes = FibreCouple(pupils, modeDiameter)
    conjugates = np.conj(amplitudes)
    fluxes = (amplitudes * conjugates).real
    # One coherent flux per unordered telescope pair (i > j).
    coherentFluxes = [amplitudes[i] * conjugates[j]
                      for i in range(1, len(amplitudes))
                      for j in range(i)]
    return fluxes, coherentFluxes
Return the instantaneous coherent fluxes and photometric fluxes for a multiway single-mode fibre combiner