def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs) else: (data) = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs) return data
Delete StoreCreditPayment Delete an instance of StoreCreditPayment by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs) else: (data) = cls._get_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs) return data
Find StoreCreditPayment Return single instance of StoreCreditPayment by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_store_credit_payment_by_id(store_credit_payment_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to return (required) :return: StoreCreditPayment If the method is called asynchronously, returns the request thread.
def list_all_store_credit_payments(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_store_credit_payments_with_http_info(**kwargs) else: (data) = cls._list_all_store_credit_payments_with_http_info(**kwargs) return data
List StoreCreditPayments Return a list of StoreCreditPayments This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_store_credit_payments(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[StoreCreditPayment] If the method is called asynchronously, returns the request thread.
def replace_store_credit_payment_by_id(cls, store_credit_payment_id, store_credit_payment, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs) else: (data) = cls._replace_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs) return data
Replace StoreCreditPayment Replace all attributes of StoreCreditPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to replace (required) :param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to replace (required) :return: StoreCreditPayment If the method is called asynchronously, returns the request thread.
def update_store_credit_payment_by_id(cls, store_credit_payment_id, store_credit_payment, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs) else: (data) = cls._update_store_credit_payment_by_id_with_http_info(store_credit_payment_id, store_credit_payment, **kwargs) return data
Update StoreCreditPayment Update attributes of StoreCreditPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_store_credit_payment_by_id(store_credit_payment_id, store_credit_payment, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to update. (required) :param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to update. (required) :return: StoreCreditPayment If the method is called asynchronously, returns the request thread.
def replaceRule(oldRule, newRule): # type: (Rule, Rule) -> Rule for par in oldRule.from_symbols: par._set_to_rule(newRule) newRule._from_symbols.append(par) for ch in oldRule.to_symbols: ch._set_from_rule(newRule) newRule._to_symbols.append(ch) return newRule
Replace instance of Rule with another one. :param oldRule: Instance in the tree. :param newRule: Instance to replace with. :return: New instance attached to the tree.
def replaceNode(oldNode, newNode): # type: (_RuleConnectable, _RuleConnectable) -> _RuleConnectable if oldNode.from_rule is not None and len(oldNode.from_rule.to_symbols) > 0: indexParent = oldNode.from_rule.to_symbols.index(oldNode) oldNode.from_rule.to_symbols[indexParent] = newNode newNode._set_from_rule(oldNode.from_rule) if oldNode.to_rule is not None and len(oldNode.to_rule.from_symbols) > 0: indexChild = oldNode.to_rule.from_symbols.index(oldNode) oldNode.to_rule._from_symbols[indexChild] = newNode newNode._set_to_rule(oldNode.to_rule) return newNode
Replace instance of Nonterminal or Terminal in the tree with another one. :param oldNode: Old nonterminal or terminal already in the tree. :param newNode: Instance of nonterminal or terminal to replace with. :return: Instance `newNode` in the tree.
def replace(oldEl, newEl): # type: (Union[Rule, _RuleConnectable], Union[Rule, _RuleConnectable]) -> Union[Rule, _RuleConnectable] if isinstance(oldEl, Rule): return Manipulations.replaceRule(oldEl, newEl) if isinstance(oldEl, (Nonterminal, Terminal)): return Manipulations.replaceNode(oldEl, newEl)
Replace element in the parsed tree. Can be nonterminal, terminal or rule. :param oldEl: Element already in the tree. :param newEl: Element to replace with. :return: New element attached to the tree.
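A hedged usage sketch (the elements come from an existing parse tree; `old_element` and `new_element` are hypothetical placeholders): >>> replaced = Manipulations.replace(old_element, new_element) # dispatches to replaceRule or replaceNode depending on type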
def search_authors(self, query): query = query.replace(" ", "+") p = subprocess.Popen("curl -H 'Accept: application/orcid+json' \ 'http://pub.sandbox-1.orcid.org/search/orcid-bio?q=" + query + "&start=0&rows=10'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) jsonResults = "" for line in p.stdout.readlines(): jsonResults = line self.authorsDict = json.loads(jsonResults)
FIXME: Don't create a process to do this!
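A minimal sketch of the fix this FIXME asks for, querying the ORCID endpoint directly with the `requests` library instead of shelling out to curl (assumes `requests` is installed; URL and parameters are taken from the code above): >>> import requests >>> resp = requests.get('http://pub.sandbox-1.orcid.org/search/orcid-bio', params={'q': query, 'start': 0, 'rows': 10}, headers={'Accept': 'application/orcid+json'}) >>> authorsDict = resp.json()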
def nmtoken_from_string(text): text = text.replace('-', '--') return ''.join([(((not char.isalnum() and char not in [ '.', '-', '_', ':' ]) and str(ord(char))) or char) for char in text])
Returns a Nmtoken from a string. It is useful to produce XHTML-valid values for the 'name' attribute of an anchor. CAUTION: the function is not injective: 2 different texts might lead to the same result. This is improbable on a single page. Nmtoken is the type that is a mixture of characters supported in attributes such as 'name' in HTML 'a' tag. For example, <a name="Articles%20%26%20Preprints"> should be transformed to <a name="Articles372037263720Preprints"> using this function. http://www.w3.org/TR/2000/REC-xml-20001006#NT-Nmtoken Also note that this function filters more characters than specified by the definition of Nmtoken ('CombiningChar' and 'Extender' charsets are filtered out).
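For example (output computed from the rules above: ' ' becomes '32' and '&' becomes '38'): >>> nmtoken_from_string('Articles & Preprints') 'Articles323832Preprints'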
def escape_html(text, escape_quotes=False): text = text.replace('&', '&amp;') text = text.replace('<', '&lt;') text = text.replace('>', '&gt;') if escape_quotes: text = text.replace('"', '&quot;') text = text.replace("'", '&#39;') return text
Escape all HTML tags, avoiding XSS attacks. < => &lt; > => &gt; & => &amp; @param text: text to be escaped from HTML tags @param escape_quotes: if True, escape any quote mark to its HTML entity: " => &quot; ' => &#39;
def tidy_html(html_buffer, cleaning_lib='utidylib'): if CFG_TIDY_INSTALLED and cleaning_lib == 'utidylib': options = dict(output_xhtml=1, show_body_only=1, merge_divs=0, wrap=0) try: output = str(tidy.parseString(html_buffer, **options)) except: output = html_buffer elif CFG_BEAUTIFULSOUP_INSTALLED and cleaning_lib == 'beautifulsoup': try: output = str(BeautifulSoup(html_buffer).prettify()) except: output = html_buffer else: output = html_buffer return output
Tidy up the input HTML using one of the installed cleaning libraries. @param html_buffer: the input HTML to clean up @type html_buffer: string @param cleaning_lib: chose the preferred library to clean the HTML. One of: - utidylib - beautifulsoup @return: a cleaned version of the input HTML @note: requires uTidylib or BeautifulSoup to be installed. If the chosen library is missing, the input X{html_buffer} is returned I{as is}.
def get_mathjax_header(https=False): if cfg['CFG_MATHJAX_HOSTING'].lower() == 'cdn': if https: mathjax_path = "https://d3eoax9i5htok0.cloudfront.net/mathjax/2.1-latest" else: mathjax_path = "http://cdn.mathjax.org/mathjax/2.1-latest" else: mathjax_path = "/vendors/MathJax" if cfg['CFG_MATHJAX_RENDERS_MATHML']: mathjax_config = "TeX-AMS-MML_HTMLorMML" else: mathjax_config = "TeX-AMS_HTML" return """<script type="text/x-mathjax-config"> MathJax.Hub.Config({ tex2jax: {inlineMath: [['$','$']], processEscapes: true}, showProcessingMessages: false, messageStyle: "none" }); </script> <script src="%(mathjax_path)s/MathJax.js?config=%(mathjax_config)s" type="text/javascript"> </script>""" % { 'mathjax_path': mathjax_path, 'mathjax_config': mathjax_config, }
Return the snippet of HTML code to put in HTML HEAD tag, in order to enable MathJax support. @param https: when using the CDN, whether to use the HTTPS URL rather than the HTTP one. @type https: bool @note: with new releases of MathJax, update this function together with $MJV variable in the root Makefile.am
def remove_html_markup(text, replacechar=' ', remove_escaped_chars_p=True): if not remove_escaped_chars_p: return RE_HTML_WITHOUT_ESCAPED_CHARS.sub(replacechar, text) return RE_HTML.sub(replacechar, text)
Remove HTML markup from text. @param text: Input text. @type text: string. @param replacechar: By which character should we replace HTML markup. Usually, a single space or an empty string are nice values. @type replacechar: string @param remove_escaped_chars_p: If True, also remove escaped characters like '&amp;', '&lt;', '&gt;' and '&quot;'. @type remove_escaped_chars_p: boolean @return: Input text with HTML markup removed. @rtype: string
def unescape(s, quote=False): s = s.replace('&lt;', '<') s = s.replace('&gt;', '>') if quote: s = s.replace('&quot;', '"') s = s.replace('&amp;', '&') return s
The opposite of the cgi.escape function. Replace escaped characters '&amp;', '&lt;' and '&gt;' with the corresponding regular characters. If the optional flag quote is true, the escaped quotation mark character ('&quot;') is also translated.
def wash( self, html_buffer, render_unallowed_tags=False, allowed_tag_whitelist=CFG_HTML_BUFFER_ALLOWED_TAG_WHITELIST, automatic_link_transformation=False, allowed_attribute_whitelist=CFG_HTML_BUFFER_ALLOWED_ATTRIBUTE_WHITELIST): self.reset() self.result = '' self.nb = 0 self.previous_nbs = [] self.previous_type_lists = [] self.url = '' self.render_unallowed_tags = render_unallowed_tags self.automatic_link_transformation = automatic_link_transformation self.allowed_tag_whitelist = allowed_tag_whitelist self.allowed_attribute_whitelist = allowed_attribute_whitelist self.feed(html_buffer) self.close() return self.result
Wash HTML buffer, escaping XSS attacks. @param html_buffer: text to escape @param render_unallowed_tags: if True, print unallowed tags escaping < and >. Else, only print content of unallowed tags. @param allowed_tag_whitelist: list of allowed tags @param allowed_attribute_whitelist: list of allowed attributes
def handle_data(self, data): if not self.silent: possible_urls = re.findall( r'(https?://[\w\d:#%/;$()~_?\-=\\\.&]*)', data) # validate possible urls # we'll transform them just in case # they are valid. if possible_urls and self.automatic_link_transformation: for url in possible_urls: if regex_url.search(url): transformed_url = '<a href="%s">%s</a>' % (url, url) data = data.replace(url, transformed_url) self.result += data else: self.result += cgi.escape(data, True)
Function called for text nodes
def handle_endtag(self, tag): if tag.lower() in self.allowed_tag_whitelist: self.result += '</' + tag + '>' else: if self.render_unallowed_tags: self.result += '&lt;/' + cgi.escape(tag) + '&gt;' if tag == 'style' or tag == 'script': self.silent = False
Function called for end tags
def handle_startendtag(self, tag, attrs): if tag.lower() in self.allowed_tag_whitelist: self.result += '<' + tag for (attr, value) in attrs: if attr.lower() in self.allowed_attribute_whitelist: self.result += ' %s="%s"' % \ (attr, self.handle_attribute_value(value)) self.result += ' />' else: if self.render_unallowed_tags: self.result += '&lt;' + cgi.escape(tag) for (attr, value) in attrs: self.result += ' %s="%s"' % \ (attr, cgi.escape(value, True)) self.result += ' /&gt;'
Function called for empty tags (e.g. <br />)
def handle_attribute_value(self, value): if self.re_js.match(value) or self.re_vb.match(value): return '' return value
Check attribute. Especially designed for avoiding URLs in the form: javascript:myXSSFunction();
def template_from_filename(filename): ext = filename.split(os.path.extsep)[-1] if not ext in TEMPLATES_MAP: raise ValueError("No template for file extension {}".format(ext)) return TEMPLATES_MAP[ext]
Returns the appropriate template name based on the given file name.
def dt2ts(dt): # Note: only a cheap type assertion, to keep this fast assert isinstance(dt, (datetime.datetime, datetime.date)) ret = time.mktime(dt.timetuple()) if isinstance(dt, datetime.datetime): ret += 1e-6 * dt.microsecond return ret
Converts a datetime or date to a float representing the number of seconds since 1970-01-01 GMT.
def dt2str(dt, flagSeconds=True): if isinstance(dt, str): return dt return dt.strftime(_FMTS if flagSeconds else _FMT)
Converts a datetime object to str if it is not already a str.
def str2dt(s): return datetime.datetime.strptime(s, _FMTS if s.count(":") == 2 else _FMT if s.count(":") == 1 else _FMT0)
Works with time with/without seconds.
def time2seconds(t): return t.hour * 3600 + t.minute * 60 + t.second + float(t.microsecond) / 1e6
Returns seconds since 0h00.
def seconds2time(s): hour, temp = divmod(s, 3600) minute, temp = divmod(temp, 60) temp, second = math.modf(temp) return datetime.time(hour=int(hour), minute=int(minute), second=int(second), microsecond=int(round(temp * 1e6)))
Inverse of time2seconds().
def to_datetime(arg): if isinstance(arg, datetime.datetime): return arg elif arg == 0: return datetime.datetime.now() elif isinstance(arg, str): if arg == "now": arg = datetime.datetime.now() elif arg == "?": arg = datetime.datetime(1970, 1, 1) else: arg = str2dt(arg) elif isinstance(arg, datetime.date): arg = date2datetime(arg) elif isinstance(arg, (int, float)): # Suppose it is a timestamp arg = ts2dt(arg) else: raise TypeError("Wrong type for argument 'arg': {}".format(arg.__class__.__name__)) return arg
Tries to convert an argument of several types to a datetime. Args: arg: datetime, date, int/float (interpreted as a timestamp), or str. If "?", will be converted to 1970-1-1. If 0 or "now", will be converted to datetime.datetime.now()
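A few checkable round-trips for this family of helpers: >>> seconds2time(time2seconds(datetime.time(10, 30, 15))) datetime.time(10, 30, 15) >>> to_datetime("?") # the "?" sentinel maps to the epoch datetime.datetime(1970, 1, 1, 0, 0)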
def list_all_coupons(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_coupons_with_http_info(**kwargs) else: (data) = cls._list_all_coupons_with_http_info(**kwargs) return data
List Coupons Return a list of Coupons This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_coupons(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Coupon] If the method is called asynchronously, returns the request thread.
def sort_points(self, points): new_points = [] z_lookup = {} for z, x, Q in points: z_lookup[z] = (z, x, Q) for key in sorted(z_lookup): new_points.append(z_lookup[key]) return new_points
Take points (z,x,q) and sort by increasing z
def Fit(self, zxq): z, trans, Q = zip(*zxq) assert len(trans) == len(z) ndf = len(z) - 3 z = np.array(z) trans = np.array(trans) def dbexpl(t, p): return(p[0] - p[1] * t + p[2] * t ** 2) def residuals(p, data, t): err = data - dbexpl(t, p) return err doc = {} try: assert ndf > 0 p0 = [1, 0, 0] # initial guesses pbest = leastsq(residuals, p0, args=(trans, z), full_output=1) bestparams = pbest[0] good_of_fit = sum(pbest[2]['fvec'] ** 2) good_of_fit = float(good_of_fit / ndf) doc['params'] = list(bestparams) doc['gof'] = good_of_fit except: doc['gof'] = 'FAIL' doc['params'] = [0, 0, 0] return doc
Perform a 2D fit on 2D points then return parameters :param zxq: A list where each element is (z, transverse, charge)
def _get_last_transverse_over_list(self, zxq): z_max = None x_of_interest = None for z, x, q in zxq: if z_max is None or z > z_max: z_max = z x_of_interest = x return x_of_interest
Get transverse coord at highest z :param zxq: A list where each element is (z, transverse, charge)
def create_cancel_operation(cls, cancel_operation, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_cancel_operation_with_http_info(cancel_operation, **kwargs) else: (data) = cls._create_cancel_operation_with_http_info(cancel_operation, **kwargs) return data
Create CancelOperation Create a new CancelOperation This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_cancel_operation(cancel_operation, async=True) >>> result = thread.get() :param async bool :param CancelOperation cancel_operation: Attributes of cancelOperation to create (required) :return: CancelOperation If the method is called asynchronously, returns the request thread.
def delete_cancel_operation_by_id(cls, cancel_operation_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_cancel_operation_by_id_with_http_info(cancel_operation_id, **kwargs) else: (data) = cls._delete_cancel_operation_by_id_with_http_info(cancel_operation_id, **kwargs) return data
Delete CancelOperation Delete an instance of CancelOperation by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_cancel_operation_by_id(cancel_operation_id, async=True) >>> result = thread.get() :param async bool :param str cancel_operation_id: ID of cancelOperation to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_cancel_operation_by_id(cls, cancel_operation_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_cancel_operation_by_id_with_http_info(cancel_operation_id, **kwargs) else: (data) = cls._get_cancel_operation_by_id_with_http_info(cancel_operation_id, **kwargs) return data
Find CancelOperation Return single instance of CancelOperation by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_cancel_operation_by_id(cancel_operation_id, async=True) >>> result = thread.get() :param async bool :param str cancel_operation_id: ID of cancelOperation to return (required) :return: CancelOperation If the method is called asynchronously, returns the request thread.
def list_all_cancel_operations(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_cancel_operations_with_http_info(**kwargs) else: (data) = cls._list_all_cancel_operations_with_http_info(**kwargs) return data
List CancelOperations Return a list of CancelOperations This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_cancel_operations(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[CancelOperation] If the method is called asynchronously, returns the request thread.
def replace_cancel_operation_by_id(cls, cancel_operation_id, cancel_operation, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs) else: (data) = cls._replace_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs) return data
Replace CancelOperation Replace all attributes of CancelOperation This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True) >>> result = thread.get() :param async bool :param str cancel_operation_id: ID of cancelOperation to replace (required) :param CancelOperation cancel_operation: Attributes of cancelOperation to replace (required) :return: CancelOperation If the method is called asynchronously, returns the request thread.
def update_cancel_operation_by_id(cls, cancel_operation_id, cancel_operation, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs) else: (data) = cls._update_cancel_operation_by_id_with_http_info(cancel_operation_id, cancel_operation, **kwargs) return data
Update CancelOperation Update attributes of CancelOperation This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_cancel_operation_by_id(cancel_operation_id, cancel_operation, async=True) >>> result = thread.get() :param async bool :param str cancel_operation_id: ID of cancelOperation to update. (required) :param CancelOperation cancel_operation: Attributes of cancelOperation to update. (required) :return: CancelOperation If the method is called asynchronously, returns the request thread.
def list_all_geo_zones(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_geo_zones_with_http_info(**kwargs) else: (data) = cls._list_all_geo_zones_with_http_info(**kwargs) return data
List GeoZones Return a list of GeoZones This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_geo_zones(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[GeoZone] If the method is called asynchronously, returns the request thread.
def item_details(item_id, lang="en"): params = {"item_id": item_id, "lang": lang} cache_name = "item_details.%(item_id)s.%(lang)s.json" % params return get_cached("item_details.json", cache_name, params=params)
This resource returns details about a single item. :param item_id: The item to query for. :param lang: The language to display the texts in. The response is an object with at least the following properties. Note that the availability of some properties depends on the type of the item. item_id (number): The item id. name (string): The name of the item. description (string): The item description. type (string): The item type. level (integer): The required level. rarity (string): The rarity. One of ``Junk``, ``Basic``, ``Fine``, ``Masterwork``, ``Rare``, ``Exotic``, ``Ascended`` or ``Legendary``. vendor_value (integer): The value in coins when selling to a vendor. icon_file_id (string): The icon file id to be used with the render service. icon_file_signature (string): The icon file signature to be used with the render service. game_types (list): The game types where the item is usable. Currently known game types are: ``Activity``, ``Dungeon``, ``Pve``, ``Pvp``, ``PvpLobby`` and ``WvW`` flags (list): Additional item flags. Currently known item flags are: ``AccountBound``, ``HideSuffix``, ``NoMysticForge``, ``NoSalvage``, ``NoSell``, ``NotUpgradeable``, ``NoUnderwater``, ``SoulbindOnAcquire``, ``SoulBindOnUse`` and ``Unique`` restrictions (list): Race restrictions: ``Asura``, ``Charr``, ``Human``, ``Norn`` and ``Sylvari``. Each item type has an `additional key`_ with information specific to that item type. .. _additional key: item-properties.html
def recipe_details(recipe_id, lang="en"): params = {"recipe_id": recipe_id, "lang": lang} cache_name = "recipe_details.%(recipe_id)s.%(lang)s.json" % params return get_cached("recipe_details.json", cache_name, params=params)
This resource returns details about a single recipe. :param recipe_id: The recipe to query for. :param lang: The language to display the texts in. The response is an object with the following properties: recipe_id (number): The recipe id. type (string): The type of the produced item. output_item_id (string): The item id of the produced item. output_item_count (string): The amount of items produced. min_rating (string): The minimum rating of the recipe. time_to_craft_ms (string): The time it takes to craft the item. disciplines (list): A list of crafting disciplines that can use the recipe. flags (list): Additional recipe flags. Known flags: ``AutoLearned``: Set for recipes that don't have to be discovered. ``LearnedFromItem``: Set for recipes that need a recipe sheet. ingredients (list): A list of objects describing the ingredients for this recipe. Each object contains the following properties: item_id (string): The item id of the ingredient. count (string): The amount of ingredients required.
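A hedged usage sketch (the ids are hypothetical; responses are cached JSON dicts with the properties described above): >>> item = item_details(12345) >>> item['name'], item['rarity'] >>> recipe = recipe_details(67890) >>> recipe['disciplines']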
def lambda_handler(event, context): table = boto3.resource("dynamodb").Table(os.environ['database']) results = table.scan() output = {'success': True, 'indicators': list(), 'indicatorCount': 0} for item in results.get('Items', list()): indicator = item.get('indicator', None) if not indicator: continue output['indicators'].append(indicator) output['indicators'] = list(set(output['indicators'])) output['indicatorCount'] = len(output['indicators']) return output
Main handler.
def requires_indieauth(f): @wraps(f) def decorated(*args, **kwargs): access_token = get_access_token() resp = check_auth(access_token) if isinstance(resp, Response): return resp return f(*args, **kwargs) return decorated
Wraps a Flask handler to require a valid IndieAuth access token.
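A hedged usage sketch with Flask (the route and handler names are hypothetical): >>> @app.route('/micropub', methods=['POST']) ... @requires_indieauth ... def micropub(): ...     return 'created', 201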
def check_auth(access_token): if not access_token: current_app.logger.error('No access token.') return deny('No access token found.') request = Request( current_app.config['TOKEN_ENDPOINT'], headers={"Authorization" : ("Bearer %s" % access_token)} ) contents = urlopen(request).read().decode('utf-8') token_data = parse_qs(contents) me = token_data['me'][0] client_id = token_data['client_id'][0] if me is None or client_id is None: current_app.logger.error("Invalid token [%s]" % contents) return deny('Invalid token') me, me_error = check_me(me) if me is None: current_app.logger.error("Invalid `me` value [%s]" % me_error) return deny(me_error) scope = token_data['scope'] if not isinstance(scope, str): scope = scope[0] valid_scopes = ('post','create', ) scope_ = scope.split() scope_valid = any((val in scope_) for val in valid_scopes) if not scope_valid: current_app.logger.error("Scope '%s' does not contain 'post' or 'create'." % scope) return deny("Scope '%s' does not contain 'post' or 'create'." % scope) g.user = { 'me': me, 'client_id': client_id, 'scope': scope, 'access_token': access_token }
This function contacts the configured IndieAuth Token Endpoint to see if the given token is a valid token and for whom.
def connect_db(config): rv = sqlite3.connect(config["database"]["uri"]) rv.row_factory = sqlite3.Row return rv
Connects to the database configured under config["database"]["uri"].
def init_db(): with closing(connect_db()) as db: db.cursor().execute("DROP TABLE IF EXISTS entries") db.cursor().execute( """ CREATE TABLE entries ( id INTEGER PRIMARY KEY AUTOINCREMENT, title TEXT NOT NULL, text TEXT NOT NULL ) """ ) db.commit()
Creates the database tables.
def harvest_repo(root_url, archive_path, tag=None, archive_format='tar.gz'): if not git_exists(): raise Exception("Git not found. It probably needs installing.") clone_path = mkdtemp(dir=cfg['CFG_TMPDIR']) git = get_which_git() call([git, 'clone', root_url, clone_path]) chdir(clone_path) if tag: call([git, 'archive', '--format=' + archive_format, '-o', archive_path, tag]) else: call([git, 'archive', '--format=' + archive_format, '-o', archive_path, 'HEAD']) try: rmtree(clone_path) except OSError as e: # Reraise unless ENOENT: No such file or directory # (ok if directory has already been deleted) if e.errno != errno.ENOENT: raise
Archives a specific tag in a specific Git repository. :param root_url: The URL to the Git repo - Supported protocols: git, ssh, http[s]. :param archive_path: The path the archive will be written to - Must end in the same extension as archive_format (NOT inside the temporary clone directory). :param tag: The tag to archive; defaults to HEAD when not given. :param archive_format: One of the following: tar.gz / tar / zip
def gregorian_to_julian(day): before_march = 1 if day.month < MARCH else 0 # # Number of months since March # month_index = day.month + MONTHS_PER_YEAR * before_march - MARCH # # Number of years (year starts on March) since 4800 BC # years_elapsed = day.year - JULIAN_START_YEAR - before_march total_days_in_previous_months = (153 * month_index + 2) // 5 total_days_in_previous_years = 365 * years_elapsed total_leap_days = ( (years_elapsed // 4) - (years_elapsed // 100) + (years_elapsed // 400) ) return sum([ day.day, total_days_in_previous_months, total_days_in_previous_years, total_leap_days, -32045, # Offset to get January 1, 4713 equal to 0 ])
Convert a datetime.date object to its corresponding Julian day. :param day: The datetime.date to convert to a Julian day :returns: A Julian day, as an integer
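A worked example, checkable against published Julian day tables (assumes `from datetime import date`): >>> gregorian_to_julian(date(2000, 1, 1)) 2451545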
def sun_declination(day): day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal() day_angle = 2 * pi * day_of_year / 365 declination_radians = sum([ 0.006918, 0.001480*sin(3*day_angle), 0.070257*sin(day_angle), 0.000907*sin(2*day_angle), -0.399912*cos(day_angle), -0.006758*cos(2*day_angle), -0.002697*cos(3*day_angle), ]) return degrees(declination_radians)
Compute the declination angle of the sun for the given date. Uses the Spencer Formula (found at http://www.illustratingshadows.com/www-formulae-collection.pdf) :param day: The datetime.date to compute the declination angle for :returns: The declination angle of the sun, in degrees
def equation_of_time(day): day_of_year = day.toordinal() - date(day.year, 1, 1).toordinal() # pylint: disable=invalid-name # # Distance Earth moves from solstice to January 1 (so about 10 days) # A = EARTH_ORIBITAL_VELOCITY * (day_of_year + 10) # # Distance Earth moves from solstice to day_of_year # 2 is the number of days from Jan 1 to periheleon # This is the result of a lot of constants collapsing # B = A + 1.914 * sin(radians(EARTH_ORIBITAL_VELOCITY * (day_of_year - 2))) # # Compute "the difference between the angles moved at mean speed, and at # the corrected speed projected onto the equatorial plane, and [divide] by # 180 to get the difference in 'half turns'" # movement_on_equatorial_plane = degrees( atan2( tan(radians(B)), cos(EARTH_AXIS_TILT) ) ) eot_half_turns = (A - movement_on_equatorial_plane) / 180 result = 720 * (eot_half_turns - int(eot_half_turns + 0.5)) return radians(result)
Compute the equation of time for the given date. Uses formula described at https://en.wikipedia.org/wiki/Equation_of_time#Alternative_calculation :param day: The datetime.date to compute the equation of time for :returns: The angle, in radians, of the Equation of Time
def compute_zuhr_utc(day, longitude): eot = equation_of_time(day) # # Formula as described by PrayTime.org doesn't work in Eastern hemisphere # because it expects to be subtracting a negative longitude. +abs() should # do the trick # zuhr_time_utc = 12 + (abs(longitude) / 15) - eot return abs(zuhr_time_utc) % 24
Compute the UTC floating point time for Zuhr given date and longitude. This function is necessary since all other prayer times are based on the time for Zuhr :param day: The day to compute Zuhr adhan for :param longitude: Longitude of the place of interest :returns: The UTC time for Zuhr, as a floating point number in [0, 24)
def compute_time_at_sun_angle(day, latitude, angle): positive_angle_rad = radians(abs(angle)) angle_sign = abs(angle)/angle latitude_rad = radians(latitude) declination = radians(sun_declination(day)) numerator = -sin(positive_angle_rad) - sin(latitude_rad) * sin(declination) denominator = cos(latitude_rad) * cos(declination) time_diff = degrees(acos(numerator/denominator)) / 15 return time_diff * angle_sign
Compute the floating point time difference between mid-day and an angle. All the prayers are defined as certain angles from mid-day (Zuhr). This formula is taken from praytimes.org/calculation :param day: The day to which to compute for :param latitude: Latitude of the place of interest :param angle: The angle at which to compute the time :returns: The floating point time delta between Zuhr and the angle, the sign of the result corresponds to the sign of the angle
def time_at_shadow_length(day, latitude, multiplier): latitude_rad = radians(latitude) declination = radians(sun_declination(day)) angle = arccot( multiplier + tan(abs(latitude_rad - declination)) ) numerator = sin(angle) - sin(latitude_rad)*sin(declination) denominator = cos(latitude_rad) * cos(declination) return degrees(acos(numerator/denominator)) / 15
Compute the time at which an object's shadow is a multiple of its length. Specifically, determine the time the length of the shadow is a multiple of the object's length + the length of the object's shadow at noon This is used in the calculation for Asr time. Hanafi uses a multiplier of 2, and everyone else uses a multiplier of 1 Algorithm taken almost directly from PrayTimes.org code :param day: The day which to compute for :param latitude: The latitude of the place of interest :param multiplier: The multiplier of the object's length :returns: The floating point time delta between Zuhr and the time at which the length of the shadow is as defined
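A hedged sketch of how these helpers compose into actual prayer times (the 18-degree Fajr angle and the Hanafi multiplier of 2 are conventional values, not taken from this code): >>> zuhr = compute_zuhr_utc(day, longitude) >>> fajr = zuhr + compute_time_at_sun_angle(day, latitude, -18) # negative angle -> before mid-day >>> asr = zuhr + time_at_shadow_length(day, latitude, 2)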
def round(self, x): fraction, scaled_x, scale = self._get_fraction(x) rounddown = fraction < .5 if rounddown: result = math.floor(scaled_x) / scale else: result = math.ceil(scaled_x) / scale self._record_roundoff_error(x, result) return result
Round the given value. @param x: value to round @type x: numeric
def parse_range_header(range): ''' Parse a range header as used by the dojo Json Rest store. :param str range: The content of the range header to be parsed. eg. `items=0-9` :returns: A dict with keys start, finish and number or `False` if the range is invalid. ''' match = re.match('^items=([0-9]+)-([0-9]+)$', range) if match: start = int(match.group(1)) finish = int(match.group(2)) if finish < start: finish = start return { 'start': start, 'finish': finish, 'number': finish - start + 1 } else: return False
Parse a range header as used by the dojo Json Rest store. :param str range: The content of the range header to be parsed. eg. `items=0-9` :returns: A dict with keys start, finish and number or `False` if the range is invalid.
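Two checkable examples: >>> parse_range_header('items=0-9') {'start': 0, 'finish': 9, 'number': 10} >>> parse_range_header('items=foo') False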
def on(self, event): handler = self._handlers.get(event, None) if not handler: raise ValueError("Unknown event '{}'".format(event)) return handler.register
Returns a wrapper for the given event. Usage: @dispatch.on("my_event") def handle_my_event(foo, bar, baz): ...
def register(self, event, keys): if self.running: raise RuntimeError("Can't register while running") handler = self._handlers.get(event, None) if handler is not None: raise ValueError("Event {} already registered".format(event)) self._handlers[event] = EventHandler(event, keys, loop=self.loop)
Register a new event with available keys. Raises ValueError when the event has already been registered. Usage: dispatch.register("my_event", ["foo", "bar", "baz"])
def unregister(self, event): if self.running: raise RuntimeError("Can't unregister while running") self._handlers.pop(event, None)
Remove all registered handlers for an event. Silent return when event was not registered. Usage: dispatch.unregister("my_event") dispatch.unregister("my_event") # no-op
async def trigger(self, event, kwargs): await self._queue.put((event, kwargs)) self._resume_processing.set()
Enqueue an event for processing
async def _task(self): if self._handlers.values(): start_tasks = [h.start() for h in self._handlers.values()] await asyncio.wait(start_tasks, loop=self.loop) while self.running: if self.events: event, kwargs = await self._queue.get() handler = self._handlers.get(event, None) if handler: handler(kwargs) else: # Resume on either the next `trigger` call or a `stop` await self._resume_processing.wait() self._resume_processing.clear() # Give all the handlers a chance to complete their pending tasks tasks = [handler.stop() for handler in self._handlers.values()] if tasks: await asyncio.wait(tasks, loop=self.loop) # Let the shutdown process continue await self._complete_shutdown()
Main queue processor
def fmultiprocess( log, function, inputArray, poolSize=False, timeout=3600, **kwargs): log.debug('starting the ``multiprocess`` function') # DEFINE POOL SIZE - NUMBER OF CPU CORES TO USE (BEST = ALL - 1) if not poolSize: poolSize = psutil.cpu_count() if poolSize: p = Pool(processes=poolSize) else: p = Pool() cpuCount = psutil.cpu_count() chunksize = int((len(inputArray) + 1) / (cpuCount * 3)) if chunksize == 0: chunksize = 1 # MAP-REDUCE THE WORK OVER MULTIPLE CPU CORES if "log" in inspect.getargspec(function)[0]: mapfunc = partial(function, log=log, **kwargs) resultArray = p.map_async(mapfunc, inputArray, chunksize=chunksize) else: mapfunc = partial(function, **kwargs) resultArray = p.map_async(mapfunc, inputArray, chunksize=chunksize) resultArray = resultArray.get(timeout=timeout) p.close() p.terminate() log.debug('completed the ``multiprocess`` function') return resultArray
multiprocess pool **Key Arguments:** - ``log`` -- logger - ``function`` -- the function to multiprocess - ``inputArray`` -- the array to be iterated over - ``poolSize`` -- limit the number of CPU that are used in multiprocess job - ``timeout`` -- time in sec after which to raise a timeout error if the processes have not completed **Return:** - ``resultArray`` -- the array of results **Usage:** .. code-block:: python from fundamentals import fmultiprocess # DEFINE AN INPUT ARRAY inputArray = range(10000) results = fmultiprocess(log=log, function=functionName, poolSize=10, timeout=300, inputArray=inputArray, otherFunctionKeyword="cheese")
def restriction(lam, mu, orbitals, U, beta): return 2*orbitals*fermi_dist(-(mu + lam), beta) - expected_filling(-1*lam, orbitals, U, beta)
Equation that determines the restriction on the Lagrange multiplier
def pressision_try(orbitals, U, beta, step): mu, lam = main(orbitals, U, beta, step) mu2, lam2 = linspace(0, U*orbitals, step), zeros(step) for i in range(99): lam2[i+1] = fsolve(restriction, lam2[i], (mu2[i+1], orbitals, U, beta)) plot(mu2, 2*orbitals*fermi_dist(-(mu2+lam2), beta), label='Test guess') legend(loc=0)
Perform a better initial guess of lambda; no improvement observed.
def get_python_logger(): global _python_logger if _python_logger is None: fn = "a99.log" l = logging.Logger("a99", level=a99.logging_level) if a99.flag_log_file: add_file_handler(l, fn) if a99.flag_log_console: ch = logging.StreamHandler() ch.setFormatter(_fmtr) l.addHandler(ch) _python_logger = l for line in a99.format_box("a99 logging session started @ {}".format(a99.now_str())): l.info(line) if a99.flag_log_console: l.info("$ Logging to console $") if a99.flag_log_file: l.info("$ Logging to file '{}' $".format(fn)) return _python_logger
Returns logger to receive Python messages (as opposed to Fortran). At first call, _python_logger is created. At subsequent calls, _python_logger is returned. Therefore, if you want to change `a99.flag_log_file` or `a99.flag_log_console`, do so before calling get_python_logger(), otherwise these changes will be ineffective.
def add_file_handler(logger, logFilename=None): assert isinstance(logger, logging.Logger) ch = logging.FileHandler(logFilename, "a") # ch.setFormatter(logging._defaultFormatter) # todo may change to have same formatter as last handler of logger ch.setFormatter(_fmtr) logger.addHandler(ch)
Adds file handler to logger. File is opened in "a" mode (append)
def symmetric_difference_update(self, other): # type: (Iterable[Any]) -> _BaseSet intersect = self.intersection(other) self.remove(*intersect) for elem in set(other).difference(intersect): self.add(elem) return self
Update the TerminalSet. Keep elements from self and other, but discard elements that are in both. :param other: Iterable object with elements to compare with. :return: Current instance with updated state.
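A hedged sketch of the update semantics (the constructor is hypothetical): >>> s = TerminalSet([1, 2, 3]) >>> s.symmetric_difference_update([2, 3, 4]) # s now holds only 1 and 4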
def spin_z(particles, index): mat = np.zeros((2**particles, 2**particles)) for i in range(2**particles): ispin = btest(i, index) if ispin == 1: mat[i, i] = 1 else: mat[i, i] = -1 return 1/2.*mat
Generates the spin_z projection operator for a system of N=particles and for the selected spin index, where index=0..N-1
def spin_gen(particles, index, gauge=1): mat = np.zeros((2**particles, 2**particles)) flipper = 2**index for i in range(2**particles): ispin = btest(i, index) if ispin == 1: mat[i ^ flipper, i] = 1 else: mat[i ^ flipper, i] = gauge return mat
Generates the generic spin operator in the z basis for a system of N=particles and for the selected spin index, where index=0..N-1. The gauge term sets the behavior for a system away from half-filling
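For a single spin (particles=1, index=0) these reduce to the familiar Pauli matrices, which makes an easy sanity check: >>> spin_z(1, 0) # diag(-1/2, +1/2), i.e. sigma_z / 2 in this bit ordering >>> spin_gen(1, 0) # [[0, 1], [1, 0]], i.e. sigma_x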
def insert(self, val, pipe=None): p = self.redis.pipeline() if pipe is None else pipe try: key, token, formatted_key, formatted_token = self.next_formatted_pair() p.watch(formatted_key, formatted_token) # Make this atomic p.multi() # Associate both the value and token with the key to # allow `get_token(key)` p.hsetnx(formatted_key, 'value', val) p.hsetnx(formatted_key, 'token', token) p.setnx(formatted_token, key) if pipe is None: results = p.execute() if not results[-2] or not results[-3]: raise KeyInsertError(key, 'key exists') if not results[-1]: raise TokenInsertError(token, 'token exists') return Pair(key, token) except WatchError: raise finally: if pipe is None: p.reset()
Inserts a value and returns a :class:`Pair <shorten.Pair>`. .. admonition :: Key Safety Keys and tokens are always inserted with a :class:`Pipeline`, so irrevocable keys will never occur. If `pipe` is given, :class:`KeyInsertError <shorten.KeyInsertError>` and :class:`TokenInsertError <shorten.TokenInsertError>` will not be thrown if duplicate keys and tokens exist. Instead, the nth-from-last results must be checked: :: pipe = redis.pipeline() key, token = short.insert('value', pipe) results = pipe.execute() if not results[-2]: raise KeyInsertError(key) if not results[-1]: raise TokenInsertError(token) :attr val: a value to insert. :attr pipe: a Redis pipeline. If `None`, the pair will be returned immediately. Otherwise they must be extracted from the pipeline results (see above).
def revoke(self, token, pipe=None): p = self.redis.pipeline() if pipe is None else pipe formatted_token = self.format_token(token) try: p.watch(formatted_token) # Get the key immediately key = p.get(formatted_token) formatted_key = self.format_key(key) # Make this atomic p.multi() p.delete(formatted_key, formatted_token) if pipe is None: if not p.execute()[-1]: raise RevokeError(token, 'token not found') except WatchError: raise finally: if pipe is None: p.reset()
Revokes the key associated with the given revocation token. If the token does not exist, a :class:`KeyError <KeyError>` is thrown. Otherwise `None` is returned. If `pipe` is given, then a :class:`RevokeError <shorten.RevokeError>` will not be thrown if the key does not exist. The n-th from last result should be checked like so: :: pipe = redis.Pipeline() store.revoke(token, pipe=pipe) results = pipe.execute() if not results[-1]: raise RevokeError(token) :param pipe: a Redis pipeline. If `None`, the token will be revoked immediately. Otherwise they must be extracted from the pipeline results (see above).
async def oauth(request): provider = request.match_info.get('provider') client, _ = await app.ps.oauth.login(provider, request) user, data = await client.user_info() response = ( "<a href='/'>back</a><br/><br/>" "<ul>" "<li>ID: {u.id}</li>" "<li>Username: {u.username}</li>" "<li>First, last name: {u.first_name}, {u.last_name}</li>" "<li>Email: {u.email}</li>" "<li>Link: {u.link}</li>" "<li>Picture: {u.picture}</li>" "<li>Country, city: {u.country}, {u.city}</li>" "</ul>" ).format(u=user) response += "<code>%s</code>" % html.escape(repr(data)) return response
Oauth example.
def parse(representation): representation = str(representation).upper().strip() if '/' in representation: return parse_interval(representation) if representation[0] == 'P': return parse_duration(representation) return parse_date(representation)
Attempts to parse an ISO8601 formatted ``representation`` string, which could be of any valid ISO8601 format (date, time, duration, interval). Return value is specific to ``representation``.
def BeginOfEventAction(self, event): self.log.info("Simulating event %s", event.GetEventID()) self.sd.setEventNumber(event.GetEventID())
Save event number
def EndOfEventAction(self, event): self.log.debug('Processing simulated event %d', event.GetEventID()) docs = self.sd.getDocs() self.sd.clearDocs() for processor in self.processors: docs = processor.process(docs) if not docs: self.log.warning('%s did not return documents in process()!', processor.__class__.__name__)
At the end of an event, grab sensitive detector hits then run processor loop
def setup( self): return self.arguments, self.settings, self.log, self.dbConn
**Summary:** *setup the attributes and return*
def _checkServer(self, address, port): # CREATE A TCP SOCKET import socket s = socket.socket() try: s.connect((address, port)) return True except socket.error as e: self.log.warning( """Connection to `%(address)s` on port `%(port)s` failed - try again: %(e)s""" % locals()) return False
*Check that the TCP Port we've decided to use for tunnelling is available*
def run(self): if KSER_METRICS_ENABLED == "yes": from prometheus_client import start_http_server logger.info("Metric.Starting...") start_http_server( os.getenv("KSER_METRICS_PORT", 8888), os.getenv("KSER_METRICS_ADDRESS", "0.0.0.0") ) logger.info("{}.Starting...".format(self.__class__.__name__)) while True: if self.is_active() is True: msg = next(self.client) data = msg.value.decode('utf-8') if self.client.config['enable_auto_commit'] is False: self.client.commit() logger.debug("{}: Manual commit done.".format( self.__class__.__name__ )) self.REGISTRY.run(data) else: logger.warning("Consumer is paused") time.sleep(60)
Run consumer
def generate_s3_bucket(): logger.debug("[#] Setting up S3 bucket") client = boto3.client("s3", region_name=PRIMARY_REGION) buckets = client.list_buckets() matches = [x for x in buckets.get('Buckets', list()) if x['Name'].startswith(S3_BUCKET)] if len(matches) > 0: logger.debug("[*] Bucket already exists") return matches.pop() response = client.create_bucket( Bucket=S3_BUCKET, CreateBucketConfiguration={ 'LocationConstraint': PRIMARY_REGION } ) logger.info("[#] Successfully setup the S3 bucket") return response
Create the blockade bucket if not already there.
def remove_s3_bucket(): logger.debug("[#] Removing S3 bucket") client = boto3.client("s3", region_name=PRIMARY_REGION) buckets = client.list_buckets() matches = [x for x in buckets.get('Buckets', list()) if x['Name'].startswith(S3_BUCKET_NAME)] if len(matches) == 0: return match = matches.pop()['Name'] try: response = client.list_objects_v2( Bucket=match, ) except client.exceptions.NoSuchBucket: logger.info("[!] S3 bucket already deleted") return True while response['KeyCount'] > 0: logger.debug('[*] Deleting %d objects from bucket %s' % (len(response['Contents']), match)) response = client.delete_objects( Bucket=match, Delete={ 'Objects': [{'Key': obj['Key']} for obj in response['Contents']] } ) response = client.list_objects_v2( Bucket=match, ) logger.debug('[#] Deleting bucket %s' % match) response = client.delete_bucket( Bucket=match ) logger.info("[#] Successfully deleted the S3 bucket") return response
Remove the Blockade bucket.
def generate_dynamodb_tables(): logger.debug("[#] Setting up DynamoDB tables") client = boto3.client('dynamodb', region_name=PRIMARY_REGION) existing_tables = client.list_tables()['TableNames'] responses = list() for label in DYNAMODB_TABLES: if label in existing_tables: logger.debug("[*] Table %s already exists" % (label)) continue kwargs = { 'TableName': label, 'ProvisionedThroughput': { 'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5 } } kwargs.update(DYNAMODB_SCHEMAS[label]) response = client.create_table(**kwargs) responses.append(response) logger.debug("[#] Successfully setup DynamoDB table %s" % (label)) logger.info("[#] Successfully setup DynamoDB tables") return responses
Create the Blockade DynamoDB tables.
def remove_dynamodb_tables(): logger.debug("[#] Removing DynamoDB tables") client = boto3.client('dynamodb', region_name=PRIMARY_REGION) responses = list() for label in DYNAMODB_TABLES: logger.debug("[*] Removing %s table" % (label)) try: response = client.delete_table( TableName=label ) except client.exceptions.ResourceNotFoundException: logger.info("[!] Table %s already removed" % (label)) continue responses.append(response) logger.debug("[*] Removed %s table" % (label)) logger.info("[#] Successfully removed DynamoDB tables") return responses
Remove the Blockade DynamoDB tables.
def generate_lambda_functions(): logger.debug("[#] Setting up the Lambda functions") aws_lambda = boto3.client('lambda', region_name=PRIMARY_REGION) functions = aws_lambda.list_functions().get('Functions') existing_funcs = [x['FunctionName'] for x in functions] iam = boto3.resource('iam') account_id = iam.CurrentUser().arn.split(':')[4] responses = list() for label in LAMBDA_FUNCTIONS: if label in existing_funcs: logger.debug("[*] Lambda function %s already exists" % (label)) continue dir_path = os.path.dirname(os.path.realpath(__file__)) dir_path = dir_path.replace('/cli', '/aws') kwargs = { 'Runtime': 'python2.7', 'Role': 'arn:aws:iam::{0}:role/{1}'.format(account_id, BLOCKADE_ROLE), 'Timeout': 3, 'MemorySize': 128, 'Publish': True, 'Code': { 'ZipFile': open("{0}/lambda-zips/{1}.zip".format(dir_path, label), 'rb').read() } } kwargs.update(LAMBDA_SCHEMA[label]) logger.debug("[#] Setting up the %s Lambda function" % (label)) response = aws_lambda.create_function(**kwargs) responses.append(response) logger.debug("[#] Successfully setup Lambda function %s" % (label)) logger.info("[#] Successfully setup Lambda functions") return responses
Create the Blockade lambda functions.
def remove_lambda_functions(): logger.debug("[#] Removing the Lambda functions") client = boto3.client('lambda', region_name=PRIMARY_REGION) responses = list() for label in LAMBDA_FUNCTIONS: try: response = client.delete_function( FunctionName=label, ) except client.exceptions.ResourceNotFoundException: logger.info("[!] Function %s already removed" % (label)) continue responses.append(response) logger.debug("[*] Removed %s function" % (label)) logger.info("[#] Successfully removed Lambda functions") return responses
Remove the Blockade Lambda functions.
def generate_api_gateway(): logger.debug("[#] Setting up the API Gateway") client = boto3.client('apigateway', region_name=PRIMARY_REGION) matches = [x for x in client.get_rest_apis().get('items', list()) if x['name'] == API_GATEWAY] if len(matches) > 0: logger.debug("[#] API Gateway already setup") return matches.pop() response = client.create_rest_api( name=API_GATEWAY, description='REST-API to power the Blockade service' ) logger.info("[#] Successfully setup the API Gateway") return response
Create the Blockade API Gateway REST service.
def generate_admin_resource(): logger.debug("[#] Setting up the admin resource") client = boto3.client('apigateway', region_name=PRIMARY_REGION) existing = get_api_gateway_resource("admin") if existing: logger.debug("[#] API admin resource already created") return True matches = [x for x in client.get_rest_apis().get('items', list()) if x['name'] == API_GATEWAY] match = matches.pop() resource_id = get_api_gateway_resource('/') response = client.create_resource( restApiId=match.get('id'), parentId=resource_id, pathPart='admin' ) logger.info("[#] Successfully setup the admin resource") return response
Create the Blockade admin resource for the REST services.
def get_api_gateway_resource(name): client = boto3.client('apigateway', region_name=PRIMARY_REGION) matches = [x for x in client.get_rest_apis().get('items', list()) if x['name'] == API_GATEWAY] match = matches.pop() resources = client.get_resources(restApiId=match.get('id')) resource_id = None for item in resources.get('items', list()): if item.get('pathPart', '/') != name: continue resource_id = item['id'] return resource_id
Get the resource associated with our gateway.
def remove_api_gateway(): logger.debug("[#] Removing API Gateway") client = boto3.client('apigateway', region_name=PRIMARY_REGION) matches = [x for x in client.get_rest_apis().get('items', list()) if x['name'] == API_GATEWAY] if len(matches) == 0: logger.info("[!] API Gateway already removed") return True match = matches.pop() response = client.delete_rest_api( restApiId=match.get('id') ) logger.info("[#] Removed API Gateway") return response
Remove the Blockade REST API service.
def method_delegate(**methods): methods = {k.upper(): v for k, v in iteritems(methods)} if PY3: methods = {k.encode("utf-8"): v for k, v in iteritems(methods)} def render(request): renderer = methods.get(request.method) if renderer is None: return Response(code=405) return renderer(request) return render
Construct a renderer that delegates based on the request's HTTP method.
def traverse(path, request, resource): path = path.lstrip(b"/") for component in path and path.split(b"/"): if getattr(resource, "is_leaf", False): break resource = resource.get_child(name=component, request=request) return resource
Traverse a root resource, retrieving the appropriate child for the request.
def every_other(pipe, how_many=1): ''' feeding this function a pipe yields every other (or how ever many) objects you want at a time. ''' for i,x in zip(pipe, cycle(repeater([True,False], how_many))): if x: yield i
feeding this function a pipe yields every other (or how ever many) objects you want at a time.
def create_address(cls, address, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_address_with_http_info(address, **kwargs) else: (data) = cls._create_address_with_http_info(address, **kwargs) return data
Create Address Create a new Address This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_address(address, async=True) >>> result = thread.get() :param async bool :param Address address: Attributes of address to create (required) :return: Address If the method is called asynchronously, returns the request thread.
def delete_address_by_id(cls, address_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_address_by_id_with_http_info(address_id, **kwargs) else: (data) = cls._delete_address_by_id_with_http_info(address_id, **kwargs) return data
Delete Address Delete an instance of Address by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_address_by_id(address_id, async=True) >>> result = thread.get() :param async bool :param str address_id: ID of address to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_address_by_id(cls, address_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_address_by_id_with_http_info(address_id, **kwargs) else: (data) = cls._get_address_by_id_with_http_info(address_id, **kwargs) return data
Find Address Return single instance of Address by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_address_by_id(address_id, async=True) >>> result = thread.get() :param async bool :param str address_id: ID of address to return (required) :return: Address If the method is called asynchronously, returns the request thread.
def list_all_addresses(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_addresses_with_http_info(**kwargs) else: (data) = cls._list_all_addresses_with_http_info(**kwargs) return data
List Addresses Return a list of Addresses This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_addresses(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Address] If the method is called asynchronously, returns the request thread.
def replace_address_by_id(cls, address_id, address, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_address_by_id_with_http_info(address_id, address, **kwargs) else: (data) = cls._replace_address_by_id_with_http_info(address_id, address, **kwargs) return data
Replace Address Replace all attributes of Address This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_address_by_id(address_id, address, async=True) >>> result = thread.get() :param async bool :param str address_id: ID of address to replace (required) :param Address address: Attributes of address to replace (required) :return: Address If the method is called asynchronously, returns the request thread.
def update_address_by_id(cls, address_id, address, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_address_by_id_with_http_info(address_id, address, **kwargs) else: (data) = cls._update_address_by_id_with_http_info(address_id, address, **kwargs) return data
Update Address Update attributes of Address This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_address_by_id(address_id, address, async=True) >>> result = thread.get() :param async bool :param str address_id: ID of address to update. (required) :param Address address: Attributes of address to update. (required) :return: Address If the method is called asynchronously, returns the request thread.
def escape_shell_arg(shell_arg): if isinstance(shell_arg, six.text_type): msg = "ERROR: escape_shell_arg() expected string argument but " \ "got '%s' of type '%s'." % (repr(shell_arg), type(shell_arg)) raise TypeError(msg) return "'%s'" % shell_arg.replace("'", r"'\''")
Escape shell argument shell_arg by placing it within single-quotes. Any single quotes found within the shell argument string will be escaped. @param shell_arg: The shell argument to be escaped. @type shell_arg: string @return: The single-quote-escaped value of the shell argument. @rtype: string @raise TypeError: if shell_arg is not a string. @see: U{http://mail.python.org/pipermail/python-list/2005-October/346957.html}
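For example, an embedded single quote is closed, escaped, and reopened: >>> escape_shell_arg("Bob's files") # returns 'Bob'\''s files' , safe to pass to a POSIX shell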
def retry_mkstemp(suffix='', prefix='tmp', directory=None, max_retries=3): if directory is None: directory = current_app.config['CFG_TMPSHAREDDIR'] for retry_count in range(1, max_retries + 1): try: tmp_file_fd, tmp_file_name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=directory) except OSError as e: if e.errno == 19 and retry_count <= max_retries: # AFS Glitch? time.sleep(10) else: raise else: break return tmp_file_fd, tmp_file_name
Make mkstemp more robust against AFS glitches.
def split_cli_ids_arg(value): def parse(el): el = el.strip() if not el: ret = [] elif '-' in el: start, end = el.split('-', 1) ret = xrange(int(start), int(end) + 1) else: ret = [int(el)] return ret return set(chain(*(parse(c) for c in value.split(',') if c.strip())))
Split ids given in the command line Possible formats are: * 1 * 1,2,3,4 * 1-5,20,30,40 Returns respectively * set([1]) * set([1,2,3,4]) * set([1,2,3,4,5,20,30,40])
def get_declared_fields(bases, attrs, cls_filter, with_base_fields=True, extra_attr_name='base_fields'): fields = [(field_name, attrs.pop(field_name))\ for field_name, obj in attrs.items()\ if isinstance(obj, cls_filter)] fields.sort(key=lambda x: x[1].creation_counter) # If this class is subclassing another Form, add that Form's fields. # Note that we loop over the bases in *reverse*. This is necessary in # order to preserve the correct order of fields. if with_base_fields: for base in reversed(bases): if hasattr(base, extra_attr_name): fields = getattr(base, extra_attr_name).items() + fields else: for base in reversed(bases): if hasattr(base, 'declared_fields'): fields = base.declared_fields.items() + fields return SortedDict(fields)
Create a list of form field instances from the passed in 'attrs', plus any similar fields on the base classes (in 'bases'). This is used by both the Form and ModelForm metaclasses. If 'with_base_fields' is True, all fields from the bases are used. Otherwise, only fields in the 'declared_fields' attribute on the bases are used. The distinction is useful in ModelForm subclassing. Also integrates any additional media definitions
def declarative_fields(cls_filter, meta_base=type, extra_attr_name='base_fields'): def __new__(cls, name, bases, attrs): attrs[extra_attr_name] = fields = get_declared_fields(bases, attrs, cls_filter, extra_attr_name=extra_attr_name) attrs[extra_attr_name + '_names'] = set(fields.keys()) new_class = meta_base.__new__(cls, name, bases, attrs) return new_class return type('', (meta_base,), {'__new__': __new__})
Metaclass that converts Field attributes to a dictionary called 'base_fields', taking into account parent class 'cls_filter'.
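A hedged usage sketch in Python 2 syntax to match the surrounding code (`Field` is a hypothetical field class with the `creation_counter` attribute that get_declared_fields sorts on): >>> FormMeta = declarative_fields(Field) >>> class MyForm(object): ...     __metaclass__ = FormMeta ...     name = Field() >>> 'name' in MyForm.base_fields_names True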