code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def get(self):
    """Fetch this object from slick via HTTP GET and return the model.

    Retries up to 3 times; raises SlickCommunicationError when no
    attempt returns HTTP 200.
    """
    url = self.getUrl()
    # hopefully when we discover what problems exist in slick to require this, we can take the loop out
    for retry in range(3):
        try:
            self.logger.debug("Making request to slick at url %s", url)
            r = requests.get(url)
            self.logger.debug("Request returned status code %d", r.status_code)
            # '==' instead of 'is': identity comparison of ints only worked
            # by accident of CPython's small-int cache and warns on modern Pythons.
            if r.status_code == 200:
                return self.model.from_dict(r.json())
            else:
                self.logger.debug("Body of what slick returned: %s", r.text)
        except BaseException as error:
            self.logger.warn("Received exception while connecting to slick at %s",
                             url, exc_info=sys.exc_info())
    raise SlickCommunicationError(
        "Tried 3 times to request data from slick at url %s without a successful status code.", url)
Get the specified object from slick. You specify which one you want by providing the id as a parameter to the parent object. Example: slick.projects("4fd8cd95e4b0ee7ba54b9885").get()
def update(self):
    """PUT this object's data to slick and return the updated model.

    Retries up to 3 times; raises SlickCommunicationError carrying the
    last status code and response body when no attempt returns HTTP 200.
    """
    obj = self.data
    url = self.getUrl()
    # hopefully when we discover what problems exist in slick to require this, we can take the loop out
    last_status_code = None  # renamed from 'last_stats_code' (typo)
    last_body = None
    for retry in range(3):
        try:
            json_data = obj.to_json()
            self.logger.debug("Making request to slick at url %s, with data: %s", url, json_data)
            r = requests.put(url, data=json_data, headers=json_content)
            self.logger.debug("Request returned status code %d", r.status_code)
            # '==' instead of 'is': int identity comparison relied on the
            # CPython small-int cache and warns on modern Pythons.
            if r.status_code == 200:
                return self.model.from_dict(r.json())
            else:
                last_status_code = r.status_code
                last_body = r.text
                self.logger.warn("Slick status code: %d", r.status_code)
                self.logger.warn("Body of what slick returned: %s", r.text)
        except BaseException as error:
            self.logger.warn("Received exception while connecting to slick at %s",
                             url, exc_info=sys.exc_info())
            traceback.print_exc()
    raise SlickCommunicationError(
        "Tried 3 times to request data from slick at url %s without a successful status code. Last status code: %d, body: %s",
        url, last_status_code, last_body)
Update the specified object from slick. You specify the object as a parameter, using the parent object as a function. Example: proj = slick.projects.findByName("foo") ... update proj here slick.projects(proj).update()
def create(self):
    """POST this object's data to slick and return the created model.

    Retries up to 3 times; raises SlickCommunicationError when no
    attempt returns HTTP 200.
    """
    obj = self.data
    self.data = None
    url = self.getUrl()
    # hopefully when we discover what problems exist in slick to require this, we can take the loop out
    for retry in range(3):
        try:
            json_data = obj.to_json()
            self.logger.debug("Making request to slick at url %s, with data: %s", url, json_data)
            r = requests.post(url, data=json_data, headers=json_content)
            self.logger.debug("Request returned status code %d", r.status_code)
            # '==' instead of 'is': int identity comparison relied on the
            # CPython small-int cache and warns on modern Pythons.
            if r.status_code == 200:
                return self.model.from_dict(r.json())
            else:
                self.logger.debug("Body of what slick returned: %s", r.text)
        except BaseException as error:
            self.logger.warn("Received exception while connecting to slick at %s",
                             url, exc_info=sys.exc_info())
    raise SlickCommunicationError(
        "Tried 3 times to request data from slick at url %s without a successful status code.", url)
Create the specified object (perform a POST to the api). You specify the object as a parameter, using the parent object as a function. Example: proj = Project() ... add project data here proj = slick.projects(proj).create()
def remove(self):
    """DELETE this object from slick.

    Retries up to 3 times; returns None on success, raises
    SlickCommunicationError otherwise.
    """
    url = self.getUrl()
    # hopefully when we discover what problems exist in slick to require this, we can take the loop out
    for retry in range(3):
        try:
            self.logger.debug("Making DELETE request to slick at url %s", url)
            r = requests.delete(url)
            self.logger.debug("Request returned status code %d", r.status_code)
            # '==' instead of 'is': int identity comparison relied on the
            # CPython small-int cache and warns on modern Pythons.
            if r.status_code == 200:
                return None
            else:
                self.logger.debug("Body of what slick returned: %s", r.text)
        except BaseException as error:
            self.logger.warn("Received exception while connecting to slick at %s",
                             url, exc_info=sys.exc_info())
    raise SlickCommunicationError(
        "Tried 3 times to request data from slick at url %s without a successful status code.", url)
Remove or delete the specified object from slick. You specify which one you want by providing the id as a parameter to the parent object, using it as a function. Example: slick.projects("4fd8cd95e4b0ee7ba54b9885").remove()
def upload_local_file(self, local_file_path, file_obj=None):
    """Create a StoredFile in slick and upload its data in chunks.

    Discovers mime-type and size from *local_file_path* (or *file_obj*),
    creates the stored file, streams the content, then returns the
    re-fetched (updated) stored file.
    """
    if file_obj is None and not os.path.exists(local_file_path):
        return
    storedfile = StoredFile()
    storedfile.mimetype = mimetypes.guess_type(local_file_path)[0]
    storedfile.filename = os.path.basename(local_file_path)
    if file_obj is None:
        storedfile.length = os.stat(local_file_path).st_size
    else:
        # Measure the stream by seeking to its end, then rewind.
        file_obj.seek(0, os.SEEK_END)
        storedfile.length = file_obj.tell()
        file_obj.seek(0)
    storedfile = self(storedfile).create()
    # Removed unused local 'md5 = hashlib.md5()' (never referenced).
    url = self(storedfile).getUrl() + "/addchunk"
    if file_obj is None:
        with open(local_file_path, 'rb') as filecontents:
            upload_chunks(url, storedfile, filecontents)
    else:
        upload_chunks(url, storedfile, file_obj)
    return self(storedfile).update()
Create a Stored File and upload its data. This is a one part do it all type method. Here is what it does: 1. "Discover" information about the file (mime-type, size) 2. Create the stored file object in slick 3. Upload (chunked) all the data in the local file 4. re-fetch the stored file object from slick, and return it
def lookup_cc_partner(nu_pid):
    """Return the PDG code of the charged-current lepton partner.

    E.g. 12 (nu_e) -> 11 (e-); the matter/antimatter sign of the input
    is kept.
    """
    flavor = math.fabs(nu_pid)
    assert flavor in [12, 14, 16]
    partner = flavor - 1                       # e, mu, or tau code
    signed = math.copysign(partner, nu_pid)    # reapply matter/antimatter sign
    return int(signed)
Lookup the charge current partner Takes as an input neutrino nu_pid is a PDG code, then returns the charged lepton partner. So 12 (nu_e) returns 11. Keeps sign
def _set_vector_value(self, var_name, value):
    """Store *value* under *var_name* as per-coordinate Distribution objects."""
    coords = convert_3vector_to_dict(value)
    self.particle[var_name] = coords
    for coord in coords.keys():
        coords[coord] = Distribution(coords[coord])
Private
def block_comment(solver, start, end):
    '''embedable block comment'''
    # NOTE(review): the original line contained this function pasted twice,
    # truncated mid-token ("pof block_comment..."); this is the single
    # reconstructed copy.
    text, pos = solver.parse_state
    length = len(text)
    startlen = len(start)
    endlen = len(end)
    if pos == length:
        return
    if not text[pos:].startswith(start):
        return
    level = 1          # nesting depth of start/end pairs
    p = pos + 1
    while p < length:
        if text[p:].startswith(end):
            level -= 1
            p += endlen
            if level == 0:
                break
        elif text[p:].startswith(start):
            level += 1
            p += startlen
        else:
            p += 1
    else:
        return         # ran off the end without closing the comment
    solver.parse_state = text, p
    yield cont, text[pos:p]
    solver.parse_state = text, pos   # restore state for backtracking
embedable block comment
def formatException(self, record):
    """Return the record's exc_info as a structured dict ({} when absent).

    :type record: logging.LogRecord
    :rtype: dict
    """
    if record.exc_info is None:
        return {}
    (exc_type, exc_message, trace) = record.exc_info
    return {
        'e': {
            # exc_type is already the exception class; the old code used
            # type(exc_type).__name__, which always produced 'type'.
            'class': str(exc_type.__name__),          # e.g. ZeroDivisionError
            'message': str(exc_message),              # e.g. integer division or modulo by zero
            'trace': list(traceback.format_tb(trace)),
        }
    }
Format and return the specified exception information as a string. :type record logging.LogRecord :rtype: dict
def insert(self, val):
    """Insert *val* and return a Pair(key, token).

    Raises KeyInsertError / TokenInsertError when the generated key or
    token already exists or cannot be stored in memcache.
    """
    key, token, formatted_key, formatted_token = self.next_formatted_pair()
    if self.has_key(key):
        raise KeyInsertError(key)
    if self.has_token(token):
        raise TokenInsertError(token)
    # add() returns falsy when memcache is down or read-only
    key_stored = self._mc.add(formatted_key, (val, token))
    if not key_stored:
        raise KeyInsertError(key, 'key could not be stored')
    token_stored = self._mc.add(formatted_token, key)
    if not token_stored:
        raise TokenInsertError(token, 'token could not be stored')
    return Pair(key, token)
\ Inserts a value and returns a :class:`Pair <Pair>`. If the generated key exists or memcache cannot store it, a :class:`KeyInsertError <shorten.KeyInsertError>` is raised (or a :class:`TokenInsertError <shorten.TokenInsertError>` if a token exists or cannot be stored).
def pip_install(*args):
    """Run ``pip install <args>``, explicitly ignoring the user's pip config.

    NOTE(review): the original line contained this function pasted twice,
    truncated mid-token ("sys.stderrf pip_install..."); this is the single
    reconstructed copy.
    """
    pip_cmd = os.path.join(os.path.dirname(sys.executable), 'pip')
    # Point PIP_CONFIG_FILE at the null device so user config is ignored.
    with set_env('PIP_CONFIG_FILE', os.devnull):
        cmd = [pip_cmd, 'install'] + list(args)
        print_command(cmd)
        subprocess.call(cmd, stdout=sys.stdout, stderr=sys.stderr)
Run pip install ... Explicitly ignores user's config.
def indent_text(text, nb_tabs=0, tab_str="  ", linebreak_input="\n",
                linebreak_output="\n", wrap=False):
    r"""Add tabs to each line of *text*.

    :param text: the text to indent
    :param nb_tabs: number of tabs to add
    :param tab_str: type of tab (e.g. "\t"; default: 2 spaces, per the
        original documentation)
    :param linebreak_input: line separator expected in the input
    :param linebreak_output: line separator emitted in the output
    :param wrap: whether to apply smart wrapping via wrap_text_in_a_box
    :return: indented text as string
    """
    if wrap:
        return wrap_text_in_a_box(body=text, style='no_border',
                                  tab_str=tab_str, tab_num=nb_tabs)
    tabs = nb_tabs * tab_str
    # join instead of repeated '+=' (which is quadratic)
    return "".join(tabs + line + linebreak_output
                   for line in text.split(linebreak_input))
r"""Add tabs to each line of text. :param text: the text to indent :param nb_tabs: number of tabs to add :param tab_str: type of tab (could be, for example "\t", default: 2 spaces :param linebreak_input: linebreak on input :param linebreak_output: linebreak on output :param wrap: wethever to apply smart text wrapping. (by means of wrap_text_in_a_box) :return: indented text as string
def wait_for_user(msg=""):
    """Print *msg* and wait for typed confirmation.

    Returns immediately when '--yes-i-know' is on the command line;
    otherwise exits with status 1 unless the exact phrase is typed.
    """
    if '--yes-i-know' in sys.argv:
        return
    print(msg)
    answer = ''
    try:
        answer = raw_input("Please confirm by typing 'Yes, I know!': ")
    except KeyboardInterrupt:
        print()
    if answer != 'Yes, I know!':
        sys.stderr.write("ERROR: Aborted.\n")
        sys.exit(1)
Print MSG and a confirmation prompt. Waiting for user's confirmation, unless silent '--yes-i-know' command line option was used, in which case the function returns immediately without printing anything.
def guess_minimum_encoding(text, charsets=('ascii', 'latin1', 'utf8')):
    """Return (encoded_text, charset) for the first charset able to encode.

    *text* is expected to be utf8-encoded bytes; non-UTF-8 bytes are
    replaced. Falls back to ('utf8' re-encoding, 'utf8').
    """
    as_unicode = text.decode('utf8', 'replace')
    for candidate in charsets:
        try:
            return (as_unicode.encode(candidate), candidate)
        except (UnicodeEncodeError, UnicodeDecodeError):
            continue
    return (as_unicode.encode('utf8'), 'utf8')
Try to guess the minimum charset that is able to represent. Try to guess the minimum charset that is able to represent the given text using the provided charsets. text is supposed to be encoded in utf8. Returns (encoded_text, charset) where charset is the first charset in the sequence being able to encode text. Returns (text_in_utf8, 'utf8') in case no charset is able to encode text. @note: If the input text is not in strict UTF-8, then replace any non-UTF-8 chars inside it.
def encode_for_xml(text, wash=False, xml_version='1.0', quote=False):
    """Escape &, < (and optionally ") so *text* is XML-compliant.

    :param text: text to encode
    :param wash: also strip characters invalid in XML (via wash_for_xml)
    :param quote: also escape double quotes
    :return: the encoded text
    """
    # '&' must be replaced first so earlier escapes are not double-escaped.
    replacements = [('&', '&amp;'), ('<', '&lt;')]
    if quote:
        replacements.append(('"', '&quot;'))
    for old, new in replacements:
        text = text.replace(old, new)
    if wash:
        text = wash_for_xml(text, xml_version=xml_version)
    return text
Encode special characters in a text so that it would be XML-compliant. :param text: text to encode :return: an encoded text
def wash_for_xml(text, xml_version='1.0'):
    """Strip characters not allowed in XML 1.0 (or 1.1) from utf-8 *text*.

    :param xml_version: '1.0' or '1.1' (selects the allowed-character set)
    """
    pattern = (RE_ALLOWED_XML_1_0_CHARS if xml_version == '1.0'
               else RE_ALLOWED_XML_1_1_CHARS)
    return pattern.sub('', unicode(text, 'utf-8')).encode('utf-8')
Remove any character which isn't a allowed characters for XML. The allowed characters depends on the version of XML. - XML 1.0: <http://www.w3.org/TR/REC-xml/#charsets> - XML 1.1: <http://www.w3.org/TR/xml11/#charsets> :param text: input string to wash. :param xml_version: version of the XML for which we wash the input. Value for this parameter can be '1.0' or '1.1'
def wash_for_utf8(text, correct=True):
    """Return UTF-8 encoded bytes, dropping bad characters when *correct*.

    :param text: binary or unicode input
    :param correct: ignore undecodable bytes instead of raising
    """
    if isinstance(text, unicode):
        return text.encode('utf-8')
    mode = "ignore" if correct else "strict"
    return text.decode("utf-8", mode).encode("utf-8", mode)
Return UTF-8 encoded binary string with incorrect characters washed away. :param text: input string to wash (can be either a binary or Unicode string) :param correct: whether to correct bad characters or throw exception
def nice_number(number, thousands_separator=',', max_ndigits_after_dot=None):
    """Return *number* formatted with thousands separators.

    Floats are optionally rounded to *max_ndigits_after_dot* first, and
    only the integer part receives separators. Locale-unaware.
    """
    if isinstance(number, float):
        if max_ndigits_after_dot is not None:
            number = round(number, max_ndigits_after_dot)
        int_part, frac_part = str(number).split('.')
        return '%s.%s' % (nice_number(int(int_part), thousands_separator),
                          frac_part)
    # Walk the digits right-to-left, inserting a separator every 3 digits.
    grouped = []
    for offset, digit in enumerate(reversed(str(number))):
        if offset and offset % 3 == 0:
            grouped.append(thousands_separator)
        grouped.append(digit)
    return ''.join(reversed(grouped))
Return nicely printed number NUMBER in language LN. Return nicely printed number NUMBER in language LN using given THOUSANDS_SEPARATOR character. If max_ndigits_after_dot is specified and the number is float, the number is rounded by taking in consideration up to max_ndigits_after_dot digit after the dot. This version does not pay attention to locale. See tmpl_nice_number_via_locale().
def nice_size(size):
    """Return *size* (in bytes) as a nicely printed string, e.g. '1.5 MB'.

    :param size: the size in bytes
    :rtype: string
    """
    unit = 'B'
    # Climb through the units while more than 1024 of the current one.
    for bigger_unit in ('KB', 'MB', 'GB'):
        if size > 1024:
            size /= 1024.0
            unit = bigger_unit
        else:
            break
    return '%s %s' % (nice_number(size, max_ndigits_after_dot=2), unit)
Nice size. :param size: the size. :type size: int :return: a nicely printed size. :rtype: string
def remove_line_breaks(text):
    """Remove line-break characters from utf-8 encoded *text*.

    Strips \\f, \\n, \\r plus the Unicode LINE SEPARATOR (U+2028),
    PARAGRAPH SEPARATOR (U+2029) and NEXT LINE (U+0085) characters.
    """
    # The previous version replaced u'\xe2\x80\xa8' etc. -- the UTF-8
    # *byte* sequences spelled out as codepoints, which can never occur
    # in the decoded unicode string. Use the real codepoints instead.
    decoded = unicode(text, 'utf-8')
    for linebreak in (u'\f', u'\n', u'\r', u'\u2028', u'\u2029', u'\x85'):
        decoded = decoded.replace(linebreak, '')
    return decoded.encode('utf-8')
Remove line breaks from input. Including unicode 'line separator', 'paragraph separator', and 'next line' characters.
def decode_to_unicode(text, default_encoding='utf-8'):
    """Decode *text* to unicode, guessing the encoding when needed.

    Tries *default_encoding* first, then chardet (when available and
    confident), then guess_minimum_encoding as a last resort.
    """
    if not text:
        return ""
    try:
        return text.decode(default_encoding)
    except (UnicodeError, LookupError):
        pass
    encoding = None
    if CHARDET_AVAILABLE:
        # chardet gives a confidence-scored guess; trust it above 0.8
        guess = chardet.detect(text)
        if guess['confidence'] >= 0.8:
            encoding = guess['encoding']
    if encoding is None:
        # No (confident) chardet result; make a basic guess instead.
        _, encoding = guess_minimum_encoding(text)
    return text.decode(encoding)
Decode input text into Unicode representation. Decode input text into Unicode representation by first using the default encoding utf-8. If the operation fails, it detects the type of encoding used in the given text. For optimal result, it is recommended that the 'chardet' module is installed. NOTE: Beware that this might be slow for *very* large strings. If chardet detection fails, it will try to decode the string using the basic detection function guess_minimum_encoding(). Also, bear in mind that it is impossible to detect the correct encoding at all times, other then taking educated guesses. With that said, this function will always return some decoded Unicode string, however the data returned may not be the same as original data in some cases. :param text: the text to decode :type text: string :param default_encoding: the character encoding to use. Optional. :type default_encoding: string :return: input text as Unicode :rtype: string
def to_unicode(text):
    """Coerce *text* to a unicode string."""
    if isinstance(text, unicode):
        return text
    is_stringlike = isinstance(text, six.string_types)
    return decode_to_unicode(text) if is_stringlike else unicode(text)
Convert to unicode.
def translate_latex2unicode(text, kb_file=None):
    """Translate LaTeX symbols in *text* to their Unicode equivalents.

    Uses the KB table at *kb_file* (default: the standard KB location),
    building the session-wide translation table on first use.

    :return: unicode representation of the translated text
    """
    if kb_file is None:
        kb_file = get_kb_filename()
    # Make sure we operate on unicode input.
    try:
        text = decode_to_unicode(text)
    except UnicodeDecodeError:
        text = unicode(wash_for_utf8(text))
    # Lazily build the translation table for this session.
    if CFG_LATEX_UNICODE_TRANSLATION_CONST == {}:
        _load_latex2unicode_constants(kb_file)
    table = CFG_LATEX_UNICODE_TRANSLATION_CONST['table']
    regexp = CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj']
    for match in regexp.finditer(text):
        # Also swallow LaTeX markers {, } and $ around the matched symbol.
        pattern = "[\{\$]?%s[\}\$]?" % (re.escape(match.group()),)
        text = re.sub(pattern, table[match.group()], text)
    return text
Translate latex text to unicode. This function will take given text, presumably containing LaTeX symbols, and attempts to translate it to Unicode using the given or default KB translation table located under CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb. The translated Unicode string will then be returned. If the translation table and compiled regular expression object is not previously generated in the current session, they will be. :param text: a text presumably containing LaTeX symbols. :type text: string :param kb_file: full path to file containing latex2unicode translations. Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb :type kb_file: string :return: Unicode representation of translated text :rtype: unicode
def _load_latex2unicode_constants(kb_file=None):
    """Populate CFG_LATEX_UNICODE_TRANSLATION_CONST from the KB file.

    Builds the LaTeX->Unicode mapping table and a combined regexp.
    On failure to open the file, writes a message to stderr and returns
    the (unchanged) constant, preserving the original behavior.

    :param kb_file: path to the latex-to-unicode KB file
    """
    if kb_file is None:
        kb_file = get_kb_filename()
    try:
        data = open(kb_file)
    except IOError:
        # File not found or similar
        sys.stderr.write(
            "\nCould not open LaTeX to Unicode KB file. "
            "Aborting translation.\n")
        return CFG_LATEX_UNICODE_TRANSLATION_CONST
    latex_symbols = []
    translation_table = {}
    # 'with' guarantees the handle is closed even if a line fails to
    # parse (the old code leaked it on any exception in the loop).
    with data:
        for line in data:
            # The file has form of latex|--|utf-8. First decode to Unicode.
            line = line.decode('utf-8')
            mapping = line.split('|--|')
            translation_table[mapping[0].rstrip('\n')] = mapping[1].rstrip('\n')
            latex_symbols.append(re.escape(mapping[0].rstrip('\n')))
    CFG_LATEX_UNICODE_TRANSLATION_CONST[
        'regexp_obj'] = re.compile("|".join(latex_symbols))
    CFG_LATEX_UNICODE_TRANSLATION_CONST['table'] = translation_table
Load LaTeX2Unicode translation table dictionary. Load LaTeX2Unicode translation table dictionary and regular expression object from KB to a global dictionary. :param kb_file: full path to file containing latex2unicode translations. Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb :type kb_file: string :return: dict of type: {'regexp_obj': regexp match object, 'table': dict of LaTeX -> Unicode mappings} :rtype: dict
def translate_to_ascii(values):
    r"""Transliterate the string contents of *values* to plain ascii.

    Accepts a single string or a sequence of strings; passed strings are
    returned as a list. Characters unidecode cannot recognize ("[?]")
    are dropped.
    """
    if not values and not isinstance(values, str):
        return values
    if isinstance(values, str):
        values = [values]
    for position, value in enumerate(values):
        if not value:
            continue
        unicode_text = decode_to_unicode(value)
        if u"[?]" in unicode_text:
            # Transliterate one character at a time so unidecode's
            # "unknown" marker can be filtered out.
            kept = []
            for character in unicode_text:
                transliterated = unidecode(character)
                if transliterated != "[?]":
                    kept.append(transliterated)
            ascii_text = ''.join(kept).encode('ascii')
        else:
            ascii_text = unidecode(unicode_text).replace(
                u"[?]", u"").encode('ascii')
        values[position] = ascii_text
    return values
r"""Transliterate the string into ascii representation. Transliterate the string contents of the given sequence into ascii representation. Returns a sequence with the modified values if the module 'unidecode' is available. Otherwise it will fall back to the inferior strip_accents function. For example: H\xc3\xb6hne becomes Hohne. Note: Passed strings are returned as a list. :param values: sequence of strings to transform :type values: sequence :return: sequence with values transformed to ascii :rtype: sequence
def xml_entities_to_utf8(text, skip=('lt', 'gt', 'amp')):
    """Replace XML/HTML character references with their UTF-8 bytes.

    Named entities listed in *skip* are left untouched; unknown or
    malformed references are left as-is.
    Based on http://effbot.org/zone/re-sub.htm#unescape-html
    """
    def fixup(m):
        entity = m.group(0)
        if entity[:2] == "&#":
            # numeric character reference, hex or decimal
            try:
                if entity[:3] == "&#x":
                    return unichr(int(entity[3:-1], 16)).encode("utf-8")
                return unichr(int(entity[2:-1])).encode("utf-8")
            except ValueError:
                pass
        elif entity[1:-1] not in skip:
            # named entity
            try:
                entity = unichr(
                    html_entities.name2codepoint[entity[1:-1]]).encode("utf-8")
            except KeyError:
                pass
        return entity  # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
Translate HTML or XML character references to UTF-8. Removes HTML or XML character references and entities from a text string and replaces them with their UTF-8 representation, if possible. :param text: The HTML (or XML) source text. :type text: string :param skip: list of entity names to skip when transforming. :type skip: iterable :return: The plain text, as a Unicode string, if necessary. @author: Based on http://effbot.org/zone/re-sub.htm#unescape-html
def slugify(text, delim=u'-'):
    """Generate an ASCII-only slug from *text*, joined by *delim*."""
    words = []
    for chunk in _punct_re.split(text.lower()):
        words.extend(unidecode(chunk).split())
    return unicode(delim.join(words))
Generate an ASCII-only slug.
def show_diff(original, modified, prefix='', suffix='', prefix_unchanged=' ', suffix_unchanged='', prefix_removed='-', suffix_removed='', prefix_added='+', suffix_added=''): import difflib differ = difflib.Differ() result = [prefix] for line in differ.compare(modified.splitlines(), original.splitlines()): if line[0] == ' ': # Mark as unchanged result.append( prefix_unchanged + line[2:].strip() + suffix_unchanged) elif line[0] == '-': # Mark as removed result.append(prefix_removed + line[2:].strip() + suffix_removed) elif line[0] == '+': # Mark as added/modified result.append(prefix_added + line[2:].strip() + suffix_added) result.append(suffix) return '\n'.join(result)
Return the diff view between original and modified strings. Function checks both arguments line by line and returns a string with a: - prefix_unchanged when line is common to both sequences - prefix_removed when line is unique to sequence 1 - prefix_added when line is unique to sequence 2 and a corresponding suffix in each line :param original: base string :param modified: changed string :param prefix: prefix of the output string :param suffix: suffix of the output string :param prefix_unchanged: prefix of the unchanged line :param suffix_unchanged: suffix of the unchanged line :param prefix_removed: prefix of the removed line :param suffix_removed: suffix of the removed line :param prefix_added: prefix of the added line :param suffix_added: suffix of the added line :return: string with the comparison of the records :rtype: string
def escape_latex(text):
    r"""Escape LaTeX-special characters (# $ % ^ & _ { } ~ \) in *text*."""
    SPECIAL = {
        '&': r'\&', '%': r'\%', '$': r'\$', '#': r'\#',
        '_': r'\_', '{': r'\{', '}': r'\}',
        '~': r'\~{}', '^': r'\^{}', '\\': r'\textbackslash{}',
    }
    decoded = unicode(text.decode('utf-8'))
    escaped = "".join(SPECIAL.get(character, character)
                      for character in decoded)
    return escaped.encode('utf-8')
r"""Escape characters of given text. This function takes the given text and escapes characters that have a special meaning in LaTeX: # $ % ^ & _ { } ~ \
def _copy_attr(self, module, varname, cls, attrname=None):
    """Copy attribute *varname* from *module* onto self.

    Raises RuntimeError when the variable is missing or not an
    instance of *cls*.

    Args:
        module: module object
        varname: variable name
        cls: expected class of the variable
        attrname: attribute name on self; falls back to varname
    """
    if not hasattr(module, varname):
        raise RuntimeError("Variable '{}' not found".format(varname))
    obj = getattr(module, varname)
    if not isinstance(obj, cls):
        # The old message always blamed a variable literally called
        # "fobj"; report the actual variable name instead.
        raise RuntimeError(
            "Expecting '{}' to be a {}, not a '{}'".format(
                varname, cls.__name__, obj.__class__.__name__))
    if attrname is None:
        attrname = varname
    setattr(self, attrname, obj)
Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname
def __check_to_permit(self, entry_type, entry_filename):
    """Apply include/exclude filter rules to one entry.

    Explicit includes win, then explicit excludes; anything unmatched
    is implicitly included.
    """
    rules = self.__filter_rules[entry_type]
    for pattern in rules[fss.constants.FILTER_INCLUDE]:
        if fnmatch.fnmatch(entry_filename, pattern):
            _LOGGER_FILTER.debug("Entry explicitly INCLUDED: [%s] [%s] "
                                 "[%s]", entry_type, pattern, entry_filename)
            return True
    for pattern in rules[fss.constants.FILTER_EXCLUDE]:
        if fnmatch.fnmatch(entry_filename, pattern):
            _LOGGER_FILTER.debug("Entry explicitly EXCLUDED: [%s] [%s] "
                                 "[%s]", entry_type, pattern, entry_filename)
            return False
    _LOGGER_FILTER.debug("Entry IMPLICITLY included: [%s] [%s]",
                         entry_type, entry_filename)
    return True
Applying the filter rules.
def get_next_item(self):
    """Pop the next work item, preferring the local input-queue.

    The external input-queue is only consulted when the local queue is
    empty (it mainly supplies the initial root path).
    """
    try:
        return self.__local_input_q.get(block=False)
    except queue.Empty:
        # Local queue exhausted; fall back to the external queue.
        return self.input_q.get(block=False)
Override the default functionality to not only try to pull things off the external input-queue, but to first try to pull things from a local input-queue that we'll primarily depend on. We'll only use the external input-queue to get the initial root-path (we could reuse it to do the recursion, but it's more costly and prone to delay).
def index_nearest(array, value):
    """Return the index of the element of *array* nearest to *value*.

    Args:
        array: numpy array
        value: scalar to locate
    Returns:
        int
    http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    distances = np.abs(array - value)
    return distances.argmin()
Finds index of nearest value in array. Args: array: numpy array value: Returns: int http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
def BSearch(a, x, lo=0, hi=None):
    """Return the index of x in sorted sequence a, or -1 if absent.

    lo/hi restrict the searched slice (bisect_left semantics).
    """
    if len(a) == 0:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    if pos != hi and a[pos] == x:
        return pos
    return -1
Returns index of x in a, or -1 if x not in a. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to lose.
def BSearchRound(a, x, lo=0, hi=None):
    """Return the index of the element of sorted a closest to x.

    Returns -1 for an empty sequence; ties go to the lower neighbour.
    """
    if len(a) == 0:
        return -1
    hi = len(a) if hi is None else hi
    pos = bisect_left(a, x, lo, hi)
    if pos >= hi:
        return hi - 1
    if a[pos] == x or pos == lo:
        return pos
    # Choose whichever neighbour is nearer; ties pick the left one.
    left_gap = x - a[pos - 1]
    right_gap = a[pos] - x
    return pos - 1 if left_gap <= right_gap else pos
Returns index of a that is closest to x. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to lose.
def BSearchCeil(a, x, lo=0, hi=None):
    """Return the lowest i with a[i] >= x, or -1 if x > all elements."""
    if len(a) == 0:
        return -1
    hi = len(a) if hi is None else hi
    pos = bisect_left(a, x, lo, hi)
    return -1 if pos >= hi else pos
Returns lowest i such as a[i] >= x, or -1 if x > all elements in a So, if x is in between two elements in a, this function will return the index of the higher element, hence "Ceil". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search
def BSearchFloor(a, x, lo=0, hi=None):
    """Return the highest i with a[i] <= x, or -1 if x < all elements."""
    if len(a) == 0:
        return -1
    hi = len(a) if hi is None else hi
    pos = bisect_left(a, x, lo, hi)
    if pos >= hi:
        return pos - 1
    if x == a[pos]:
        return pos
    return pos - 1 if pos > lo else -1
Returns highest i such as a[i] <= x, or -1 if x < all elements in a So, if x is in between two elements in a, this function will return the index of the lower element, hence "Floor". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search
def FindNotNaNBackwards(x, i):
    """Return the last index <= i holding a non-NaN value, or -1."""
    for j in range(i, -1, -1):
        if not np.isnan(x[j]):
            return j
    return -1
Returns last position (starting at i backwards) which is not NaN, or -1.
def census(self, *scales):
    """Current World Census data.

    With no arguments returns today's featured scale; pass scale ids
    (ints, 0..85) for specific scales, e.g. ``x.census(*range(81))``.

    Returns an :class:`ApiQuery` of a list of :class:`CensusScaleCurrent`.
    """
    params = {'mode': 'score+rank+rrank+prank+prrank'}
    if scales:
        params['scale'] = '+'.join(map(str, scales))

    @api_query('census', **params)
    async def result(_, root):
        return [CensusScaleCurrent(scale_elem)
                for scale_elem in root.find('CENSUS')]
    return result(self)
Current World Census data. By default returns data on today's featured World Census scale, use arguments to get results on specific scales. In order to request data on all scales at once you can do ``x.census(*range(81))``. Parameters ---------- scales : int World Census scales, integers between 0 and 85 inclusive. Returns ------- an :class:`ApiQuery` of a list of :class:`CensusScaleCurrent`
def censushistory(self, *scales):
    """Historical World Census data (entire stored history).

    With no arguments returns today's featured scale; pass scale ids
    (ints, 0..85) for specific scales, e.g. ``x.censushistory(*range(81))``.

    Returns an :class:`ApiQuery` of a list of :class:`CensusScaleHistory`.
    """
    params = {'mode': 'history'}
    if scales:
        params['scale'] = '+'.join(map(str, scales))

    @api_query('census', **params)
    async def result(_, root):
        return [CensusScaleHistory(scale_elem)
                for scale_elem in root.find('CENSUS')]
    return result(self)
Historical World Census data. Was split into its own method for the sake of simplicity. By default returns data on today's featured World Census scale, use arguments to get results on specific scales. In order to request data on all scales at once you can do ``x.censushistory(*range(81))``. Returns data for the entire length of history NationStates stores. There is no way to override that. Parameters ---------- scales : int World Census scales, integers between 0 and 85 inclusive. Returns ------- an :class:`ApiQuery` of a list of :class:`CensusScaleHistory`
async def censusranks(self, scale):
    """Iterate nations ranked on World Census scale *scale*.

    Pages through results 20 at a time; ranks may be inconsistent if
    they change mid-iteration. Yields :class:`CensusRank` objects.
    """
    expected_rank = count(1)
    offset = 1
    while True:
        page = await self._get_censusranks(scale=scale, start=offset)
        for census_rank in page:
            assert census_rank.rank == next(expected_rank)
            yield census_rank
        if len(page) < 20:
            return
        offset += 20
Iterate through nations ranked on the World Census scale. If the ranks change while you iterate over them, they may be inconsistent. Parameters ---------- scale : int A World Census scale, an integer between 0 and 85 inclusive. Returns ------- asynchronous iterator of :class:`CensusRank`
def loads(astring):
    """Decompress *astring* with zlib and deserialize it via marshal.

    Raises SerializerError on either decompression or restore failure.
    """
    try:
        return marshal.loads(zlib.decompress(astring))
    except zlib.error as e:
        raise SerializerError(
            'Cannot decompress object ("{}")'.format(str(e))
        )
    except Exception as e:
        # The marshal module has no dedicated exception class.
        raise SerializerError(
            'Cannot restore object ("{}")'.format(str(e))
        )
Decompress and deserialize string into Python object via marshal.
def loads(astring):
    """Decompress *astring* with zlib and deserialize it via pickle.

    Raises SerializerError on either decompression or unpickling failure.
    """
    try:
        return pickle.loads(zlib.decompress(astring))
    except zlib.error as e:
        raise SerializerError(
            'Cannot decompress object ("{}")'.format(str(e))
        )
    except pickle.UnpicklingError as e:
        raise SerializerError(
            'Cannot restore object ("{}")'.format(str(e))
        )
Decompress and deserialize string into Python object via pickle.
def loads(astring):
    """Decompress *astring* with lzma and deserialize it via pickle.

    Raises SerializerError on either decompression or unpickling failure.
    """
    try:
        return pickle.loads(lzma.decompress(astring))
    except lzma.LZMAError as e:
        raise SerializerError(
            'Cannot decompress object ("{}")'.format(str(e))
        )
    except pickle.UnpicklingError as e:
        raise SerializerError(
            'Cannot restore object ("{}")'.format(str(e))
        )
Decompress and deserialize string into a Python object via pickle.
def search(self, path_expression, mode=UXP, values=None, ifunc=lambda x: x):
    """Find matches for *path_expression* in self.data.

    :param path_expression: path tuple or string
    :return: matches post-processed according to *mode* and *ifunc*
    """
    exact = (mode[1] == "x")
    matches = iterutils.search(
        self.data,
        path_expression=path_expression,
        required_values=values,
        exact=exact)
    return self.__return_value(matches, mode, ifunc)
find matches for the given path expression in the data :param path_expression: path tuple or string :return:
def __visit_index_path(self, src, p, k, v):
    """Called during processing of source data.

    Records an indexed object for path ``p + (k,)`` and associates the
    source *src* with it.
    """
    cp = p + (k,)
    self.path_index[cp] = self.indexed_obj_factory(p, k, v, self.path_index.get(cp))
    # NOTE(review): cp was just assigned above, so this condition is always
    # true and the 'else' branch below looks unreachable -- confirm whether
    # the assignment was meant to be conditional.
    if cp in self.path_index:
        # if self.path_index[cp].assert_val_equals(v):
        #     raise ValueError('unexpected value change at path_index[{}]'.format(cp))
        self.path_index[cp].add_src(src)
    else:
        self.path_index[cp] = Flobject(val=v, path=cp, srcs=set([src]))
Called during processing of source data
def get_default_data_path(*args, module=None, class_=None, flag_raise=True):
    """Return an absolute path into a package's data/default directory.

    Args:
        module: Python module object expected to have a data/default
            sub-subdirectory next to it
        class_: class whose root package is used instead; takes
            precedence over *module* and must live under '(...).filetypes'
        flag_raise: raise RuntimeError when the resulting file is missing
    """
    if module is None:
        module = __get_filetypes_module()
    if class_ is not None:
        pkgname = class_.__module__
        parts = pkgname.split(".")
        if len(parts) < 2 or parts[1] != "filetypes":
            raise ValueError("Invalid module name for class '{}': '{}' "
                             "(must be '(...).filetypes[.(...)]')".format(
                                 class_.__name__, pkgname))
        # Resolve the "root" module object, e.g. "pyfant" for
        # "pyfant.filetypes.filemain".
        module = sys.modules[parts[0]]
    base_dir = os.path.split(module.__file__)[0]
    path = os.path.abspath(os.path.join(base_dir, "data", "default", *args))
    if flag_raise and not os.path.isfile(path):
        raise RuntimeError("Path not found '{}'".format(path))
    return path
Returns path to default data directory Arguments 'module' and 'class' give the chance to return path relative to package other than f311.filetypes Args: module: Python module object. It is expected that this module has a sub-subdirectory named 'data/default' class_: Python class object to extract path information from. If this argument is used, it will be expected that the class "root" package will have a sub-subdirectory named 'data/default'. Argument 'class_' **has precedence over argument 'module'** flag_raise: raises error if file is not found. This can be turned off for whichever purpose
def copy_default_data_file(filename, module=None):
    """Copy ``filename`` from the default data directory to the current directory.

    :param filename: name of the file inside ``data/default``
    :param module: module whose data directory is used; defaults to the
        filetypes module
    """
    if module is None:
        module = __get_filetypes_module()
    fullpath = get_default_data_path(filename, module=module)
    shutil.copy(fullpath, ".")
Copies file from default data directory to local directory.
def _find_display(self):
    """Find a usable display number — one without an existing Xvfb lock directory."""
    candidate = 2
    while os.path.isdir(XVFB_PATH % (candidate,)):
        candidate += 1
    self.display_num = candidate
Find a usable display, which doesn't have an existing Xvfb file
def comments(recid):
    """Display top-level comments for the record ``recid``.

    Guests who fail the authorization check are redirected to login with a
    mail cookie; other unauthorized users receive a 401.
    """
    from invenio_access.local_config import VIEWRESTRCOLL
    from invenio_access.mailcookie import \
        mail_cookie_create_authorize_action
    from .api import check_user_can_view_comments
    auth_code, auth_msg = check_user_can_view_comments(current_user, recid)
    if auth_code and current_user.is_guest:
        cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, {
            'collection': g.collection})
        url_args = {'action': cookie, 'ln': g.ln, 'referer': request.referrer}
        flash(_("Authorization failure"), 'error')
        return redirect(url_for('webaccount.login', **url_args))
    elif auth_code:
        flash(auth_msg, 'error')
        abort(401)
    # FIXME check restricted discussion
    # Only root comments (not replies, not reviews with star scores).
    comments = CmtRECORDCOMMENT.query.filter(db.and_(
        CmtRECORDCOMMENT.id_bibrec == recid,
        CmtRECORDCOMMENT.in_reply_to_id_cmtRECORDCOMMENT == 0,
        CmtRECORDCOMMENT.star_score == 0
    )).order_by(CmtRECORDCOMMENT.date_creation).all()
    return render_template('comments/comments.html',
                           comments=comments,
                           option='comments')
Display comments.
def getattr(self, key, default=None, callback=None):
    u"""Return attribute ``key`` of the element, or the node text for ``key == 'text'``.

    :param key: attribute name, or ``'text'`` for the element's text
    :param default: value returned when the attribute is missing
    :param callback: optional callable applied to the value before returning
    """
    value = self._xml.text if key == 'text' else self._xml.get(key, default)
    return callback(value) if callback else value
u"""Getting the attribute of an element. >>> xml = etree.Element('root') >>> xml.text = 'text' >>> Node(xml).getattr('text') 'text' >>> Node(xml).getattr('text', callback=str.upper) 'TEXT' >>> Node(xml).getattr('wrong_attr', default='default') 'default'
def setattr(self, key, value):
    u"""Set attribute ``key`` on the node, or the node text for ``key == 'text'``.

    The value is always stored as ``str``.
    """
    if key == 'text':
        self._xml.text = str(value)
    else:
        self._xml.set(key, str(value))
u"""Sets an attribute on a node. >>> xml = etree.Element('root') >>> Node(xml).setattr('text', 'text2') >>> Node(xml).getattr('text') 'text2' >>> Node(xml).setattr('attr', 'val') >>> Node(xml).getattr('attr') 'val'
def get(self, default=None, callback=None):
    u"""Return the leaf's text value, or ``default`` when the text is empty/None.

    :param default: fallback value for empty nodes
    :param callback: optional callable applied to the value before returning
    """
    value = self._xml.text if self._xml.text else default
    return callback(value) if callback else value
u"""Returns leaf's value.
def to_str(self, pretty_print=False, encoding=None, **kw):
    u"""Convert the node with all of its children to a string.

    Remaining keyword arguments are passed to ``etree.tostring`` as is.
    ``without_comments=True`` switches output to C14N (where the
    ``pretty_print`` and ``encoding`` flags are ignored) with comments
    stripped.

    :param bool pretty_print: whether to format the output
    :param str encoding: which encoding to use (ASCII by default)
    :rtype: str
    :returns: node's representation as a string
    """
    if kw.get('without_comments') and not kw.get('method'):
        kw.pop('without_comments')
        kw['method'] = 'c14n'
        kw['with_comments'] = False
    return etree.tostring(
        self._xml,
        pretty_print=pretty_print,
        encoding=encoding,
        **kw
    )
u"""Converts a node with all of it's children to a string. Remaining arguments are passed to etree.tostring as is. kwarg without_comments: bool because it works only in C14N flags: 'pretty print' and 'encoding' are ignored. :param bool pretty_print: whether to format the output :param str encoding: which encoding to use (ASCII by default) :rtype: str :returns: node's representation as a string
def iter_children(self, key=None):
    u"""Iterate over children, wrapping each in a Node-like or Literal object.

    :param key: optional alias used to filter children by tag name
    :raises KeyError: when ``key`` is given but matches no known alias
    """
    tag = None
    if key:
        tag = self._get_aliases().get(key)
        if not tag:
            raise KeyError(key)
    for child in self._xml.iterchildren(tag=tag):
        if len(child):
            # Child has children of its own — wrap as a full node.
            yield self.__class__(child)
        else:
            yield Literal(child)
u"""Iterates over children. :param key: A key for filtering children by tagname.
def update(self, **kwargs):
    u"""Update or create simple child nodes.

    Each keyword is used as a tag name (resolved through aliases when
    possible) and its value as the node text; missing nodes are appended.
    """
    for key, value in kwargs.items():
        helper = helpers.CAST_DICT.get(type(value), str)
        tag = self._get_aliases().get(key, key)
        elements = list(self._xml.iterchildren(tag=tag))
        if elements:
            for element in elements:
                element.text = helper(value)
        else:
            # NOTE(review): new elements are created with ``key``, not the
            # alias-resolved ``tag`` — confirm this asymmetry is intended.
            element = etree.Element(key)
            element.text = helper(value)
            self._xml.append(element)
    # Invalidate the alias cache since children may have changed.
    self._aliases = None
u"""Updating or creation of new simple nodes. Each dict key is used as a tagname and value as text.
def sget(self, path, default=NONE_NODE):
    u"""Safely get a node by dotted ``path`` even when parts of it don't exist.

    Supports list indices (``'car.0.doors'``), ``#text`` for node text and
    ``@attr`` for attributes. Returns ``default`` — or ``None`` for
    ``#text``/``@attr`` lookups — when the path cannot be resolved.

    :param path: dot-separated path string
    :param default: value returned for unresolvable paths
    """
    attrs = str(path).split(".")
    text_or_attr = None
    last_attr = attrs[-1]
    # Case of getting text or attribute
    if last_attr == '#text' or last_attr.startswith('@'):
        # #text => text, @attr => attr
        text_or_attr = last_attr[1:]
        attrs = attrs[:-1]
        # When getting #text and @attr we want default value to be None.
        if default is NONE_NODE:
            default = None
    my_object = self
    for attr in attrs:
        try:
            if isinstance(my_object, (list, tuple)) and re.match('^\-?\d+$', attr):
                my_object_next = my_object[int(attr)]
            else:
                my_object_next = getattr(my_object, attr)
            my_object = my_object_next
        except (AttributeError, KeyError, IndexError):
            return default
    # Return #text or @attr
    if text_or_attr:
        try:
            return my_object.getattr(text_or_attr)
        except AttributeError:
            # myObject can be a list.
            return None
    else:
        return my_object
u"""Enables access to nodes if one or more of them don't exist. Example: >>> m = Mappet('<root><tag attr1="attr text">text value</tag></root>') >>> m.sget('tag') text value >>> m.sget('tag.@attr1') 'attr text' >>> m.sget('tag.#text') 'text value' >>> m.sget('reply.vms_model_cars.car.0.params.doors') NONE_NODE Accessing nonexistent path returns None-like object with mocked converting functions which returns None: >>> m.sget('reply.fake_node').to_dict() is None True
def create(self, tag, value):
    u"""Create node ``tag`` with ``value`` if it doesn't exist yet.

    Unlike attribute access, this accepts tag names with hyphens.
    Use ``update`` to overwrite existing nodes.

    :raises KeyError: when a child with that tag already exists
    """
    child_tags = {child.tag for child in self._xml}
    if tag in child_tags:
        raise KeyError('Node {} already exists in XML tree.'.format(tag))
    self.set(tag, value)
u"""Creates a node, if it doesn't exist yet. Unlike attribute access, this allows to pass a node's name with hyphens. Those hyphens will be normalized automatically. In case the required element already exists, raises an exception. Updating/overwriting should be done using `update``.
def set(self, name, value):
    u"""Assign a new XML structure (dict, sequence or literal) to child ``name``.

    The child element is created when missing; works for all nested levels.
    """
    try:
        # Searches for a node to assign to.
        element = next(self._xml.iterchildren(tag=name))
    except StopIteration:
        # There is no such node in the XML tree. We create a new one
        # with current root as parent (self._xml).
        element = etree.SubElement(self._xml, name)
    if isinstance(value, dict):
        self.assign_dict(element, value)
    elif isinstance(value, (list, tuple, set)):
        self.assign_sequence_or_set(element, value)
    else:
        # Literal value.
        self.assign_literal(element, value)
    # Clear the aliases.
    self._aliases = None
u"""Assigns a new XML structure to the node. A literal value, dict or list can be passed in. Works for all nested levels. Dictionary: >>> m = Mappet('<root/>') >>> m.head = {'a': 'A', 'b': {'#text': 'B', '@attr': 'val'}} >>> m.head.to_str() '<head><a>A</a><b attr="val">B</b></head>' List: >>> m.head = [{'a': i} for i in 'ABC'] >>> m.head.to_str() '<head><a>A</a><a>B</a><a>C</a></head>' Literals: >>> m.head.leaf = 'A' >>> m.head.leaf.get() 'A'
def assign_dict(self, node, xml_dict): new_node = etree.Element(node.tag) # Replaces the previous node with the new one self._xml.replace(node, new_node) # Copies #text and @attrs from the xml_dict helpers.dict_to_etree(xml_dict, new_node)
Assigns a Python dict to a ``lxml`` node. :param node: A node to assign the dict to. :param xml_dict: The dict with attributes/children to use.
def assign_literal(element, value): u # Searches for a conversion method specific to the type of value. helper = helpers.CAST_DICT.get(type(value), str) # Removes all children and attributes. element.clear() element.text = helper(value)
u"""Assigns a literal. If a given node doesn't exist, it will be created. :param etree.Element element: element to which we assign. :param value: the value to assign
def to_dict(self, **kw):
    u"""Convert the lxml object to a dict.

    :param kw: passed to ``helpers.etree_to_dict`` (e.g. ``without_comments=True``)
    """
    _, value = helpers.etree_to_dict(self._xml, **kw).popitem()
    return value
u"""Converts the lxml object to a dict. possible kwargs: without_comments: bool
def _get_aliases(self):
    u"""Return (building lazily) a dict mapping normalized tag names to originals."""
    if self._aliases is None:
        self._aliases = {}
        if self._xml is not None:
            for child in self._xml.iterchildren():
                self._aliases[helpers.normalize_tag(child.tag)] = child.tag
    return self._aliases
u"""Creates a dict with aliases. The key is a normalized tagname, value the original tagname.
def xpath(
    self,
    path,
    namespaces=None,
    regexp=False,
    smart_strings=True,
    single_use=False,
):
    u"""Execute an XPath query on the ``lxml`` object and wrap the result.

    :param str path: XPath string, e.g. ``'cars/car'``
    :param namespaces: ``'exslt'``, ``'re'`` or a prefix->URI mapping
    :param bool regexp: if True and no namespaces given, use the exslt
        regular-expressions namespace
    :param bool smart_strings: passed through to lxml
    :param bool single_use: evaluate directly instead of creating an
        ``XPathEvaluator`` instance (faster when used only once)
    """
    if (
        namespaces in ['exslt', 're'] or
        (regexp and not namespaces)
    ):
        namespaces = {'re': "http://exslt.org/regular-expressions"}
    if single_use:
        node = self._xml.xpath(path)
    else:
        xpe = self.xpath_evaluator(
            namespaces=namespaces, regexp=regexp, smart_strings=smart_strings
        )
        node = xpe(path)
    if len(node) == 1:
        # Unwrap a single-element result.
        node = node[0]
    if len(node):
        return self.__class__(node)
    else:
        return Literal(node)
    # NOTE(review): unreachable — both branches above return. Confirm whether
    # a fall-through case (e.g. scalar XPath results) was intended.
    return node
u"""Executes XPath query on the ``lxml`` object and returns a correct object. :param str path: XPath string e.g., 'cars'/'car' :param str/dict namespaces: e.g., 'exslt', 're' or ``{'re': "http://exslt.org/regular-expressions"}`` :param bool regexp: if ``True`` and no namespaces is provided, it will use ``exslt`` namespace :param bool smart_strings: :param bool single_use: faster method for using only once. Does not create ``XPathEvaluator`` instance. >>> root = mappet.Mappet("<root><a>aB</a><b>aBc</b></root>") >>> root.xpath( "//*[re:test(., '^abc$', 'i')]", namespaces='exslt', regexp=True, )
def xpath_evaluator(self, namespaces=None, regexp=False, smart_strings=True):
    u"""Create an ``XPathEvaluator`` instance bound to this node.

    :returns: ``XPathEvaluator`` instance
    """
    return etree.XPathEvaluator(
        self._xml, namespaces=namespaces, regexp=regexp,
        smart_strings=smart_strings
    )
u"""Creates an XPathEvaluator instance for an ElementTree or an Element. :returns: ``XPathEvaluator`` instance
def get_last_modified_date(*args, **kwargs):
    """Return the most recent ``modified`` date of the latest Note and Release.

    For use with Django's ``last_modified`` decorator; returns None when
    either model has no rows.
    """
    try:
        latest_note = Note.objects.latest()
        latest_release = Release.objects.latest()
    except ObjectDoesNotExist:
        return None
    return max(latest_note.modified, latest_release.modified)
Returns the date of the last modified Note or Release. For use with Django's last_modified decorator.
def using_ios_stash():
    """Return True if loaded modules hint the install is running on iOS (StaSh).

    The original text of this function was corrupted by a duplicated,
    truncated copy of itself; this restores the single clean definition.
    """
    print('detected install path:')
    print(os.path.dirname(__file__))
    module_names = set(sys.modules.keys())
    return 'stash' in module_names or 'stash.system' in module_names
returns true if sys path hints the install is running on ios
def pad_bin_image_to_shape(image, shape):
    """Pad a binary image with False pixels (right and bottom) up to ``shape``.

    :param image: 2d boolean numpy array
    :param shape: (height, width) target, each >= the image's dimension
    :return: padded boolean array of exactly ``shape``
    """
    target_h, target_w = shape
    img_h, img_w = image.shape
    assert img_h <= target_h
    assert img_w <= target_w
    padded = image
    if img_w < target_w:
        padded = numpy.hstack((padded, numpy.zeros((img_h, target_w - img_w), bool)))
    if img_h < target_h:
        padded = numpy.vstack((padded, numpy.zeros((target_h - img_h, target_w), bool)))
    return padded
Pad image to size :shape: with zeros
def best_convolution(bin_template, bin_image,
                     tollerance=0.5, overlap_table=OVERLAP_TABLE):
    """Select and apply the best convolution method to find template in image.

    Returns a list of matches in (width, height, x offset, y offset) format
    (offsets from the top left corner).

    As the images are binary, the spare bit-space of float64 is used by
    cutting the image into tiles and stacking them into variable grayscale
    values, converting a sparse binary image into a denser grayscale one.

    :param tollerance: tolerance forwarded to the convolution routines
    :param overlap_table: mapping of stack counts to their factorizations
    """
    template_sum = numpy.count_nonzero(bin_template)
    th, tw = bin_template.shape
    ih, iw = bin_image.shape
    if template_sum == 0 or th == 0 or tw == 0:
        # An empty template can never match.
        return []
    if th > ih or tw > iw:
        # The template is bigger than the image.
        return []
    # How many cells can we split the image into?
    max_vert_cells = ih // th
    # NOTE(review): divides by the template *height*; confirm this should not
    # be ``iw // tw``.
    max_hor_cells = iw // th
    # Try to work out how many times we can stack the image.
    # (.items(): dict.iteritems() does not exist on Python 3.)
    usable_factors = {n: factors for n, factors in overlap_table.items()
                      if ((template_sum + 1) ** (n)) < ACCURACY_LIMIT}
    overlap_options = [(factor, n // factor)
                       for n, factors in usable_factors.items()
                       for factor in factors
                       if (factor <= max_vert_cells and
                           n // factor <= max_hor_cells)]
    if not overlap_options:
        # We can't stack the image.
        return convolution(bin_template, bin_image, tollerance=tollerance)
    best_overlap = min(overlap_options,
                       key=lambda x: ((ih // x[0] + th) * (iw // x[1] + tw)))
    return overlapped_convolution(bin_template, bin_image,
                                  tollerance=tollerance, splits=best_overlap)
Selects and applies the best convolution method to find template in image. Returns a list of matches in (width, height, x offset, y offset) format (where the x and y offsets are from the top left corner). As the images are binary images, we can utilise the extra bit space in the float64's by cutting the image into tiles and stacking them into variable grayscale values. This allows converting a sparse binary image into a dense(r) grayscale one.
def get_partition_scores(image, min_w=1, min_h=1):
    """Return a best-to-worst list of binary splits along the x and y axes.

    Each entry is ``(-balance, position, axis)`` with axis 0 for horizontal
    and 1 for vertical cuts; an empty list means no useful split exists.
    """
    height, width = image.shape[:2]
    if width == 0 or height == 0:
        return []
    nonzero = numpy.count_nonzero
    total = nonzero(image)
    if total == 0 or total == height * width:
        # Uniform image: nothing to partition.
        return []
    candidates = []
    if height >= min_h * 2:
        for y in range(min_h, height - min_h):
            below = nonzero(image[y:])
            balance = abs(below / ((height - y) * width) -
                          (total - below) / (y * width))
            candidates.append((-balance, y, 0))
    if width >= min_w * 2:
        for x in range(min_w, width - min_w):
            right = nonzero(image[:, x:])
            balance = abs(right / (height * (width - x)) -
                          (total - right) / (height * x))
            candidates.append((-balance, x, 1))
    return sorted(candidates)
Return list of best to worst binary splits along the x and y axis.
def binary_partition_image(image, min_w=1, min_h=1, depth=0, max_depth=-1):
    """Return a BSP tree of ``[pos, axis, [before_node, after_node]]`` nodes.

    Leaf nodes are ``None``. If ``max_depth < 0`` recursion continues until
    no partition can be found; otherwise leaves are forced at ``max_depth``.
    ``min_w`` and ``min_h`` are the minimum width/height of a partition.
    """
    if max_depth >= 0 and depth >= max_depth:
        return None
    partition = get_best_partition(image, min_w=min_w, min_h=min_h)
    if partition is None:
        return None
    pos, axis = partition
    if axis == 0:
        # Horizontal cut: split rows at pos.
        p1 = binary_partition_image(
            image[pos:], min_w, min_h, depth + 1, max_depth)
        p2 = binary_partition_image(
            image[:pos], min_w, min_h, depth + 1, max_depth)
    elif axis == 1:
        # Vertical cut: split columns at pos.
        p1 = binary_partition_image(
            image[:, pos:], min_w, min_h, depth + 1, max_depth)
        p2 = binary_partition_image(
            image[:, :pos], min_w, min_h, depth + 1, max_depth)
    return [pos, axis, [p1, p2]]
Return a bsp of [pos, axis, [before_node, after_node]] nodes where leaf nodes == None. If max_depth < 0 this function will continue until all leaf nodes have been found, if it is >= 0 leaf nodes will be created at that depth. min_w and min_h are the minimum width or height of a partition.
def find_threshold_near_density(img, density, low=0, high=255):
    """Binary-search a threshold whose above-threshold pixel fraction is nearest ``density``.

    The highest threshold closest to the desired density is returned;
    ``low``/``high`` bound the candidate thresholds.

    :param img: target image
    :type img: 2d :class:`numpy.ndarray`
    :param density: target density (pixels above threshold / total pixels)
    :type density: float between 0.0 and 1.0
    :param low: min threshold to test
    :param high: max threshold to test
    :rtype: ubyte
    """
    size = numpy.size(img)
    densities = []
    last_t = None
    while True:
        t = ((high - low) // 2) + low
        if t == last_t:
            # Converged: pick closest density, preferring higher thresholds.
            densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
            return densities[0][1]
        else:
            last_t = t
        d = numpy.count_nonzero(img > t) / size
        densities.append((d, t))
        if d < density:
            high = t
        elif d >= density:
            # search away from low
            low = t
Find a threshold where the fraction of pixels above the threshold is closest to density where density is (count of pixels above threshold / count of pixels). The highest threshold closest to the desired density will be returned. Use low and high to exclude undesirable thresholds. :param img: target image :type img: 2d :class:`numpy.ndarray` :param density: target density :type density: float between 0.0 and 1.0 :param low: min threshold to test :type low: ubyte :param high: max threshold to test :type high: ubyte :rtype: ubyte
def filter_greys_using_image(image, target):
    """Zero out any grey value in ``target`` that does not occur in ``image``.

    :param image: image containing the values allowed in the output
    :param target: the image to filter
    :rtype: 2d :class:`numpy.ndarray` with target's dimensions, containing
        only values present in ``image``
    """
    all_greys = numpy.array(range(256), dtype=numpy.uint8)
    allowed = numpy.in1d(all_greys, numpy.unique(image))
    lookup = numpy.where(allowed, all_greys, 0)
    return lookup[target]
Filter out any values in target not in image :param image: image containing values to appear in filtered image :param target: the image to filter :rtype: 2d :class:`numpy.ndarray` containing only value in image and with the same dimensions as target
def get_swagger_view(title=None, url=None, generator_class=SchemaGenerator):
    """Return a schema view which renders Swagger/OpenAPI.

    :param title: schema title
    :param url: schema base URL
    :param generator_class: schema generator class to use
    """
    return schemas.get_schema_view(
        title=title,
        url=url,
        renderer_classes=[
            CoreJSONRenderer,
            renderers.OpenAPIRenderer,
            renderers.SwaggerUIRenderer],
        generator_class=generator_class)
Returns schema view which renders Swagger/OpenAPI.
def __init_defaults(self, config):
    """Initialize default connection settings in ``config`` for the configured provider.

    :param config: mutable mapping of connection options, filled in place
    :raises ValueError: for unsupported providers
    """
    provider = self.__provider
    if provider == 'sqlite':
        config.setdefault('dbname', ':memory:')
        config.setdefault('create_db', True)
    elif provider == 'mysql':
        config.setdefault('port', 3306)
        config.setdefault('charset', 'utf8')
    elif provider == 'postgres':
        config.setdefault('port', 5432)
    elif provider == 'oracle':
        config.setdefault('port', 1521)
    else:
        raise ValueError('Unsupported provider "{}"'.format(provider))
    # Network providers additionally need host/credential defaults.
    if provider != 'sqlite':
        config.setdefault('host', 'localhost')
        config.setdefault('user', None)
        config.setdefault('password', None)
        config.setdefault('dbname', None)
Initializes the default connection settings.
def persist_one(self, file_base64_content, filename, extension, mime, is_private=True):
    """Upload a file to cloud storage via the MediaService API.

    :param file_base64_content: base64-encoded file content
    :param filename: file name
    :param extension: file extension
    :param mime: MIME type
    :param is_private: whether the stored file is private
    """
    return self.__app.api_call("MediaService", "persist_one", locals(), {})
Uploads a file to cloud storage. :param file_base64_content: base64-encoded file content :param filename: file name :param extension: file extension :param mime: MIME type :param is_private: whether the stored file is private
def upload(self, file_descriptor, settings):
    """Upload a file to the cloud.

    :param file_descriptor: an open file descriptor
    :param settings: upload settings, JSON-serialized into the request
    :return: dict parsed from the response body
    """
    multipart_form_data = {
        'file': file_descriptor
    }
    params = {"settings": json.dumps(settings)}
    dr = self.__app.native_api_call('media', 'upload', params, self.__options, True,
                                    multipart_form_data, False, http_path="/api/meta/v1/",
                                    http_method='POST', connect_timeout_sec=60 * 10)
    return json.loads(dr.text)
Загружает файл в облако :param file_descriptor: открытый дескриптор :param settings: настройки загрузки :rtype: requests.Response
def download(self, media_id, as_stream=False):
    """Download the specified file.

    :param media_id: media identifier string
    :param as_stream: request a streamed response
    :rtype: requests.Response
    """
    response = self.__app.native_api_call('media', 'd/' + media_id, {}, self.__options,
                                          False, None, as_stream, http_path="/api/meta/v1/",
                                          http_method='GET')
    return response
Скачивает указанный файл :param media_id: string :rtype: requests.Response
def info(self, media_id):
    """Fetch metadata about a stored file.

    :param media_id: media identifier string
    :return: dict parsed from the response body
    """
    dr = self.__app.native_api_call('media', 'i/' + media_id, {}, self.__options,
                                    False, None, False, http_path="/api/meta/v1/",
                                    http_method='GET')
    return json.loads(dr.text)
Получить информацию по файлу :param media_id: :rtype: requests.Response
def create_order(cls, order, **kwargs):
    """Create a new Order.

    Synchronous by default; pass ``async=True`` in kwargs to get the request
    thread instead (use ``thread.get()`` for the result).

    :param Order order: attributes of order to create (required)
    :return: Order, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_order_with_http_info(order, **kwargs)
    else:
        (data) = cls._create_order_with_http_info(order, **kwargs)
        return data
Create Order Create a new Order This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_order(order, async=True) >>> result = thread.get() :param async bool :param Order order: Attributes of order to create (required) :return: Order If the method is called asynchronously, returns the request thread.
def delete_order_by_id(cls, order_id, **kwargs):
    """Delete an instance of Order by its ID.

    Synchronous by default; pass ``async=True`` in kwargs to get the request
    thread instead.

    :param str order_id: ID of order to delete (required)
    :return: None, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_order_by_id_with_http_info(order_id, **kwargs)
    else:
        (data) = cls._delete_order_by_id_with_http_info(order_id, **kwargs)
        return data
Delete Order Delete an instance of Order by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_order_by_id(order_id, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_order_by_id(cls, order_id, **kwargs):
    """Return a single instance of Order by its ID.

    Synchronous by default; pass ``async=True`` in kwargs to get the request
    thread instead.

    :param str order_id: ID of order to return (required)
    :return: Order, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_order_by_id_with_http_info(order_id, **kwargs)
    else:
        (data) = cls._get_order_by_id_with_http_info(order_id, **kwargs)
        return data
Find Order Return single instance of Order by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_order_by_id(order_id, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to return (required) :return: Order If the method is called asynchronously, returns the request thread.
def list_all_orders(cls, **kwargs):
    """Return a list of Orders.

    Synchronous by default; pass ``async=True`` in kwargs to get the request
    thread instead. Supports ``page``, ``size`` and ``sort`` kwargs.

    :return: page[Order], or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_orders_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_orders_with_http_info(**kwargs)
        return data
List Orders Return a list of Orders This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_orders(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Order] If the method is called asynchronously, returns the request thread.
def replace_order_by_id(cls, order_id, order, **kwargs):
    """Replace all attributes of an Order.

    Synchronous by default; pass ``async=True`` in kwargs to get the request
    thread instead.

    :param str order_id: ID of order to replace (required)
    :param Order order: attributes of order to replace (required)
    :return: Order, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_order_by_id_with_http_info(order_id, order, **kwargs)
    else:
        (data) = cls._replace_order_by_id_with_http_info(order_id, order, **kwargs)
        return data
Replace Order Replace all attributes of Order This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_order_by_id(order_id, order, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to replace (required) :param Order order: Attributes of order to replace (required) :return: Order If the method is called asynchronously, returns the request thread.
def update_order_by_id(cls, order_id, order, **kwargs):
    """Update attributes of an Order.

    Synchronous by default; pass ``async=True`` in kwargs to get the request
    thread instead.

    :param str order_id: ID of order to update (required)
    :param Order order: attributes of order to update (required)
    :return: Order, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._update_order_by_id_with_http_info(order_id, order, **kwargs)
    else:
        (data) = cls._update_order_by_id_with_http_info(order_id, order, **kwargs)
        return data
Update Order Update attributes of Order This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_order_by_id(order_id, order, async=True) >>> result = thread.get() :param async bool :param str order_id: ID of order to update. (required) :param Order order: Attributes of order to update. (required) :return: Order If the method is called asynchronously, returns the request thread.
async def newnations(self, root):
    """Most recently founded nations, from newest.

    :return: a list of :class:`Nation`
    """
    return [aionationstates.Nation(n) for n
            in root.find('NEWNATIONS').text.split(',')]
Most recently founded nations, from newest. Returns ------- an :class:`ApiQuery` of a list of :class:`Nation`
async def regions(self, root):
    """List of all the regions, seemingly in order of creation.

    :return: a list of :class:`Region`
    """
    return [aionationstates.Region(r) for r
            in root.find('REGIONS').text.split(',')]
List of all the regions, seemingly in order of creation. Returns ------- an :class:`ApiQuery` of a list of :class:`Region`
def regionsbytag(self, *tags):
    """All regions with any of the named tags.

    :param tags: regional tags (at most 10); a ``-`` prefix selects regions
        without that tag
    :return: an :class:`ApiQuery` of a list of :class:`Region`
    :raises ValueError: for zero or more than 10 tags
    """
    if len(tags) > 10:
        raise ValueError('You can specify up to 10 tags')
    if not tags:
        raise ValueError('No tags specified')
    # We don't check for invalid tags here because the behaviour is
    # fairly intuitive - quering for a non-existent tag returns no
    # regions, excluding it returns all of them.
    @api_query('regionsbytag', tags=','.join(tags))
    async def result(_, root):
        text = root.find('REGIONS').text
        return ([aionationstates.Region(r) for r in text.split(',')]
                if text else [])
    return result(self)
All regions with any of the named tags. Parameters ---------- *tags : str Regional tags. Can be preceded by a ``-`` to select regions without that tag. Returns ------- an :class:`ApiQuery` of a list of :class:`Region`
def dispatch(self, id):
    """Dispatch by id.

    :param id: dispatch id (int)
    :return: an :class:`ApiQuery` of :class:`Dispatch`
    :raises NotFound: if a dispatch with the requested id doesn't exist
    """
    @api_query('dispatch', dispatchid=str(id))
    async def result(_, root):
        elem = root.find('DISPATCH')
        # ``find`` returns None when the element is absent; truth-testing an
        # lxml/ElementTree element reflects its child count instead, so a
        # found-but-childless element would wrongly trip ``not elem``.
        if elem is None:
            raise NotFound(f'No dispatch found with id {id}')
        return Dispatch(elem)
    return result(self)
Dispatch by id. Parameters ---------- id : int Dispatch id. Returns ------- an :class:`ApiQuery` of :class:`Dispatch` Raises ------ :class:`NotFound` If a dispatch with the requested id doesn't exist.
def dispatchlist(self, *, author=None, category=None,
                 subcategory=None, sort='new'):
    """Find dispatches by certain criteria.

    :param author: name of the nation authoring the dispatch
    :param category: dispatch's primary category
    :param subcategory: dispatch's secondary category (requires ``category``)
    :param sort: sort order, ``'new'`` or ``'best'``
    :return: an :class:`ApiQuery` of a list of :class:`DispatchThumbnail`
    :raises ValueError: on invalid category/subcategory combinations
    """
    params = {'sort': sort}
    if author:
        params['dispatchauthor'] = author
    # Here we do need to ensure that our categories are valid, cause
    # NS just ignores the categories it doesn't recognise and returns
    # whatever it feels like.
    if category and subcategory:
        if (category not in dispatch_categories or
                subcategory not in dispatch_categories[category]):
            raise ValueError('Invalid category/subcategory')
        params['dispatchcategory'] = f'{category}:{subcategory}'
    elif category:
        if category not in dispatch_categories:
            raise ValueError('Invalid category')
        params['dispatchcategory'] = category
    else:
        raise ValueError('Cannot request subcategory without category')
    @api_query('dispatchlist', **params)
    async def result(_, root):
        return [
            DispatchThumbnail._from_elem(elem)
            for elem in root.find('DISPATCHLIST')
        ]
    return result(self)
Find dispatches by certain criteria. Parameters ---------- author : str Name of the nation authoring the dispatch. category : str Dispatch's primary category. subcategory : str Dispatch's secondary category. sort : str Sort order, 'new' or 'best'. Returns ------- an :class:`ApiQuery` of a list of :class:`DispatchThumbnail`
def poll(self, id):
    """Poll with a given id.

    :param id: poll id (int)
    :return: an :class:`ApiQuery` of :class:`Poll`
    :raises NotFound: if a poll with the requested id doesn't exist
    """
    @api_query('poll', pollid=str(id))
    async def result(_, root):
        elem = root.find('POLL')
        # ``find`` returns None when the element is absent; truth-testing an
        # lxml/ElementTree element reflects its child count instead, so a
        # found-but-childless element would wrongly trip ``not elem``.
        if elem is None:
            raise NotFound(f'No poll found with id {id}')
        return Poll(elem)
    return result(self)
Poll with a given id. Parameters ---------- id : int Poll id. Returns ------- an :class:`ApiQuery` of :class:`Poll` Raises ------ :class:`NotFound` If a poll with the requested id doesn't exist.
def banner(self, *ids, _expand_macros=None):
    """Get data about banners by their ids.

    Macros in banners' names and descriptions are not expanded unless an
    ``_expand_macros`` coroutine is supplied.

    :param ids: banner id strings
    :return: an :class:`ApiQuery` of a list of :class:`Banner`
    :raises NotFound: if any of the provided ids is invalid
    """
    async def noop(s):
        return s
    _expand_macros = _expand_macros or noop
    @api_query('banner', banner=','.join(ids))
    async def result(_, root):
        banners = [await Banner(elem, _expand_macros)
                   for elem in root.find('BANNERS')]
        # The API silently drops unknown ids; a short list means bad input.
        if not len(banners) == len(ids):
            raise NotFound('one of the banner ids provided is invalid')
        return banners
    return result(self)
Get data about banners by their ids. Macros in banners' names and descriptions are not expanded. Parameters ---------- *ids : str Banner ids. Returns ------- an :class:`ApiQuery` of a list of :class:`Banner` Raises ------ :class:`NotFound` If any of the provided ids is invalid.
async def send_telegram(self, *, client_key, telegram_id,
                        telegram_key, recepient):
    """A basic interface to the Telegrams API.

    :param str client_key: Telegrams API client key
    :param telegram_id: telegram id (int or str)
    :param str telegram_key: telegram key
    :param str recepient: name of the nation to telegram
    :return: an awaitable with the raw API response
    """
    params = {
        'a': 'sendTG',
        'client': client_key,
        'tgid': str(telegram_id),
        'key': telegram_key,
        'to': recepient
    }
    return await self._call_api(params)
A basic interface to the Telegrams API. Parameters ---------- client_key : str Telegrams API Client Key. telegram_id : int or str Telegram id. telegram_key : str Telegram key. recepient : str Name of the nation you want to telegram. Returns ------- an awaitable
async def happenings(self, *, nations=None, regions=None, filters=None,
                     beforeid=None, beforetime=None):
    """Iterate through happenings from newest to oldest.

    Pages of up to 100 happenings are fetched via ``_get_happenings``;
    iteration stops when a short page arrives.

    :param nations: nations whose happenings to request (exclusive with ``regions``)
    :param regions: regions whose happenings to request (exclusive with ``nations``)
    :param filters: happening categories to filter by
    :param beforeid: only request happenings before this id
    :param beforetime: only request happenings emitted before this moment
    """
    while True:
        happening_bunch = await self._get_happenings(
            nations=nations, regions=regions, filters=filters,
            beforeid=beforeid, beforetime=beforetime
        )
        for happening in happening_bunch:
            yield happening
        if len(happening_bunch) < 100:
            break
        # Continue paging from the oldest happening seen so far.
        beforeid = happening_bunch[-1].id
Iterate through happenings from newest to oldest. Parameters ---------- nations : iterable of str Nations happenings of which will be requested. Cannot be specified at the same time with ``regions``. regions : iterable of str Regions happenings of which will be requested. Cannot be specified at the same time with ``nations``. filters : iterable of str Categories to request happenings by. Available filters are: ``law``, ``change``, ``dispatch``, ``rmb``, ``embassy``, ``eject``, ``admin``, ``move``, ``founding``, ``cte``, ``vote``, ``resolution``, ``member``, and ``endo``. beforeid : int Only request happenings before this id. beforetime : :class:`datetime.datetime` Only request happenings that were emitted before this moment. Returns ------- an asynchronous iterator yielding any of the classes from \ the :mod:`~aionationstates.happenings` module
def find_potential_match_regions(template, transformed_array, method='correlation', raw_tolerance=0.666):
    """Locate positions in ``transformed_array`` whose value is near the expected match value.

    Normalising every point of the image is prohibitively slow, so this uses
    the matching method's definition to predict the value a true match would
    produce and keeps only positions within ``raw_tolerance`` of it;
    normalisation later removes the false positives.

    :param template: 2d template array
    :param transformed_array: raw match response over the image
    :param method: 'correlation', 'squared difference' or 'correlation coefficient'
    :param raw_tolerance: relative band around the expected match value
    :return: array of (row, col) candidate positions
    :raises ValueError: for an unknown ``method``
    """
    if method == 'correlation':
        expected = np.sum(template**2)
    elif method == 'squared difference':
        expected = 0
    elif method == 'correlation coefficient':
        centred = template - np.mean(template)
        expected = np.sum(centred**2)
    else:
        raise ValueError('Matching method not implemented')
    rounded = np.round(transformed_array, decimals=3)
    within_band = ((rounded >= expected * raw_tolerance) &
                   (rounded <= expected * (1. / raw_tolerance)))
    return np.transpose(within_band.nonzero())
To prevent prohibitively slow calculation of normalisation coefficient at each point in image find potential match points, and normalise these only these. This function uses the definitions of the matching functions to calculate the expected match value and finds positions in the transformed array matching these- normalisation will then eliminate false positives
def normalise_correlation(image_tile_dict, transformed_array, template, normed_tolerance=1):
    """Normalise the correlation at candidate positions and keep true matches.

    Computes the normalisation coefficient only at the candidate positions,
    divides the raw correlation by it, and returns the positions whose
    normalised value reaches ``normed_tolerance``.

    :param image_tile_dict: {(row, col): image tile} for each candidate
    :param transformed_array: raw correlation response over the image
    :param template: 2d template array
    :param normed_tolerance: minimum accepted normalised correlation
    :return: dict keys view of the surviving (row, col) positions
    """
    template_norm = np.linalg.norm(template)
    image_norms = {pos: np.linalg.norm(tile) * template_norm
                   for pos, tile in image_tile_dict.items()}
    # list() is required: Python 3 dict views cannot be indexed.
    match_points = list(image_tile_dict.keys())
    image_matches_normalised = {
        point: transformed_array[point[0], point[1]] / image_norms[point]
        for point in match_points}
    result = {key: value for key, value in image_matches_normalised.items()
              if np.round(value, decimals=3) >= normed_tolerance}
    return result.keys()
Calculates the normalisation coefficients of potential match positions Then normalises the correlation at these positions, and returns them if they do indeed constitute a match
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):
    """As :func:`normalise_correlation`, but for the correlation coefficient method.

    Both the template and each candidate tile are mean-centred before their
    norms are taken.

    :param image_tile_dict: {(row, col): image tile} for each candidate
    :param transformed_array: raw match response over the image
    :param template: 2d template array
    :param normed_tolerance: minimum accepted normalised value
    :return: dict keys view of the surviving (row, col) positions
    """
    template_mean = np.mean(template)
    template_minus_mean = template - template_mean
    template_norm = np.linalg.norm(template_minus_mean)
    image_norms = {pos: np.linalg.norm(tile - np.mean(tile)) * template_norm
                   for pos, tile in image_tile_dict.items()}
    # list() is required: Python 3 dict views cannot be indexed.
    match_points = list(image_tile_dict.keys())
    image_matches_normalised = {
        point: transformed_array[point[0], point[1]] / image_norms[point]
        for point in match_points}
    normalised_matches = {key: value for key, value in image_matches_normalised.items()
                          if np.round(value, decimals=3) >= normed_tolerance}
    return normalised_matches.keys()
As above, but for when the correlation coefficient matching method is used
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
    """As :func:`normalise_correlation`, but for the squared differences method.

    Reconstructs the squared difference at each candidate position from the
    cross-term in ``transformed_array`` and keeps positions whose difference
    is below ``sq_diff_tolerance`` of the maximum possible (255 per pixel).

    :param image_tile_dict: {(row, col): image tile} for each candidate
    :param transformed_array: raw cross-correlation response over the image
    :param template: 2d template array
    :param sq_diff_tolerance: fraction of the max squared difference allowed
    :return: dict keys view of the surviving (row, col) positions
    """
    template_norm_squared = np.sum(template**2)
    image_norms_squared = {pos: np.sum(tile**2)
                           for pos, tile in image_tile_dict.items()}
    # list() is required: Python 3 dict views cannot be indexed.
    match_points = list(image_tile_dict.keys())
    h, w = template.shape
    # ||I - T||^2 = ||I||^2 - 2*I.T + ||T||^2, using the cross-term from the transform.
    image_matches_normalised = {
        point: -2 * transformed_array[point[0], point[1]]
               + image_norms_squared[point] + template_norm_squared
        for point in match_points}
    cutoff = h * w * 255**2 * sq_diff_tolerance
    normalised_matches = {key: value for key, value in image_matches_normalised.items()
                          if np.round(value, decimals=3) <= cutoff}
    return normalised_matches.keys()
As above, but for when the squared differences matching method is used
def __init_os_api(self):
    """Initialise client objects for talking to the OpenStack API.

    Kept separate from ``__init__`` so it can also be called by
    ``__setstate__`` after unpickling.
    """
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(auth_url=self._os_auth_url,
                                    username=self._os_username,
                                    password=self._os_password,
                                    project_name=self._os_tenant_name)
    sess = session.Session(auth=auth)
    self.nova_client = nova_client.Client(self.nova_api_version, session=sess)
    self.neutron_client = neutron_client.Client(session=sess)
    self.glance_client = glance_client.Client('2', session=sess)
    self.cinder_client = cinder_client.Client('2', session=sess)
Initialise client objects for talking to OpenStack API. This is in a separate function so to be called by ``__init__`` and ``__setstate__``.
def stop_instance(self, instance_id):
    """Stop the instance gracefully and drop it from the local registry.

    :param str instance_id: instance identifier
    """
    self._load_instance(instance_id).delete()
    del self._instances[instance_id]
Stops the instance gracefully. :param str instance_id: instance identifier