Columns: code (string, lengths 52–7.75k), docs (string, lengths 1–5.85k)
def get_matching(self, source_id): value = self._accessor.get_by_id(source_id) if not value is None: reg = get_current_registry() prx_fac = reg.getUtility(IDataTraversalProxyFactory) prx = prx_fac.make_proxy(value, self._accessor, self.relationship_direction, self.relation_operation) else: prx = None return prx
Returns a matching target object for the given source ID.
def update_attribute_value_items(self): for attr in self._attribute_iterator(): if attr.kind != RESOURCE_ATTRIBUTE_KINDS.COLLECTION: try: attr_val = self._get_proxied_attribute_value(attr) except AttributeError: continue else: yield (attr, attr_val)
Returns an iterator of items for an attribute value map to use for an UPDATE operation. The iterator ignores collection attributes as these are processed implicitly by the traversal algorithm. :returns: iterator yielding tuples with objects implementing :class:`everest.resources.interfaces.IResourceAttribute` as the first and the proxied attribute value as the second argument.
def get_attribute_proxy(self, attribute): attr_val = self._get_relation_attribute_value(attribute) if attr_val is None: prx = None else: if not self._accessor is None: # FIXME: This implies that the accessor is an aggregate. acc = self._accessor.get_root_aggregate(attribute.attr_type) else: acc = None reg = get_current_registry() prx_fac = reg.getUtility(IDataTraversalProxyFactory) prx = prx_fac.make_proxy(attr_val, acc, self.relationship_direction, self.relation_operation, options= self._get_proxy_options(attribute)) return prx
Returns a traversal proxy (cardinality ONE) or an iterable sequence data traversal proxy (cardinality MANY) for the specified relation attribute value of the proxied data. :raises ValueError: If :param:`attribute` is a terminal attribute.
def get_entity(self): if self._accessor is None: if self.__converted_entity is None: self.__converted_entity = self._convert_to_entity() else: # If we have an accessor, we can get the proxied entity by ID. # FIXME: This is a hack that is only used for REMOVE operations # with data elements. self.__converted_entity = \ self.get_matching(self.get_id()).get_entity() return self.__converted_entity
Returns the entity converted from the proxied data.
def run(self, visitor): if __debug__: self.__log_run(visitor) visitor.prepare() if self.__root_is_sequence: if not self._tgt_prx is None: tgts = iter(self._tgt_prx) else: tgts = None if not self._src_prx is None: srcs = iter(self._src_prx) else: srcs = None self.traverse_many(None, srcs, tgts, visitor) else: self.traverse_one(None, self._src_prx, self._tgt_prx, visitor) visitor.finalize()
:param visitor: visitor to call with every node in the domain tree. :type visitor: subclass of :class:`everest.entities.traversal.DomainVisitor`
async def info(self, obj_id=None): '''Get info about object id |coro| Parameters ---------- obj_id : str, list if not provided, server info is returned (as a dict). Otherwise, an object with that id is returned (or objects if `obj_id` is a list). ''' if obj_id: try: return await self.process(obj_id) except JSONDecodeError: raise LookupError('Error object with that id does not exist', obj_id) else: return await self.connector.getJson('/system/info/public', remote=False)
Get info about object id |coro| Parameters ---------- obj_id : str, list if not provided, server info is returned (as a dict). Otherwise, an object with that id is returned (or objects if `obj_id` is a list).
async def nextUp(self, userId=None): '''returns list of items marked as `next up` |coro| Parameters ---------- userId : str if provided, then the list returned is the one that that user will see. Returns ------- list the items that will appear as next up (for user if id was given) ''' json = await self.connector.getJson('/Shows/NextUp', pass_uid=True, remote=False, userId=userId ) return await self.process(json)
returns list of items marked as `next up` |coro| Parameters ---------- userId : str if provided, then the list returned is the one that that user will see. Returns ------- list the items that will appear as next up (for user if id was given)
async def update(self): ''' reload all cached information |coro| Notes ----- This is a slow process, and will remove the cache before updating. Thus it is recommended to use the `*_force` properties, which will only update the cache after data is retrieved. ''' keys = self.extras.keys() self.extras = {} for key in keys: try: func = getattr(self, key, None) if callable(func): func() except Exception: pass
reload all cached information |coro| Notes ----- This is a slow process, and will remove the cache before updating. Thus it is recommended to use the `*_force` properties, which will only update the cache after data is retrieved.
async def create_playlist(self, name, *songs): '''create a new playlist |coro| Parameters ---------- name : str name of new playlist songs : array_like list of song ids to add to playlist ''' data = {'Name': name} ids = [i.id for i in (await self.process(songs))] if ids: data['Ids'] = ','.join(ids) # TODO - return playlist not status return await self.connector.post('/Playlists', data=data, pass_uid=True, remote=False )
create a new playlist |coro| Parameters ---------- name : str name of new playlist songs : array_like list of song ids to add to playlist
def load_all(self, group): for ep in iter_entry_points(group=group): plugin = ep.load() plugin(self.__config)
Loads all plugins advertising entry points with the given group name. The specified plugin needs to be a callable that accepts the everest configurator as single argument.
def gui(): sel = psidialogs.multi_choice(libraries(), 'select libraries to remove from %s!' % libraries_dir(), title='remove boards') print('%s selected' % sel) if sel: if psidialogs.ask_yes_no('Do you really want to remove selected libraries?\n' + '\n'.join(sel)): for x in sel: remove_lib(x) print('%s was removed' % x)
remove libraries by GUI.
def copy_document(destination, identifier, pointer): record = Record.get_record(identifier) click.echo(json.dumps( Document(record, pointer).copy(destination) ))
Copy file to a new destination.
def setcontents(source, identifier, pointer): record = Record.get_record(identifier) Document(record, pointer).setcontents(source)
Set the contents of a document attached to an existing bibliographic record.
def src2ast(src: str) -> Expression: try: return ast.parse(src, mode='eval') except SyntaxError: raise ValueError("Not a valid expression.") from None
Return ast.Expression created from source code given in `src`.
def names(expr: AST) -> Set[str]: nodes = [node for node in ast.walk(expr) if isinstance(node, ast.Name)] loaded = {node.id for node in nodes if isinstance(node.ctx, ast.Load)} stored = {node.id for node in nodes if isinstance(node.ctx, ast.Store)} return loaded - stored
Names of globals in `expr`.
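A minimal usage sketch for the two helpers above, assuming `src2ast` and `names` are importable from the same module:

expr = src2ast("a * x + b")        # an ast.Expression
print(sorted(names(expr)))         # ['a', 'b', 'x']

# invalid input surfaces as ValueError rather than SyntaxError
try:
    src2ast("a +")
except ValueError as exc:
    print(exc)                     # Not a valid expression.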
def replace_name(expr: AST, old_name: str, new_name: str) -> AST: return _NameReplacer(old_name, new_name).visit(deepcopy(expr))
Replace all Name nodes named `old_name` with nodes named `new_name`.
def Negation(expr: Expression) -> Expression: expr = Expression(_negate(expr.body)) return ast.fix_missing_locations(expr)
Return expression which is the negation of `expr`.
def Conjunction(expr1: Expression, expr2: Expression) -> Expression: expr = Expression(ast.BoolOp(ast.And(), [expr1.body, expr2.body])) return ast.fix_missing_locations(expr)
Return expression which is the conjunction of `expr1` and `expr2`.
def Disjunction(expr1: Expression, expr2: Expression) -> Expression: expr = Expression(ast.BoolOp(ast.Or(), [expr1.body, expr2.body])) return ast.fix_missing_locations(expr)
Return expression which is the disjunction of `expr1` and `expr2`.
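A small sketch showing that the Expression nodes returned by the constructors above can be compiled and evaluated directly (assuming `src2ast`, `Conjunction` and `Disjunction` are in scope):

e1 = src2ast("x > 0")
e2 = src2ast("y < 10")
both = Conjunction(e1, e2)       # equivalent to: x > 0 and y < 10
either = Disjunction(e1, e2)     # equivalent to: x > 0 or y < 10
print(eval(compile(both, "<expr>", "eval"), {"x": 3, "y": 50}))    # False
print(eval(compile(either, "<expr>", "eval"), {"x": 3, "y": 50}))  # True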
def Contradiction(expr1: Expression, expr2: Expression) -> Expression: expr = Disjunction(Conjunction(expr1, Negation(expr2)), Conjunction(Negation(expr1), expr2)) return ast.fix_missing_locations(expr)
Return expression which is the contradiction of `expr1` and `expr2`.
def diff_binding(self) -> int: try: prev_op, prev_op_binding = self.nested_ops[-2] except IndexError: prev_op, prev_op_binding = None, 0 try: curr_op, curr_op_binding = self.nested_ops[-1] except IndexError: curr_op, curr_op_binding = None, 0 # special case if prev_op is ast.Pow and isinstance(curr_op, (ast.Invert, ast.USub)): return 1 # print(prev_op, prev_op_binding, curr_op, curr_op_binding) return curr_op_binding - prev_op_binding
Return the difference between the binding levels of the current and the previous operator.
def wrap_expr(self, src: str, dfltChaining: bool) -> str: diff_binding = self.op_man.diff_binding() if diff_binding < 0 or diff_binding == 0 and not dfltChaining: return self.parenthesize(src) else: return src
Wrap `src` in parentheses if necessary.
def visit(self, node: AST, dfltChaining: bool = True) -> str: # print(node.__class__.__name__) if node is None: return '' if isinstance(node, ast.Expression): return self.visit(node.body) # dispatch to specific or generic method method = 'visit_' + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) return visitor(node, dfltChaining)
Process `node` by dispatching to a handler.
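The same getattr-based dispatch, shown standalone as a minimal sketch; the class and handlers here are illustrative, not part of the original unparser:

import ast

class MiniVisitor:
    def visit(self, node):
        # dispatch to a specific handler if one exists, else fall back to generic_visit
        method = 'visit_' + node.__class__.__name__
        return getattr(self, method, self.generic_visit)(node)
    def visit_Name(self, node):
        return node.id
    def generic_visit(self, node):
        return ast.dump(node)

tree = ast.parse("x", mode="eval")
print(MiniVisitor().visit(tree.body))   # x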
def generic_visit(self, node: AST, dfltChaining: bool = True) -> str: for field, value in ast.iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, AST): self.visit(item) elif isinstance(value, AST): self.visit(value)
Default handler, called if no explicit visitor function exists for a node.
def visit_NameConstant(self, node: AST, dfltChaining: bool = True) -> str: return str(node.value)
Return `node`s value as string.
def visit_Num(self, node: AST, dfltChaining: bool = True) -> str: return str(node.n)
Return `node`s number as string.
def visit_Str(self, node: AST, dfltChaining: bool = True) -> str: return repr(node.s)
Return `node`s string representation.
def visit_FormattedValue(self, node: AST, dfltChaining: bool = True) -> str: format_spec = node.format_spec return f"{{{self.visit(node.value)}" \ f"{self.CONV_MAP.get(node.conversion, '')}" \ f"{':'+self._nested_str(format_spec) if format_spec else ''}}}"
Return `node`s value formatted according to its format spec.
def visit_Tuple(self, node: AST, dfltChaining: bool = True) -> str: elems = (self.visit(elt) for elt in node.elts) return f"({', '.join(elems)}{')' if len(node.elts) != 1 else ',)'}"
Return tuple representation of `node`s elements.
def visit_Set(self, node: AST, dfltChaining: bool = True) -> str: return '{' + ', '.join([self.visit(elt) for elt in node.elts]) + '}'
Return set representation of `node`s elements.
def visit_Dict(self, node: AST, dfltChaining: bool = True) -> str: items = (': '.join((self.visit(key), self.visit(value))) for key, value in zip(node.keys, node.values)) return f"{{{', '.join(items)}}}"
Return dict representation of `node`s elements.
def visit_Name(self, node: AST, dfltChaining: bool = True) -> str: return node.id
Return `node`s id.
def visit_Starred(self, node: AST, dfltChaining: bool = True) -> str: with self.op_man(node): return f"*{self.visit(node.value)}"
Return representation of starred expression.
def visit_Expr(self, node: AST, dfltChaining: bool = True) -> str: return self.visit(node.value)
Return representation of nested expression.
def visit_UnaryOp(self, node: AST, dfltChaining: bool = True) -> str: op = node.op with self.op_man(op): return self.visit(op) + self.visit(node.operand)
Return representation of `node`s operator and operand.
def visit_BinOp(self, node: AST, dfltChaining: bool = True) -> str: op = node.op with self.op_man(op): if isinstance(op, ast.Pow): # Pow chains right-to-left src = self.visit(op).join((self.visit(node.left, dfltChaining=False), self.visit(node.right))) else: src = self.visit(op).join((self.visit(node.left), self.visit(node.right, dfltChaining=False))) return self.wrap_expr(src, dfltChaining)
Return `node`s operator and operands as inlined expression.
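A quick check (plain Python, for illustration) of the right-to-left chaining of ** that the handler above accounts for by disabling default chaining on the left operand of ast.Pow:

print(2 ** 3 ** 2)      # 512, parsed as 2 ** (3 ** 2)
print((2 ** 3) ** 2)    # 64, parentheses are needed to force left grouping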
def visit_Div(self, node: AST, dfltChaining: bool = True) -> str: return '/' if self.compact else ' / '
Return division sign.
def visit_BoolOp(self, node: AST, dfltChaining: bool = True) -> str: op = node.op with self.op_man(op): src = self.visit(op).join([self.visit(node.values[0])] + [self.visit(val, dfltChaining=False) for val in node.values[1:]]) return self.wrap_expr(src, dfltChaining)
Return `node`s operator and operands as inlined expression.
def visit_Compare(self, node: AST, dfltChaining: bool = True) -> str: # all comparison operators have the same precedence, # we just take the first one as representative first_op = node.ops[0] with self.op_man(first_op): cmps = [' '.join((self.visit(op), self.visit(cmp, dfltChaining=False))) for op, cmp in zip(node.ops, node.comparators)] src = ' '.join((self.visit(node.left), ' '.join(cmps))) return self.wrap_expr(src, dfltChaining)
Return `node`s operators and operands as inlined expression.
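For reference, a chained comparison is a single Compare node carrying several operators and comparators, which is what the handler above zips over (plain stdlib illustration):

import ast

node = ast.parse("1 < x <= 10", mode="eval").body
print(type(node).__name__)                   # Compare
print(len(node.ops), len(node.comparators))  # 2 2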
def visit_keyword(self, node: AST, dfltChaining: bool = True) -> str: arg = node.arg if arg is None: return f"**{self.visit(node.value)}" else: return f"{arg}={self.visit(node.value)}"
Return representation of `node` as keyword arg.
def visit_Call(self, node: AST, dfltChaining: bool = True) -> str: args = node.args try: kwds = node.keywords except AttributeError: kwds = [] self.compact = True args_src = (self.visit(arg) for arg in args) kwds_src = (self.visit(kwd) for kwd in kwds) param_src = ', '.join(chain(args_src, kwds_src)) src = f"{self.visit(node.func)}({param_src})" self.compact = False return src
Return `node`s representation as function call.
def visit_arguments(self, node: AST, dfltChaining: bool = True) -> str: args = node.args dflts = node.defaults vararg = node.vararg kwargs = node.kwonlyargs kwdflts = node.kw_defaults kwarg = node.kwarg self.compact = True n_args_without_dflt = len(args) - len(dflts) args_src = (arg.arg for arg in args[:n_args_without_dflt]) dflts_src = (f"{arg.arg}={self.visit(dflt)}" for arg, dflt in zip(args[n_args_without_dflt:], dflts)) vararg_src = (f"*{vararg.arg}",) if vararg else () kwargs_src = ((f"{kw.arg}={self.visit(dflt)}" if dflt is not None else f"{kw.arg}") for kw, dflt in zip(kwargs, kwdflts)) kwarg_src = (f"**{kwarg.arg}",) if kwarg else () src = ', '.join(chain(args_src, dflts_src, vararg_src, kwargs_src, kwarg_src)) self.compact = False return src
Return `node`s representation as argument list.
def visit_Lambda(self, node: AST, dfltChaining: bool = True) -> str: with self.op_man(node): src = f"lambda {self.visit(node.args)}: {self.visit(node.body)}" return self.wrap_expr(src, dfltChaining)
Return `node`s representation as lambda expression.
def visit_IfExp(self, node: AST, dfltChaining: bool = True) -> str: with self.op_man(node): src = " if ".join((self.visit(node.body, dfltChaining=False), " else ".join((self.visit(node.test), self.visit(node.orelse))))) return self.wrap_expr(src, dfltChaining)
Return `node`s representation as ... if ... else ... expression.
def visit_Attribute(self, node: AST, dfltChaining: bool = True) -> str: return '.'.join((self.visit(node.value), node.attr))
Return `node`s representation as attribute access.
def visit_Slice(self, node: AST, dfltChaining: bool = True) -> str: elems = [self.visit(node.lower), self.visit(node.upper)] if node.step is not None: elems.append(self.visit(node.step)) return ':'.join(elems)
Return `node`s representation as slice.
def visit_ExtSlice(self, node: AST, dfltChaining: bool = True) -> str: return ', '.join((self.visit(dim) for dim in node.dims))
Return `node`s representation as extended slice.
def visit_comprehension(self, node: AST, dfltChaining: bool = True) -> str: target = node.target try: elts = target.elts # we have a tuple of names except AttributeError: names = self.visit(target) else: names = ', '.join(self.visit(elt) for elt in elts) src = f"for {names} in {self.visit(node.iter)}" if node.ifs: src += f" {' '.join('if ' + self.visit(if_) for if_ in node.ifs)}" return src
Return `node`s representation as comprehension.
def visit_ListComp(self, node: AST, dfltChaining: bool = True) -> str: return f"[{self.visit(node.elt)} " \ f"{' '.join(self.visit(gen) for gen in node.generators)}]"
Return `node`s representation as list comprehension.
def visit_SetComp(self, node: AST, dfltChaining: bool = True) -> str: return f"{{{self.visit(node.elt)} " \ f"{' '.join(self.visit(gen) for gen in node.generators)}}}"
Return `node`s representation as set comprehension.
def visit_DictComp(self, node: AST, dfltChaining: bool = True) -> str: return f"{{{self.visit(node.key)}: {self.visit(node.value)} " \ f"{' '.join(self.visit(gen) for gen in node.generators)}}}"
Return `node`s representation as dict comprehension.
def visit_GeneratorExp(self, node: AST, dfltChaining: bool = True) -> str: return f"({self.visit(node.elt)} " \ f"{' '.join(self.visit(gen) for gen in node.generators)})"
Return `node`s representation as generator expression.
def getargs(): parser = argparse.ArgumentParser( description='Python Disk Usage Calculator.') parser.add_argument("path", type=chkpath, nargs='?', default=".", help="A valid path.") return parser.parse_args()
Return the parsed command-line arguments.
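`chkpath` is referenced above but not shown; a plausible implementation (an assumption, not the original code) would validate the path and raise argparse.ArgumentTypeError on failure:

import argparse
import os

def chkpath(path):
    # hypothetical validator used via type=chkpath above
    if os.path.exists(path):
        return path
    raise argparse.ArgumentTypeError("%s is not a valid path" % path)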
def visible_line_width(self, position = Point): extra_char_width = len([ None for c in self[:position].line_buffer if 0x2013 <= ord(c) <= 0xFFFD]) return len(self[:position].quoted_text()) + self[:position].line_buffer.count(u"\t")*7 + extra_char_width
Return the visible width of the text in line buffer up to position.
def copy_region_to_clipboard(self): # () u'''Copy the text in the region to the windows clipboard.''' if self.enable_win32_clipboard: mark = min(self.mark, len(self.line_buffer)) cursor = min(self.point, len(self.line_buffer)) if self.mark == -1: return begin = min(cursor, mark) end = max(cursor, mark) toclipboard = u"".join(self.line_buffer[begin:end]) clipboard.SetClipboardText(toclipboard)
Copy the text in the region to the Windows clipboard.
def copy_selection_to_clipboard(self): # () u'''Copy the text in the selection to the windows clipboard.''' if self.enable_win32_clipboard and self.enable_selection and self.selection_mark >= 0: selection_mark = min(self.selection_mark, len(self.line_buffer)) cursor = min(self.point, len(self.line_buffer)) if self.selection_mark == -1: return begin = min(cursor, selection_mark) end = max(cursor, selection_mark) toclipboard = u"".join(self.line_buffer[begin:end]) clipboard.SetClipboardText(toclipboard)
Copy the text in the selection to the Windows clipboard.
def run(self): import fnmatch import shutil import glob matches = [] matches.extend(glob.glob('./*.pyc')) matches.extend(glob.glob('./*.pyd')) matches.extend(glob.glob('./*.pyo')) matches.extend(glob.glob('./*.so')) dirs = [] dirs.extend(glob.glob('./__pycache__')) dirs.extend(glob.glob('docs/_build')) for cleandir in [SOURCE, 'test', 'examples']: for root, dirnames, filenames in os.walk(cleandir): for filename in fnmatch.filter(filenames, '*.pyc'): matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.pyd'): matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.pyo'): matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.so'): matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.dll'): matches.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.c'): matches.append(os.path.join(root, filename)) for dirname in fnmatch.filter(dirnames, '__pycache__'): dirs.append(os.path.join(root, dirname)) for match in matches: os.remove(match) for dir in dirs: shutil.rmtree(dir)
Run CleanUp.
def run(self): if os.system('git add .'): sys.exit(1) if self.message is not None: os.system('git commit -a -m "' + self.message + '"') else: os.system('git commit -a')
Run git add and commit with message if provided.
def saxon6(self, elem, **params): java = os.environ.get('java') or 'java' saxon6path = os.path.join(JARS, 'saxon.jar') # saxon 6.5.5, included with jing and trang with tempfile.TemporaryDirectory() as tempdir: if self.fn is None: xslfn = os.path.join(tempdir, "xslt.xsl") self.write(fn=xslfn) else: xslfn = self.fn srcfn = os.path.join(tempdir, "src.xml") outfn = os.path.join(tempdir, "out.xml") XML(fn=srcfn, root=elem).write() cmd = [java, '-jar', saxon6path, '-o', outfn, srcfn, xslfn] \ + ["%s=%r" % (key, params[key]) for key in params.keys()] log.debug("saxon6: %r " % cmd) try: subprocess.check_output(cmd) except subprocess.CalledProcessError as e: error = html.unescape(str(e.output, 'UTF-8')) raise RuntimeError(error).with_traceback(sys.exc_info()[2]) from None if self.find(self.root, "xsl:output") is None or self.find(self.root, "xsl:output").get('method')=='xml': return etree.parse(outfn) else: return open(outfn, 'rb').read().decode('utf-8')
Use Saxon6 to process the element. If the XSLT has a filename (fn), use that. Otherwise, make temp.
def uri(self, value): jsonpointer.set_pointer(self.record, self.pointer, value)
Set new uri value in record. It will not change the location of the underlying file!
def open(self, mode='r', **kwargs): _fs, filename = opener.parse(self.uri) return _fs.open(filename, mode=mode, **kwargs)
Open file ``uri`` under the pointer.
def move(self, dst, **kwargs): _fs, filename = opener.parse(self.uri) _fs_dst, filename_dst = opener.parse(dst) movefile(_fs, filename, _fs_dst, filename_dst, **kwargs) self.uri = dst
Move file to a new destination and update ``uri``.
def copy(self, dst, **kwargs): _fs, filename = opener.parse(self.uri) _fs_dst, filename_dst = opener.parse(dst) copyfile(_fs, filename, _fs_dst, filename_dst, **kwargs) return [{'op': 'replace', 'path': self.pointer, 'value': dst}]
Copy file to a new destination. Returns JSON Patch with proposed change pointing to new copy.
def setcontents(self, source, **kwargs): if isinstance(source, six.string_types): _file = opener.open(source, 'rb') else: _file = source # signals.document_before_content_set.send(self) data = _file.read() _fs, filename = opener.parse(self.uri) _fs.setcontents(filename, data, **kwargs) _fs.close() # signals.document_after_content_set.send(self) if isinstance(source, six.string_types) and hasattr(_file, 'close'): _file.close()
Create a new file from a string or file-like object.
def remove(self, force=False): if force: _fs, filename = opener.parse(self.uri) _fs.remove(filename) self.uri = None
Remove file reference from record. If force is True, it also removes the file from the filesystem.
def prefixed_to_namespaced(C, prefixed_name, namespaces): if ':' not in prefixed_name: return prefixed_name else: prefix, name = prefixed_name.split(':') namespace = namespaces[prefix] return "{%s}%s" % (namespace, name)
for a given prefix:name, return {namespace}name from the given namespaces dict
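A usage sketch, assuming this is a classmethod on the XML class used below; the dc prefix/URI pair is only an example mapping:

ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
print(XML.prefixed_to_namespaced('dc:title', ns))   # {http://purl.org/dc/elements/1.1/}title
print(XML.prefixed_to_namespaced('title', ns))      # title (no prefix, returned unchanged)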
def xpath(C, node, path, namespaces=None, extensions=None, smart_strings=True, **args): return node.xpath( path, namespaces=namespaces or C.NS, extensions=extensions, smart_strings=smart_strings, **args )
shortcut to Element.xpath()
def find(C, node, path, namespaces=None, extensions=None, smart_strings=True, **args): xp = node.xpath( path, namespaces=namespaces or C.NS, extensions=extensions, smart_strings=smart_strings, **args ) if len(xp) > 0: return xp[0]
use Element.xpath() rather than Element.find() in order to normalize the interface
def tobytes( self, root=None, encoding='UTF-8', doctype=None, canonicalized=True, xml_declaration=True, pretty_print=True, with_comments=True, ): if root is None: root = self.root if canonicalized == True: return self.canonicalized_bytes(root) else: return etree.tostring( root, encoding=encoding or self.info.encoding, doctype=doctype or self.info.doctype, xml_declaration=xml_declaration, pretty_print=pretty_print, with_comments=with_comments, )
return the content of the XML document as a byte string suitable for writing
def tostring(self, root=None, doctype=None, pretty_print=True): if root is None: root = self.root return etree.tounicode( root, doctype=doctype or self.info.doctype, pretty_print=pretty_print )
return the content of the XML document as a unicode string
def digest(self, **args): return String(XML.canonicalized_string(self.root)).digest(**args)
calculate a digest based on the hash of the XML content
def element(self, tag_path, test=None, **attributes): xpath = tag_path tests = ["@%s='%s'" % (k, attributes[k]) for k in attributes] if test is not None: tests.insert(0, test) if len(tests) > 0: xpath += "[%s]" % ' and '.join(tests) e = self.find(self.root, xpath) if e is None: tag = tag_path.split('/')[-1].split('[')[0] tagname = tag.split(':')[-1] if ':' in tag: nstag = tag.split(':')[0] tag = "{%s}%s" % (self.NS[nstag], tagname) e = etree.Element(tag, **attributes) return e
given a tag in xpath form and optional attributes, find the element in self.root or return a new one.
def Element(cls, s, *args): sargs = [] for arg in args: if type(arg) == etree._Element: sargs.append(etree.tounicode(arg)) else: sargs.append(arg) if type(s) == etree._Element: t = etree.tounicode(s) else: t = s if len(args) == 0: return XML.fromstring(t) else: return XML.fromstring(t % tuple(sargs))
given a string s and string *args, return an Element.
def jing(self, tag=None, schemas=None, schemafn=None, ext='.rnc'): from . import JARS java = os.environ.get('java') or 'java' jingfn = os.path.join(JARS, 'jing.jar') tag = tag or self.root.tag schemas = schemas or self.schemas schemafn = schemafn or Schema.filename(tag, schemas, ext=ext) if schemafn is not None: cmd = [java, '-jar', jingfn, '-c', schemafn] if os.path.splitext(schemafn)[-1].lower() != '.rnc': cmd.pop(cmd.index('-c')) try: fn = self.fn if fn is None or not os.path.exists(fn): tempf = tempfile.NamedTemporaryFile() fn = tempf.name tempf.close() open(fn, 'wb').write(etree.tounicode(self.root).encode('utf-8')) subprocess.check_output(cmd + [fn]) except subprocess.CalledProcessError as e: tbtext = html.unescape(str(e.output, 'UTF-8')) raise RuntimeError(tbtext).with_traceback(sys.exc_info()[2]) from None
use the (included) jing library to validate the XML.
def namespace(self, elem=None): if elem is None: elem = self.root return XML.tag_namespace(elem.tag)
return the namespace URI, if any, for the doc root or elem, if given.
def tag_namespace(cls, tag): md = re.match("^(?:\{([^\}]*)\})", tag) if md is not None: return md.group(1)
return the namespace for a given tag, or None if the tag has no namespace
def tag_name(cls, tag): while isinstance(tag, etree._Element): tag = tag.tag return tag.split('}')[-1]
return the name of the tag, with the namespace removed
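A small sketch of the Clark-notation handling in the two classmethods above, assuming they are accessible on the XML class:

tag = '{http://www.w3.org/1999/xhtml}p'
print(XML.tag_namespace(tag))   # http://www.w3.org/1999/xhtml
print(XML.tag_name(tag))        # p
print(XML.tag_namespace('p'))   # None -- the tag carries no namespace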
def element_map( self, tags=None, xpath="//*", exclude_attribs=[], include_attribs=[], attrib_vals=False, hierarchy=False, minimize=False, ): if tags is None: tags = Dict() for elem in self.root.xpath(xpath): if elem.tag not in tags.keys(): tags[elem.tag] = Dict(**{'parents': [], 'children': [], 'attributes': Dict()}) for a in [ a for a in elem.attrib.keys() if (include_attribs == [] and a not in exclude_attribs) or (a in include_attribs) ]: # Attribute Names if a not in tags[elem.tag].attributes.keys(): tags[elem.tag].attributes[a] = [] # Attribute Values if attrib_vals == True and elem.get(a) not in tags[elem.tag].attributes[a]: tags[elem.tag].attributes[a].append(elem.get(a)) # Hierarchy: Parents and Children if hierarchy == True: parent = elem.getparent() if parent is not None and parent.tag not in tags[elem.tag].parents: tags[elem.tag].parents.append(parent.tag) for child in elem.xpath("*"): if child.tag not in tags[elem.tag].children: tags[elem.tag].children.append(child.tag) if minimize == True: for tag in tags.keys(): if tags[tag].get('parents') == []: tags[tag].pop('parents') if tags[tag].get('children') == []: tags[tag].pop('children') if tags[tag].get('attributes') == {}: tags[tag].pop('attributes') if tags[tag] == {}: tags.pop(tag) return tags
return a dict of element tags, their attribute names, and optionally attribute values, in the XML document
def as_dict(self, elem=None, ignore_whitespace=True, namespaces=True): if elem is None: elem = self.root tag = self.tag_dict_key(elem.tag, namespaces=namespaces) d = Dict(**{tag: {}}) d[tag].update( **{ '@' + self.tag_dict_key(k, namespaces=namespaces): elem.attrib[k] for k in elem.attrib.keys() } ) nodes = [] if elem.text is not None and (elem.text.strip() != '' or ignore_whitespace != True): nodes.append(str(elem.text)) for ch in [ e for e in elem.getchildren() if type(e) == etree._Element ]: # *** IGNORE EVERYTHING EXCEPT ELEMENTS *** nodes.append( self.as_dict(elem=ch, ignore_whitespace=ignore_whitespace, namespaces=namespaces) ) if ch.tail is not None and (ch.tail.strip() != '' or ignore_whitespace != True): nodes.append(ch.tail) if nodes != []: d[tag]['nodes'] = nodes return d
Create a generalized dict output from this elem (default self.root). Rules: * Elements are objects with a single key, which is the tag. + if namespaces==True, the namespace or its prefix is included in the tag. * The value is an object: + '@name' = attribute with name="value", value is string + 'nodes' = list of 0 or more text or element nodes: + text is represented as strings + elements are represented as objects * If ignore_whitespace==True, then whitespace-only element text and tail will be omitted. * Comments and processing instructions are ignored. * The "tail" of the given element (or XML.root) node is also ignored.
def dict_key_tag(Class, key, namespaces=None): namespaces = namespaces or Class.NS ns = Class.tag_namespace(key) tag = Class.tag_name(key) if ns is None and ':' in key: prefix, tag = key.split(':') if prefix in namespaces.keys(): ns = namespaces[prefix] if ns is not None: tag = "{%s}%s" % (ns, tag) return tag
convert a dict key into an element or attribute name
def from_dict(Class, element_data, fn=None): from .builder import Builder B = Builder(default=Class.DEFAULT_NS, **Class.NS) keys = list(element_data.keys()) assert len(keys) == 1 key = keys[0] elem_tag = Class.dict_key_tag(key) elem = B(elem_tag) for k in element_data[key].keys(): # attributes if k[0] == '@': attr_name = Class.dict_key_tag(k[1:]) elem.set(attr_name, element_data[key][k]) elif k == 'nodes': for node in element_data[key][k]: if type(node) == str: if len(elem) == 0: elem.text = node else: elem[-1].tail = node else: child_elem = Class.from_dict(node) elem.append(child_elem) else: raise ValueError('unsupported data: %r: %r' % (k, element_data[key][k])) return elem
reverse of XML.as_dict(): Create a new XML element from the given element_data. Rules: * element_data is a dict with one key, which is the name of the element * The element name can be in "prefix:..." form, if the namespace prefix is in self.NS * The element name can also be in "{namespace}..." form * element_data[key] is a dict: + keys starting with '@' are attributes, with the '@' stripped from the attribute name + the 'nodes' key holds a list of strings and dicts * strings are interpreted as text * dicts are interpreted as child elements, which must follow the rules of element_data * namespaces are applied from self.NS
def replace_with_contents(c, elem): "removes an element and leaves its contents in its place. Namespaces supported." parent = elem.getparent() index = parent.index(elem) children = elem.getchildren() previous = elem.getprevious() # text if index == 0: parent.text = (parent.text or '') + (elem.text or '') else: previous.tail = (previous.tail or '') + (elem.text or '') # children for child in children: parent.insert(index + children.index(child), child) # tail if len(children) > 0: last_child = children[-1] last_child.tail = (last_child.tail or '') + (elem.tail or '') else: if index == 0: parent.text = (parent.text or '') + (elem.tail or '') else: previous.tail = (previous.tail or '') + (elem.tail or '') # elem parent.remove(elem)
removes an element and leaves its contents in its place. Namespaces supported.
def remove_range(cls, elem, end_elem, delete_end=True): while elem is not None and elem != end_elem and end_elem not in elem.xpath("descendant::*"): parent = elem.getparent() nxt = elem.getnext() parent.remove(elem) if DEBUG == True: print(etree.tounicode(elem)) elem = nxt if elem == end_elem: if delete_end == True: cls.remove(end_elem, leave_tail=True) elif elem is None: if parent.tail not in [None, '']: parent.tail = '' cls.remove_range(parent.getnext(), end_elem) XML.remove_if_empty(parent) elif end_elem in elem.xpath("descendant::*"): if DEBUG == True: print(elem.text) elem.text = '' cls.remove_range(elem.getchildren()[0], end_elem) XML.remove_if_empty(elem) else: print("LOGIC ERROR", file=sys.stderr)
delete everything from elem to end_elem, including elem. if delete_end==True, also including end_elem; otherwise, leave it.
def wrap_content(cls, container, wrapper): "wrap the content of container element with wrapper element" wrapper.text = (container.text or '') + (wrapper.text or '') container.text = '' for ch in container: wrapper.append(ch) container.insert(0, wrapper) return container
wrap the content of container element with wrapper element
def merge_contiguous(C, node, xpath, namespaces=None): new_node = deepcopy(node) elems = XML.xpath(new_node, xpath, namespaces=namespaces) elems.reverse() for elem in elems: nxt = elem.getnext() if elem.attrib == {}: XML.replace_with_contents(elem) elif ( elem.tail in [None, ''] and nxt is not None and elem.tag == nxt.tag and elem.attrib == nxt.attrib ): # merge nxt with elem # -- append nxt.text to elem last child tail if len(elem.getchildren()) > 0: lastch = elem.getchildren()[-1] lastch.tail = (lastch.tail or '') + (nxt.text or '') else: elem.text = (elem.text or '') + (nxt.text or '') # -- append nxt children to elem children for ch in nxt.getchildren(): elem.append(ch) # -- remove nxt XML.remove(nxt, leave_tail=True) return new_node
Within a given node, merge elements that are next to each other if they have the same tag and attributes.
def unnest(c, elem, ignore_whitespace=False): parent = elem.getparent() gparent = parent.getparent() index = parent.index(elem) # put everything up to elem into a new parent element right before the current parent preparent = etree.Element(parent.tag) preparent.text, parent.text = (parent.text or ''), '' for k in parent.attrib.keys(): preparent.set(k, parent.get(k)) if index > 0: for ch in parent.getchildren()[:index]: preparent.append(ch) gparent.insert(gparent.index(parent), preparent) XML.remove_if_empty(preparent, leave_tail=True, ignore_whitespace=ignore_whitespace) # put the element right before the current parent XML.remove(elem, leave_tail=True) gparent.insert(gparent.index(parent), elem) elem.tail = '' # if the original parent is empty, remove it XML.remove_if_empty(parent, leave_tail=True, ignore_whitespace=ignore_whitespace)
unnest the element from its parent within doc. MUTABLE CHANGES
def interior_nesting(cls, elem1, xpath, namespaces=None): for elem2 in elem1.xpath(xpath, namespaces=namespaces): child_elem1 = etree.Element(elem1.tag) for k in elem1.attrib: child_elem1.set(k, elem1.get(k)) child_elem1.text, elem2.text = elem2.text, '' for ch in elem2.getchildren(): child_elem1.append(ch) elem2.insert(0, child_elem1) XML.replace_with_contents(elem1)
for elem1 containing elements at xpath, embed elem1 inside each of those elements, and then remove the original elem1
def fragment_nesting(cls, elem1, tag2, namespaces=None): elems2 = elem1.xpath("child::%s" % tag2, namespaces=namespaces) while len(elems2) > 0: elem2 = elems2[0] parent2 = elem2.getparent() index2 = parent2.index(elem2) # all of elem2 has a new tag1 element embedded inside of it child_elem1 = etree.Element(elem1.tag) for k in elem1.attrib: child_elem1.set(k, elem1.get(k)) elem2.text, child_elem1.text = '', elem2.text for ch in elem2.getchildren(): child_elem1.append(ch) elem2.insert(0, child_elem1) # new_elem1 for all following children of parent2 new_elem1 = etree.Element(elem1.tag) for k in elem1.attrib: new_elem1.set(k, elem1.get(k)) new_elem1.text, elem2.tail = elem2.tail, '' for ch in parent2.getchildren()[index2 + 1 :]: new_elem1.append(ch) # elem2 is placed after parent2 parent = parent2.getparent() parent.insert(parent.index(parent2) + 1, elem2) last_child = elem2 # new_elem1 is placed after elem2 parent.insert(parent.index(elem2) + 1, new_elem1) new_elem1.tail, elem1.tail = elem1.tail, '' XML.remove_if_empty(elem1) XML.remove_if_empty(new_elem1) # repeat until all tag2 elements are unpacked from the new_elem1 elem1 = new_elem1 elems2 = elem1.xpath("child::%s" % tag2, namespaces=namespaces)
for elem1 containing elements with tag2, fragment elem1 into elems that are adjacent to and nested within tag2
def communityvisibilitystate(self): if self._communityvisibilitystate == None: return None elif self._communityvisibilitystate in self.VisibilityState: return self.VisibilityState[self._communityvisibilitystate] else: #Invalid State return None
Return the Visibility State of the User's Profile
def personastate(self): if self._personastate == None: return None elif self._personastate in self.PersonaState: return self.PersonaState[self._personastate] else: #Invalid State return None
Return the Persona State of the User's Profile
def mcus(): ls = [] for h in hwpack_names(): for b in board_names(h): ls += [mcu(b, h)] ls = sorted(list(set(ls))) return ls
MCU list.
def logpath2dt(filepath): return datetime.datetime.strptime(re.match(r'.*/(.*) .*$',filepath).groups()[0],'%Y-%m-%d %H-%M')
given a dataflash log filename in the format produced by Mission Planner, return a datetime indicating when the file was downloaded from the APM
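A usage sketch, assuming `logpath2dt` above is in scope; the filename is a hypothetical example of the Mission Planner naming pattern:

print(logpath2dt('/logs/2015-03-22 14-45 1.log'))
# 2015-03-22 14:45:00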
def cross(series, cross=0, direction='cross'): # Find if values are above or bellow yvalue crossing: above=series.values > cross below=scipy.logical_not(above) left_shifted_above = above[1:] left_shifted_below = below[1:] x_crossings = [] # Find indexes on left side of crossing point if direction == 'rising': idxs = (left_shifted_above & below[0:-1]).nonzero()[0] elif direction == 'falling': idxs = (left_shifted_below & above[0:-1]).nonzero()[0] else: rising = left_shifted_above & below[0:-1] falling = left_shifted_below & above[0:-1] idxs = (rising | falling).nonzero()[0] # Calculate x crossings with interpolation using formula for a line: x1 = series.index.values[idxs] x2 = series.index.values[idxs+1] y1 = series.values[idxs] y2 = series.values[idxs+1] x_crossings = (cross-y1)*(x2-x1)/(y2-y1) + x1 return x_crossings
From http://stackoverflow.com/questions/10475488/calculating-crossing-intercept-points-of-a-series-or-dataframe Given a Series, returns all the index values where the data values equal the 'cross' value. Direction can be 'rising' (for rising edge only), 'falling' (for falling edge only), or 'cross' for both edges
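A minimal usage sketch for `cross()`, assuming pandas and scipy are installed and the function above is in scope:

import pandas as pd

s = pd.Series([-1.0, 1.0, 2.0, -2.0], index=[0.0, 1.0, 2.0, 3.0])
print(cross(s))                       # crossings at x = 0.5 and x = 2.5
print(cross(s, direction='rising'))   # rising edge only: x = 0.5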
def read_body(response, max_bytes=None): finished = Deferred() response.deliverBody(TruncatingReadBodyProtocol( response.code, response.phrase, finished, max_bytes)) return finished
Return a `Deferred` yielding at most *max_bytes* bytes from the body of a Twisted Web *response*, or the whole body if *max_bytes* is `None`.
def describe_error(failure): if failure.check(ResponseFailed): if any(f.check(InfiniteRedirection) for f in failure.value.reasons): return u'Encountered too many redirects.' return u'Received incomplete response from server.' if failure.check(ConnectError, DNSLookupError, BlacklistedHost): return u'Could not connect to server.' return failure
If *failure* is a common connection error, return a Unicode string describing it. Otherwise, return *failure*.
def request(self, method, uri, headers=None, bodyProducer=None): hostname = urlparse(uri).hostname ip_str = yield self.resolve(hostname) # `ipaddress` takes a Unicode string and I don't really care to # handle `UnicodeDecodeError` separately. ip = ipaddress.ip_address(ip_str.decode('ascii', 'replace')) if ip.is_private or ip.is_loopback or ip.is_link_local: raise BlacklistedHost(hostname, ip) response = yield self.agent.request(method, uri, headers, bodyProducer) returnValue(response)
Issue a request to the server indicated by *uri*.
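The private/loopback/link-local test used above, shown standalone with the stdlib ipaddress module (illustration only):

import ipaddress

for ip_str in ('10.0.0.5', '127.0.0.1', '169.254.1.1', '93.184.216.34'):
    ip = ipaddress.ip_address(ip_str)
    print(ip_str, ip.is_private or ip.is_loopback or ip.is_link_local)
# only the last, public address would be allowed through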
def percentage_played(self): '''returns played percentage [0,1] of item''' played = self.object_dict.get('UserData', {}).get('PlaybackPositionTicks') total = self.object_dict.get('RunTimeTicks') or 1 return (played or 0)/total
returns played percentage [0,1] of item
def url(self): '''url of the item Notes ----- if remote-address was given, then that is used as the base ''' path = '/web/itemdetails.html?id={}'.format(self.id) return self.connector.get_url(path, attach_api_key=False)
url of the item Notes ----- if remote-address was given, then that is used as the base
async def update(self, fields=''): '''reload object info from emby |coro| Parameters ---------- fields : str additional fields to request when updating See Also -------- refresh : same thing send : post : ''' path = 'Users/{{UserId}}/Items/{}'.format(self.id) info = await self.connector.getJson(path, remote=False, Fields='Path,Overview,'+fields ) self.object_dict.update(info) self.extras = {} return self
reload object info from emby |coro| Parameters ---------- fields : str additional fields to request when updating See Also -------- refresh : same thing send : post :
async def send(self): '''send data that was changed to emby |coro| This should be used after using any of the setters. Not necessarily immediately, but soon after. See Also -------- post: same thing update : refresh : Returns ------- aiohttp.ClientResponse or None if nothing needed updating ''' # Why does the whole dict need to be sent? # because emby is dumb, and will break if I don't path = 'Items/{}'.format(self.id) resp = await self.connector.post(path, data=self.object_dict, remote=False) if resp.status == 400: await EmbyObject(self.object_dict, self.connector).update() resp = await self.connector.post(path, data=self.object_dict, remote=False) return resp
send data that was changed to emby |coro| This should be used after using any of the setters. Not necessarily immediately, but soon after. See Also -------- post: same thing update : refresh : Returns ------- aiohttp.ClientResponse or None if nothing needed updating