Columns: code (string lengths 52 to 7.75k) and docs (string lengths 1 to 5.85k)
def get_git_remote_url(path='.', remote='origin'): return dulwich.repo.Repo.discover(path).get_config().get((b'remote', remote.encode('utf-8')), b'url').decode('utf-8')
Get git remote url :param path: path to repo :param remote: name of the remote (default 'origin') :return: remote url or exception
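A brief usage sketch for the helper above; a hedged example that assumes dulwich is installed, the path is a git checkout, and the function above is in scope (the printed URL is only illustrative).

print(get_git_remote_url('.'))               # e.g. 'git@github.com:user/repo.git'
print(get_git_remote_url('.', 'upstream'))   # raises if the 'upstream' remote does not exist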
def plantuml(desc): classes, relations, inherits = desc result = [ '@startuml', 'skinparam defaultFontName Courier', ] for cls in classes: # issue #11 - tabular output of class members (attrs) # http://stackoverflow.com/a/8356620/258194 # build table class_desc = [] # table columns class_desc += [(i[1], i[0]) for i in cls['cols']] # class properties class_desc += [('+', i) for i in cls['props']] # methods class_desc += [('%s()' % i, '') for i in cls['methods']] result.append( 'Class %(name)s {\n%(desc)s\n}' % { 'name': cls['name'], 'desc': '\n'.join(tabular_output(class_desc)), } ) for item in inherits: result.append("%(parent)s <|-- %(child)s" % item) for item in relations: result.append("%(from)s <--o %(to)s: %(by)s" % item) result += [ 'right footer generated by sadisplay v%s' % __version__, '@enduml', ] return '\n\n'.join(result)
Generate plantuml class diagram :param desc: result of sadisplay.describe function Return plantuml class diagram string
def is_reference_target(resource, rtype, label): prop = resource.props.references.get(rtype, False) if prop: return label in prop return False
Return true if the resource has this rtype with this label
def get_sources(self, resources): rtype = self.rtype # E.g. category label = self.props.label # E.g. category1 result = [ resource for resource in resources.values() if is_reference_target(resource, rtype, label) ] return result
Filter resources based on which have this reference
def icon(self): path = self._icon if not path: return '' path = os.path.expandvars(os.path.expanduser(path)) if path.startswith('.'): base_path = os.path.dirname(self.filepath()) path = os.path.abspath(os.path.join(base_path, path)) return path
Returns the icon filepath for this plugin. :return <str>
def addPluginPath(cls, pluginpath): prop_key = '_%s__pluginpath' % cls.__name__ curr_path = getattr(cls, prop_key, None) if not curr_path: curr_path = [] setattr(cls, prop_key, curr_path) if isinstance(pluginpath, basestring): pluginpath = pluginpath.split(os.path.pathsep) for path in pluginpath: if not path: continue path = os.path.expanduser(os.path.expandvars(path)) paths = path.split(os.path.pathsep) if len(paths) > 1: cls.addPluginPath(paths) else: curr_path.append(path)
Adds the plugin path for this class to the given path. The inputted pluginpath value can either be a list of strings, or a string containing paths separated by the OS specific path separator (':' on Mac & Linux, ';' on Windows) :param pluginpath | [<str>, ..] || <str>
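A small standalone sketch of the path handling described above, not the library itself: it only shows that an os.pathsep-separated string expands to the same entries a list would, with user and environment variables resolved (the PLUGIN_HOME variable is made up).

import os

raw = os.pathsep.join(["~/plugins", "$PLUGIN_HOME/extra", ""])
# Empty entries are skipped; '~' and environment variables are expanded.
expanded = [os.path.expanduser(os.path.expandvars(p))
            for p in raw.split(os.path.pathsep) if p]
print(expanded)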
def pluginRegisterType(cls): default = Plugin.Type.Module default |= Plugin.Type.Package default |= Plugin.Type.RegistryFile return getattr(cls, '_%s__pluginRegisterType' % cls.__name__, default)
Returns the register type for this plugin class. :return <Plugin.RegisterType>
def plugin(cls, name): cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}) return plugs.get(nstr(name))
Retrieves the plugin based on the inputted name. :param name | <str> :return <Plugin>
def pluginNames(cls, enabled=True): return map(lambda x: x.name(), cls.plugins(enabled))
Returns the names of the plugins for a given class. :param enabled | <bool> || None :return [<str>, ..]
def plugins(cls, enabled=True): cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}).values() if enabled is None: return plugs return filter(lambda x: x.isEnabled() == enabled, plugs)
Returns the plugins for the given class. :param enabled | <bool> || None :return [<Plugin>, ..]
def register(cls, plugin): plugs = getattr(cls, '_%s__plugins' % cls.__name__, None) if plugs is None: cls.loadPlugins() plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}) if plugin.name() in plugs: inst = plugs[plugin.name()] # assign the plugin instance to the proxy if isinstance(inst, PluginProxy) and \ not isinstance(plugin, PluginProxy) and \ not inst._instance: inst._instance = plugin return True return False plugs[plugin.name()] = plugin setattr(cls, '_%s__plugins' % cls.__name__, plugs) return True
Registers the given plugin instance to this system. If a plugin with the same name is already registered, then this plugin will not take effect. The first registered plugin is the one that is used. :param plugin | <Plugin> :return <bool>
def setPluginPath(cls, pluginpath): setattr(cls, '_%s__pluginpath' % cls.__name__, None) cls.addPluginPath(pluginpath)
Sets the plugin path for this class to the given path. The inputted pluginpath value can either be a list of strings, or a string containing paths separated by the OS specific path separator (':' on Mac & Linux, ';' on Windows) :param pluginpath | [<str>, ..] || <str>
def unregister(cls, plugin): plugs = getattr(cls, '_%s__plugins' % cls.__name__, {}) try: plugs.pop(plugin.name()) except KeyError: pass
Unregisters the given plugin from the system based on its name. :param plugin | <Plugin>
def loadInstance(self): if self._loaded: return self._loaded = True module_path = self.modulePath() package = projex.packageFromPath(module_path) path = os.path.normpath(projex.packageRootPath(module_path)) if path in sys.path: sys.path.remove(path) sys.path.insert(0, path) try: __import__(package) except Exception, e: err = Plugin(self.name(), self.version()) err.setError(e) err.setFilepath(module_path) self._instance = err self.setError(e) msg = "%s.plugin('%s') errored loading instance from %s" opts = (self.proxyClass().__name__, self.name(), module_path) logger.warning(msg % opts) logger.error(e)
Loads the plugin from the proxy information that was created from the registry file.
def modulePath(self): base_path = os.path.dirname(self.filepath()) module_path = self.importPath() module_path = os.path.expanduser(os.path.expandvars(module_path)) if module_path.startswith('.'): module_path = os.path.abspath(os.path.join(base_path, module_path)) return module_path
Returns the module path information for this proxy plugin. This path will represent the root module that will be imported when the instance is first created of this plugin. :return <str>
def fromFile(cls, filepath): xdata = ElementTree.parse(nstr(filepath)) xroot = xdata.getroot() # collect variable information name = xroot.get('name') ver = float(xroot.get('version', '1.0')) if not name: name = os.path.basename(filepath).split('.')[0] if name == '__init__': name = os.path.normpath(filepath).split(os.path.sep)[-2] name = projex.text.pretty(name) icon = xroot.get('icon', './icon.png') ximport = xroot.find('import') if ximport is not None: importpath = ximport.get('path', './__init__.py') else: importpath = './__init__.py' params = {'description': '', 'author': '', 'email': '', 'url': ''} for param, default in params.items(): xdata = xroot.find(param) if xdata is not None: params[param] = xdata.text # generate the proxy information proxy = PluginProxy(cls, name, ver) proxy.setImportPath(importpath) proxy.setDescription(params['description']) proxy.setAuthor(params['author']) proxy.setEmail(params['email']) proxy.setUrl(params['url']) proxy.setFilepath(filepath) return proxy
Creates a proxy instance from the inputted registry file. :param filepath | <str> :return <PluginProxy> || None
def clean_resource_json(resource_json): for a in ('parent_docname', 'parent', 'template', 'repr', 'series'): if a in resource_json: del resource_json[a] props = resource_json['props'] for prop in ( 'acquireds', 'style', 'in_nav', 'nav_title', 'weight', 'auto_excerpt'): if prop in props: del props[prop] return resource_json
The catalog wants to be smaller, let's drop some stuff
def resources_to_json(resources): return { docname: clean_resource_json(resource.__json__(resources)) for (docname, resource) in resources.items() }
Make a JSON/catalog representation of the resources db
def references_to_json(resources, references): dump_references = {} for reftype, refvalue in references.items(): dump_references[reftype] = {} for label, reference_resource in refvalue.items(): target_count = len(reference_resource.get_sources(resources)) dump_references[reftype][label] = dict( count=target_count, docname=reference_resource.docname ) return dump_references
Make a JSON/catalog representation of the references db, including the count for each
def get(self, url, params=None, cache_cb=None, **kwargs): if self.use_random_user_agent: headers = kwargs.get("headers", dict()) headers.update({Headers.UserAgent.KEY: Headers.UserAgent.random()}) kwargs["headers"] = headers url = add_params(url, params) cache_consumed, value = self.try_read_cache(url) if cache_consumed: response = requests.Response() response.url = url response._content = value else: response = self.ses.get(url, **kwargs) if self.should_we_update_cache(response, cache_cb, cache_consumed): self.cache.set( url, response.content, expire=kwargs.get("cache_expire", self.cache_expire), ) return response
Make http get request. :param url: url to request. :param params: (optional) query parameters to append to the url. :param cache_cb: (optional) a function that takes a requests.Response as input and returns a bool flag indicating whether the cache should be updated. :param cache_expire: (optional) expiration time for the cached content, overriding the client default. :param kwargs: optional arguments passed to requests.
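A minimal sketch of the cache_cb contract described above; only the callback is shown, and the spider/client object it would be passed to is assumed from the surrounding methods.

import requests

def cache_ok_only(response: requests.Response) -> bool:
    # Refresh the cache only for successful, non-empty responses.
    return response.status_code == 200 and len(response.content) > 0

# Hypothetical call: client.get("https://example.com/page", cache_cb=cache_ok_only)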
def get_html(self, url, params=None, cache_cb=None, decoder_encoding=None, decoder_errors=url_specified_decoder.ErrorsHandle.strict, **kwargs): response = self.get( url=url, params=params, cache_cb=cache_cb, **kwargs ) return url_specified_decoder.decode( binary=response.content, url=response.url, encoding=decoder_encoding, errors=decoder_errors, )
Get html of an url.
def download(self, url, dst, params=None, cache_cb=None, overwrite=False, stream=False, minimal_size=-1, maximum_size=1024 ** 6, **kwargs): response = self.get( url, params=params, cache_cb=cache_cb, stream=stream, **kwargs ) if not overwrite: # pragma: no cover if os.path.exists(dst): raise OSError("'%s' exists!" % dst) if stream: chunk_size = 1024 * 1024 downloaded_size = 0 with atomic_write(dst, mode="wb") as f: for chunk in response.iter_content(chunk_size): if not chunk: # pragma: no cover break f.write(chunk) downloaded_size += len(chunk) if (downloaded_size < minimal_size) or (downloaded_size > maximum_size): self.raise_download_oversize_error( url, downloaded_size, minimal_size, maximum_size) else: content = response.content downloaded_size = len(content) if (downloaded_size < minimal_size) or (downloaded_size > maximum_size): self.raise_download_oversize_error( url, downloaded_size, minimal_size, maximum_size) else: with atomic_write(dst, mode="wb") as f: f.write(content)
Download binary content to destination. :param url: binary content url :param dst: path to the 'save_as' file :param cache_cb: (optional) a function that takes a requests.Response as input and returns a bool flag indicating whether the cache should be updated. :param overwrite: bool, whether an existing destination file may be overwritten. :param stream: bool, if True the data is read chunk by chunk instead of loading everything into memory at once. :param minimal_size: default -1, if the response content is smaller than minimal_size, delete what was just downloaded. :param maximum_size: default 1024 ** 6, if the response content is greater than maximum_size, delete what was just downloaded.
def option(*args, **kwargs): def decorate_sub_command(method): """create and add sub-command options""" if not hasattr(method, "optparser"): method.optparser = SubCmdOptionParser() method.optparser.add_option(*args, **kwargs) return method def decorate_class(klass): """store toplevel options""" assert _forgiving_issubclass(klass, Cmdln) _inherit_attr(klass, "toplevel_optparser_options", [], cp=lambda l: l[:]) klass.toplevel_optparser_options.append( (args, kwargs) ) return klass #XXX Is there a possible optimization for many options to not have a # large stack depth here? def decorate(obj): if _forgiving_issubclass(obj, Cmdln): return decorate_class(obj) else: return decorate_sub_command(obj) return decorate
Decorator to add an option to the optparser argument of a Cmdln subcommand To add a toplevel option, apply the decorator on the class itself. (see p4.py for an example) Example: @cmdln.option("-E", dest="environment_path") class MyShell(cmdln.Cmdln): @cmdln.option("-f", "--force", help="force removal") def do_remove(self, subcmd, opts, *args): #...
def _inherit_attr(klass, attr, default, cp): if attr not in klass.__dict__: if hasattr(klass, attr): value = cp(getattr(klass, attr)) else: value = default setattr(klass, attr, value)
Inherit the attribute from the base class Copy `attr` from base class (otherwise use `default`). Copying is done using the passed `cp` function. The motivation behind writing this function is to allow inheritance among Cmdln classes where base classes set 'common' options using the `@cmdln.option` decorator. To ensure this, we must not write to the base class's options when handling the derived class.
def _forgiving_issubclass(derived_class, base_class): return (type(derived_class) is ClassType and \ type(base_class) is ClassType and \ issubclass(derived_class, base_class))
Forgiving version of ``issubclass`` Does not throw any exception when arguments are not of class type
def applyTimeCalMs1(msrunContainer, specfile, correctionData, **kwargs): toleranceMode = kwargs.get('toleranceMode', 'relative') if toleranceMode == 'relative': for siId in correctionData: calibValue = correctionData[siId]['calibValue'] msrunContainer.saic[specfile][siId].arrays['mz'] *= (1 + calibValue) elif toleranceMode == 'absolute': for siId in correctionData: calibValue = correctionData[siId]['calibValue'] msrunContainer.saic[specfile][siId].arrays['mz'] += calibValue else: raise Exception('#TODO: a proper exception text')
Applies correction values to the MS1 ion m/z arrays in order to correct for a time dependent m/z error. :param msrunContainer: instance of :class:`maspy.core.MsrunContainer`, containing the :class:`maspy.core.Sai` items of the "specfile". :param specfile: filename of an ms-run file to which the m/z calibration should be applied :param correctionData: a dictionary containing the calibration values for each MS1 ``Si``, as returned by :func:`timecalMs1DataMedian()`. ``{si.id: {'calibValue': float}}`` :param toleranceMode: "relative" or "absolute" Specifies how the calibration value is applied, by default "relative".
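A tiny worked illustration of the two toleranceMode options described above (made-up numbers; numpy is assumed since the container's m/z arrays are numpy arrays).

import numpy as np

mz = np.array([400.0, 800.0])
print(mz * (1 + 5e-6))  # relative: a 5 ppm calibration value scales with m/z (400.002, 800.004)
print(mz + 0.002)       # absolute: a constant shift of 0.002 m/z (400.002, 800.002)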
def applyMassCalMs1(msrunContainer, specfile, dataFit, **kwargs): toleranceMode = kwargs.get('toleranceMode', 'relative') if toleranceMode == 'relative': for si in msrunContainer.getItems(specfile, selector=lambda si: si.msLevel==1): mzArr = msrunContainer.saic[specfile][si.id].arrays['mz'] corrArr = dataFit.corrArray(mzArr) mzArr *= (1 + corrArr) elif toleranceMode == 'absolute': for si in msrunContainer.getItems(specfile, selector=lambda si: si.msLevel==1): mzArr = msrunContainer.saic[specfile][si.id].arrays['mz'] corrArr = dataFit.corrArray(mzArr) mzArr += corrArr else: raise Exception('#TODO: a proper exception text')
Applies a correction function to the MS1 ion m/z arrays in order to correct for a m/z dependent m/z error. :param msrunContainer: instance of :class:`maspy.core.MsrunContainer`, containing the :class:`maspy.core.Sai` items of the "specfile". :param specfile: filename of an ms-run file to which the m/z calibration should be applied :param dataFit: a :class:`maspy.auxiliary.DataFit` object, containing processed calibration data. :param toleranceMode: "relative" or "absolute" Specifies how the correction value is applied, by default "relative".
def _make(self, key, content): self.say('make a new key>>>' + key + '>>>with>>>:' + str(content)) if key.isdigit(): i = int(key) # list index [p] self.say('extending parent list to contain index:' + key) # make a list with size return self._list(i, content) else: return self._dict(key, content)
Make a new container for the given key: a digit key is treated as a list index and a list is built to hold it, otherwise a dict entry is created.
def set_path(self, data, path, value): self.say('set_path:value:' + str(value) + ' at:' + str(path) + ' in:' + str(data)) if isinstance(path, str): path = path.split('.') if len(path) > 1: self.set_path(data.setdefault(path[0], {}), path[1:], value) else: data[path[0]] = value return data
Sets the given key in the given dict object to the given value. If the given path is nested, child dicts are created as appropriate. Accepts either a dot-delimited path or an array of path elements as the `path` variable.
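A standalone sketch of the behaviour described above; the real method lives on a class and logs through self.say, so this copy drops the logging to stay self-contained.

def set_path(data, path, value):
    # Walk (and create) nested dicts until the last path element, then assign.
    if isinstance(path, str):
        path = path.split('.')
    if len(path) > 1:
        set_path(data.setdefault(path[0], {}), path[1:], value)
    else:
        data[path[0]] = value
    return data

cfg = {}
set_path(cfg, 'server.http.port', 8080)
set_path(cfg, ['server', 'http', 'host'], 'localhost')
print(cfg)  # {'server': {'http': {'port': 8080, 'host': 'localhost'}}}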
def get_genericpage(cls, kb_app): # Presumes the registry has been committed q = dectate.Query('genericpage') klasses = sorted(q(kb_app), key=lambda args: args[0].order) if not klasses: # The site doesn't configure a genericpage, return Genericpage else: return klasses[0][1]
Return the one class if configured, otherwise default
def buy_product(self, product_pk): if self.invoice_sales.filter(lines_sales__product_final__pk=product_pk).exists() \ or self.ticket_sales.filter(lines_sales__product_final__pk=product_pk).exists(): return True else: return False
Determines whether the customer has bought the given product
def create_ticket_from_albaran(pk, list_lines): MODEL_SOURCE = SalesAlbaran MODEL_FINAL = SalesTicket url_reverse = 'CDNX_invoicing_ticketsaless_list' # type_doc msg_error_relation = _("Hay lineas asignadas a ticket") msg_error_not_found = _('Sales albaran not found') msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket') return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=True ).values_list('pk') if new_list_lines: new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first() if new_pk: context = SalesLines.create_ticket_from_order(new_pk, new_list_lines) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context
def create_invoice_from_albaran(pk, list_lines): MODEL_SOURCE = SalesAlbaran MODEL_FINAL = SalesInvoice url_reverse = 'CDNX_invoicing_invoicesaless_list' # type_doc msg_error_relation = _("Hay lineas asignadas a facturas") msg_error_not_found = _('Sales albaran not found') msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas') return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=False ) if new_list_lines: new_pk = new_list_lines.first() if new_pk: context = SalesLines.create_invoice_from_order( new_pk.order.pk, [x['pk'] for x in new_list_lines.values('pk')]) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context
def create_invoice_from_ticket(pk, list_lines): MODEL_SOURCE = SalesTicket MODEL_FINAL = SalesInvoice url_reverse = 'CDNX_invoicing_invoicesaless_list' # type_doc msg_error_relation = _("Hay lineas asignadas a facturas") msg_error_not_found = _('Sales ticket not found') msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas') return SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
context = {} if list_lines: new_list_lines = SalesLines.objects.filter( pk__in=[int(x) for x in list_lines] ).exclude( invoice__isnull=True ) if new_list_lines: new_pk = new_list_lines.first() if new_pk: context = SalesLines.create_invoice_from_order( new_pk.order.pk, [x['pk'] for x in new_list_lines.values('pk')]) return context else: error = _('Pedido no encontrado') else: error = _('Lineas no relacionadas con pedido') else: error = _('Lineas no seleccionadas') context['error'] = error return context
def cli(ctx): manfile = bubble_lib_dir+os.sep+'extras'+os.sep+'Bubble.1.gz' mancmd = ["/usr/bin/man", manfile] try: return subprocess.call(mancmd) except Exception as e: print('cannot run man with bubble man page') print('you can always have a look at: '+manfile)
Shows the man page packed inside the bubble tool. This is mainly to overcome the limitations of installing manual pages in a distribution-agnostic and simple way. Given the way bubble has been developed, in virtual python environments, installing a man page into a system location makes no sense: the system man page would not reflect the development version. And if your system is really bare, like docker.io/python, you will not even have man installed.
def _fetch_dimensions(self, dataset): for dimension in super(SCB, self)._fetch_dimensions(dataset): if dimension.id == "Region": yield Dimension(dimension.id, datatype="region", dialect="skatteverket", label=dimension.label) else: yield dimension
We override this method just to set the correct datatype and dialect for regions.
def call(self, func, key, timeout=None): '''Wraps a function call with cache. Args: func (function): the function to call. key (str): the cache key for this call. timeout (int): the cache timeout for the key (the unit of this parameter depends on the cache class you use, for example, if you use the classes from werkzeug, then timeout is in seconds.) Returns: The return value of calling func ''' result = self.get(key) if result == NONE_RESULT: return None if result is None: result = func() self.set( key, result if result is not None else NONE_RESULT, timeout ) return result
Wraps a function call with cache. Args: func (function): the function to call. key (str): the cache key for this call. timeout (int): the cache timeout for the key (the unit of this parameter depends on the cache class you use, for example, if you use the classes from werkzeug, then timeout is in seconds.) Returns: The return value of calling func
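A self-contained sketch of the caching pattern documented above, with a toy in-memory backend standing in for the real cache class; it shows how the NONE_RESULT sentinel keeps a legitimately-None return value from being recomputed.

NONE_RESULT = '__none_result__'  # stand-in for the library's sentinel

class DictCache:
    # Toy backend: get/set on a plain dict, ignoring timeouts.
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key)

    def set(self, key, value, timeout=None):
        self._store[key] = value

    def call(self, func, key, timeout=None):
        result = self.get(key)
        if result == NONE_RESULT:
            return None              # cached None: do not call func again
        if result is None:
            result = func()          # cache miss: compute and store
            self.set(key, result if result is not None else NONE_RESULT, timeout)
        return result

cache = DictCache()
print(cache.call(lambda: 42, 'answer'))  # computes and caches 42
print(cache.call(lambda: 99, 'answer'))  # cache hit: still 42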
def default_ssl_context() -> ssl.SSLContext: ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH) # OP_NO_SSLv2, OP_NO_SSLv3, and OP_NO_COMPRESSION are already set by default # so we just need to disable the old versions of TLS. ctx.options |= (ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1) # ALPN and NPN allow upgrades from HTTP/1.1, but these extensions are only # supported by recent versions of OpenSSL. Try to set them up, but don't cry # if they fail. try: ctx.set_alpn_protocols(["h2", "http/1.1"]) except NotImplementedError: pass try: ctx.set_npn_protocols(["h2", "http/1.1"]) except NotImplementedError: pass return ctx
Creates an SSL context suitable for use with HTTP/2. See https://tools.ietf.org/html/rfc7540#section-9.2 for what this entails. Specifically, we are interested in these points: § 9.2: Implementations of HTTP/2 MUST use TLS version 1.2 or higher. § 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable compression. The h2 project has its own ideas about how this context should be constructed but the resulting context doesn't work for us in the standard Python Docker images (though it does work under macOS). See https://python-hyper.org/projects/h2/en/stable/negotiating-http2.html#client-setup-example for more.
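A usage sketch for the context builder above; it assumes default_ssl_context is importable from this module and that example.com is reachable, and simply reports what the server negotiated.

import socket

ctx = default_ssl_context()
with socket.create_connection(("example.com", 443)) as sock:
    with ctx.wrap_socket(sock, server_hostname="example.com") as tls:
        print(tls.version())                 # e.g. 'TLSv1.2' or 'TLSv1.3'
        print(tls.selected_alpn_protocol())  # 'h2' if the server accepted HTTP/2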
async def _window_open(self, stream_id: int): stream = self._get_stream(stream_id) return await stream.window_open.wait()
Wait until the identified stream's flow control window is open.
async def send_data( self, stream_id: int, data: bytes, end_stream: bool = False, ): if self.closed: raise ConnectionClosedError stream = self._get_stream(stream_id) if stream.closed: raise StreamClosedError(stream_id) remaining = data while len(remaining) > 0: await asyncio.gather( self._writable.wait(), self._window_open(stream.id), ) remaining_size = len(remaining) window_size = self._h2.local_flow_control_window(stream.id) max_frame_size = self._h2.max_outbound_frame_size send_size = min(remaining_size, window_size, max_frame_size) if send_size == 0: continue logger.debug( f'[{stream.id}] Sending {send_size} of {remaining_size} ' f'bytes (window {window_size}, frame max {max_frame_size})' ) to_send = remaining[:send_size] remaining = remaining[send_size:] end = (end_stream is True and len(remaining) == 0) self._h2.send_data(stream.id, to_send, end_stream=end) self._flush() if self._h2.local_flow_control_window(stream.id) == 0: stream.window_open.clear()
Send data, respecting the receiver's flow control instructions. If the provided data is larger than the connection's maximum outbound frame size, it will be broken into several frames as appropriate.
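A worked illustration of the per-frame sizing described above (made-up numbers; the flow-control window is assumed to stay larger than the payload, so only the maximum frame size binds).

remaining, window_size, max_frame_size = 100_000, 1_048_576, 16_384
sent = []
while remaining > 0:
    send_size = min(remaining, window_size, max_frame_size)
    sent.append(send_size)
    remaining -= send_size
print(sent)  # six 16384-byte frames followed by a 1696-byte tail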
async def read_data(self, stream_id: int) -> bytes: frames = [f async for f in self.stream_frames(stream_id)] return b''.join(frames)
Read data from the specified stream until it is closed by the remote peer. If the stream is never ended, this never returns.
async def read_frame(self, stream_id: int) -> bytes: stream = self._get_stream(stream_id) frame = await stream.read_frame() if frame.flow_controlled_length > 0: self._acknowledge_data(frame.flow_controlled_length, stream_id) return frame.data
Read a single frame of data from the specified stream, waiting until frames are available if none are present in the local buffer. If the stream is closed and all buffered frames have been consumed, raises a StreamConsumedError.
async def get_pushed_stream_ids(self, parent_stream_id: int) -> List[int]: if parent_stream_id not in self._streams: logger.error( f'Parent stream {parent_stream_id} unknown to this connection' ) raise NoSuchStreamError(parent_stream_id) parent = self._get_stream(parent_stream_id) await parent.pushed_streams_available.wait() pushed_streams_ids = self._pushed_stream_ids[parent.id] stream_ids: List[int] = [] if len(pushed_streams_ids) > 0: stream_ids.extend(pushed_streams_ids) pushed_streams_ids.clear() parent.pushed_streams_available.clear() return stream_ids
Return a list of all streams pushed by the remote peer that are children of the specified stream. If no streams have been pushed when this method is called, waits until at least one stream has been pushed.
def populate(self, obj): # query if type(obj) is AtlasServiceInstance.Instance: query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } } elif type(obj) is AtlasServiceBinding.Binding: query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id } else: raise ErrStorageTypeUnsupported(type(obj)) # find try: result = self.broker.find_one(query) except: raise ErrStorageMongoConnection("Populate Instance or Binding") if result is not None: obj.parameters = result["parameters"] # Flags the obj to provisioned obj.provisioned = True else: # New obj.provisioned = False
Populate Query mongo to get information about the obj if it exists Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Raises: ErrStorageTypeUnsupported: Type unsupported. ErrStorageMongoConnection: Error during MongoDB communication.
def store(self, obj): # query if type(obj) is AtlasServiceInstance.Instance: query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters } elif type(obj) is AtlasServiceBinding.Binding: query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id } else: raise ErrStorageTypeUnsupported(type(obj)) # insert try: result = self.broker.insert_one(query) except: raise ErrStorageMongoConnection("Store Instance or Binding") if result is not None: # Flags the obj to provisioned obj.provisioned = True return result.inserted_id raise ErrStorageStore()
Store Store an object into the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Returns: ObjectId: MongoDB _id Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageTypeUnsupported: Type unsupported. ErrStorageStore : Failed to store the binding or instance.
def remove(self, obj): if type(obj) is AtlasServiceInstance.Instance: self.remove_instance(obj) elif type(obj) is AtlasServiceBinding.Binding: self.remove_binding(obj) else: raise ErrStorageTypeUnsupported(type(obj))
Remove Remove an object from the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Raises: ErrStorageTypeUnsupported: Type unsupported.
def remove_instance(self, instance): # query query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } } # delete the instance try: result = self.broker.delete_one(query) except: raise ErrStorageMongoConnection("Remove Instance") # return the result if result is not None and result.deleted_count == 1: instance.provisioned = False else: raise ErrStorageRemoveInstance(instance.instance_id)
Remove an instance Remove an object from the MongoDB storage for caching Args: instance (AtlasServiceInstance.Instance): instance Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageRemoveInstance: Failed to remove the instance.
def remove_binding(self, binding): # query query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id } # delete the binding try: result = self.broker.delete_one(query) except: raise ErrStorageMongoConnection("Remove Binding") # return the result if result is not None and result.deleted_count == 1: binding.provisioned = False else: raise ErrStorageRemoveBinding(binding.binding_id)
Remove a binding Remove an object from the MongoDB storage for caching Args: binding (AtlasServiceBinding.Binding): binding Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageRemoveBinding: Failed to remove the binding
def handle(self, request, buffer_size): if self.component_type == StreamComponent.SOURCE: msg = self.handler_function() return self.__send(request, msg) logger = self.logger data = self.__receive(request, buffer_size) if data is None: return False else: logger.debug(data.split(self.TERMINATOR)) for message in data.split(self.TERMINATOR)[:-1]: logger.debug(message) result = self.handler_function(message) if self.component_type == StreamComponent.PROCESSOR: if not self.__send(request, result): return False return True
Handle a message :param request: the request socket. :param buffer_size: the buffer size. :return: True if success, False otherwise
def handle(self, request, buffer_size): logger = self.logger data = self.__receive(request, buffer_size) if data is None: return False else: arr = array('B',data) for message in split_array(arr,StxEtxHandler.ETX): if message[0] == StxEtxHandler.STX: message = message[1:] logger.debug(message) result = self.handler_function(bytearray(message)) if self.component_type == StreamComponent.PROCESSOR: if not self.__send(request, result): return False return True
Handle a message :param request: the request socket. :param buffer_size: the buffer size. :return: True if success, False otherwise
def handle(self, request, buffer_size): logger = self.logger msg = self.__receive(request, buffer_size) if msg is None: return False result = self.handler_function(msg) if self.component_type == StreamComponent.PROCESSOR: return self.__send(request, result) return True
Handle a message :param request: the request socket. :param buffer_size: the buffer size. :return: True if success, False otherwise
def defaultFetchSiAttrFromSmi(smi, si): for key, value in viewitems(fetchSpectrumInfo(smi)): setattr(si, key, value) for key, value in viewitems(fetchScanInfo(smi)): setattr(si, key, value) if si.msLevel > 1: for key, value in viewitems(fetchParentIon(smi)): setattr(si, key, value)
Default method to extract attributes from a spectrum metadata item (smi) and add them to a spectrum item (si).
def convertMzml(mzmlPath, outputDirectory=None): outputDirectory = outputDirectory if outputDirectory is not None else os.path.dirname(mzmlPath) msrunContainer = importMzml(mzmlPath) msrunContainer.setPath(outputDirectory) msrunContainer.save()
Imports an mzml file and converts it to a MsrunContainer file :param mzmlPath: path of the mzml file :param outputDirectory: directory where the MsrunContainer file should be written; if it is not specified, the output directory is set to the mzml file's directory.
def prepareSiiImport(siiContainer, specfile, path, qcAttr, qcLargerBetter, qcCutoff, rankAttr, rankLargerBetter): if specfile not in siiContainer.info: siiContainer.addSpecfile(specfile, path) else: raise Exception('...') siiContainer.info[specfile]['qcAttr'] = qcAttr siiContainer.info[specfile]['qcLargerBetter'] = qcLargerBetter siiContainer.info[specfile]['qcCutoff'] = qcCutoff siiContainer.info[specfile]['rankAttr'] = rankAttr siiContainer.info[specfile]['rankLargerBetter'] = rankLargerBetter
Prepares the ``siiContainer`` for the import of peptide spectrum matching results. Adds entries to ``siiContainer.container`` and to ``siiContainer.info``. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. :param path: folder location used by the ``SiiContainer`` to save and load data to the hard disk. :param qcAttr: name of the parameter to define a ``Sii`` quality cut off. Typically this is some sort of a global false positive estimator, for example a 'false discovery rate' (FDR). :param qcLargerBetter: bool, True if a large value for the ``.qcAttr`` means a higher confidence. :param qcCutoff: float, the quality threshold for the specified ``.qcAttr`` :param rankAttr: name of the parameter used for ranking ``Sii`` according to how well they match to a fragment ion spectrum, in the case when there are multiple ``Sii`` present for the same spectrum. :param rankLargerBetter: bool, True if a large value for the ``.rankAttr`` means a better match to the fragment ion spectrum. For details on ``Sii`` ranking see :func:`applySiiRanking()` For details on ``Sii`` quality validation see :func:`applySiiQcValidation()`
def addSiiToContainer(siiContainer, specfile, siiList): for sii in siiList: if sii.id not in siiContainer.container[specfile]: siiContainer.container[specfile][sii.id] = list() siiContainer.container[specfile][sii.id].append(sii)
Adds the ``Sii`` elements contained in the siiList to the appropriate list in ``siiContainer.container[specfile]``. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers. :param siiList: a list of ``Sii`` elements imported from any PSM search engine results
def applySiiRanking(siiContainer, specfile): attr = siiContainer.info[specfile]['rankAttr'] reverse = siiContainer.info[specfile]['rankLargerBetter'] for itemList in listvalues(siiContainer.container[specfile]): sortList = [(getattr(sii, attr), sii) for sii in itemList] itemList = [sii for score, sii in sorted(sortList, reverse=reverse)] #Rank Sii according to their position lastValue = None for itemPosition, item in enumerate(itemList, 1): if getattr(item, attr) != lastValue: rank = itemPosition item.rank = rank lastValue = getattr(item, attr)
Iterates over all Sii entries of a specfile in siiContainer and sorts Sii elements of the same spectrum according to the score attribute specified in ``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then ranked according to their sorted position; if multiple Sii have the same score, all get the same rank and the next entry's rank is its list position. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers.
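A standalone sketch of the ranking rule described above, applied to a made-up, already-sorted score list (larger is better).

scores = [30.0, 30.0, 25.0, 10.0]
ranks, last = [], None
for position, score in enumerate(scores, 1):
    if score != last:
        rank = position   # a new score takes its 1-based list position as rank
        last = score
    ranks.append(rank)    # ties keep the previous rank
print(ranks)  # [1, 1, 3, 4]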
def applySiiQcValidation(siiContainer, specfile): attr = siiContainer.info[specfile]['qcAttr'] cutOff = siiContainer.info[specfile]['qcCutoff'] if siiContainer.info[specfile]['qcLargerBetter']: evaluator = lambda sii: getattr(sii, attr) >= cutOff and sii.rank == 1 else: evaluator = lambda sii: getattr(sii, attr) <= cutOff and sii.rank == 1 for itemList in listvalues(siiContainer.container[specfile]): #Set the .isValid attribute of all Sii to False for sii in itemList: sii.isValid = False #Validate the first Sii sii = itemList[0] if evaluator(sii): sii.isValid = True
Iterates over all Sii entries of a specfile in siiContainer and validates if they surpass a user defined quality threshold. The parameters for validation are defined in ``siiContainer.info[specfile]``: - ``qcAttr``, ``qcCutoff`` and ``qcLargerBetter`` In addition to passing this validation a ``Sii`` also has to be at the first list position in the ``siiContainer.container``. If both criteria are met the attribute ``Sii.isValid`` is set to ``True``. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers.
def _importDinosaurTsv(filelocation): with io.open(filelocation, 'r', encoding='utf-8') as openFile: #NOTE: this is pretty similar to importing percolator results, maybe unify in a common function lines = openFile.readlines() headerDict = dict([[y,x] for (x,y) in enumerate(lines[0].strip().split('\t'))]) featureDict = dict() for linePos, line in enumerate(lines[1:]): featureId = str(linePos) fields = line.strip().split('\t') entryDict = dict() for headerName, headerPos in viewitems(headerDict): entryDict[headerName] = float(fields[headerPos]) if headerName in ['rtApex', 'rtEnd', 'rtStart', 'fwhm']: #Convert to seconds entryDict[headerName] *= 60 elif headerName in ['charge', 'intensitySum', 'nIsotopes', 'nScans', 'intensityApex']: entryDict[headerName] = int(entryDict[headerName]) featureDict[featureId] = entryDict return featureDict
Reads a Dinosaur tsv file. :returns: {featureKey1: {attribute1:value1, attribute2:value2, ...}, ...} See also :func:`importPeptideFeatures`
def rst_to_html(input_string: str) -> str: overrides = dict(input_encoding='unicode', doctitle_xform=True, initial_header_level=1) parts = publish_parts( writer_name='html', source=input_string, settings_overrides=overrides ) return parts['html_body']
Given a string of RST, use docutils to generate html
def get_rst_title(rst_doc: Node) -> Optional[Any]: for title in rst_doc.traverse(nodes.title): return title.astext() return None
Given some RST, extract what docutils thinks is the title
def get_rst_excerpt(rst_doc: document, paragraphs: int = 1) -> str: texts = [] for count, p in enumerate(rst_doc.traverse(paragraph)): texts.append(p.astext()) if count + 1 == paragraphs: break return ' '.join(texts)
Given rst, parse and return a portion
def requires_password_auth(fn): def wrapper(self, *args, **kwargs): self.auth_context = HAPI.auth_context_password return fn(self, *args, **kwargs) return wrapper
Decorator for HAPI methods that requires the instance to be authenticated with a password
def requires_api_auth(fn): def wrapper(self, *args, **kwargs): self.auth_context = HAPI.auth_context_hapi return fn(self, *args, **kwargs) return wrapper
Decorator for HAPI methods that requires the instance to be authenticated with a HAPI token
def parse(response): """Split a a=1&b=2&c=3 string into a dictionary of pairs""" tokens = {r[0]: r[1] for r in [r.split('=') for r in response.split("&")]} # The odd dummy parameter is of no use to us if 'dummy' in tokens: del tokens['dummy'] """ If we have key names that end in digits, these indicate the result set contains multiple sets For example, planet0=Hoth&x=1&y=-10&planet1=Naboo&x=9&y=13 is actually data for two planets Elements that end in digits (like tag0, tag1 for planets) are formatted like (tag0_1, tag1_1), so we rstrip underscores afterwards. """ if re.match('\D\d+$', tokens.keys()[0]): # Produce a dict of dictionaries keyed by the base name set_tokens = {} for key, value in tokens.items(): key = re.match('^(.+\D)(\d+)$', key) # If the key isn't in the format (i.e. a failsafe), skip it if key is not None: if key.group(1) not in set_tokens: set_tokens[key.group(1)] = {} set_tokens[key.group(1)][key.group(0).rstrip('_')] = value tokens = set_tokens return tokens
Parse a postdata-style response format from the API into usable data
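A minimal sketch of the simple, non-indexed case the parser handles, independent of the class above.

response = "planet=Hoth&x=1&y=-10&dummy=0"
tokens = dict(pair.split('=', 1) for pair in response.split('&'))
tokens.pop('dummy', None)   # the dummy parameter carries no information
print(tokens)               # {'planet': 'Hoth', 'x': '1', 'y': '-10'}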
def init_chain(self): if not self._hasinit: self._hasinit = True self._devices = [] self.jtag_enable() while True: # pylint: disable=no-member idcode = self.rw_dr(bitcount=32, read=True, lastbit=False)() if idcode in NULL_ID_CODES: break dev = self.initialize_device_from_id(self, idcode) if self._debug: print(dev) self._devices.append(dev) if len(self._devices) >= 128: raise JTAGTooManyDevicesError("This is an arbitrary " "limit to deal with breaking infinite loops. If " "you have more devices, please open a bug") self.jtag_disable() #The chain comes out last first. Reverse it to get order. self._devices.reverse()
Autodetect the devices attached to the Controller, and initialize a JTAGDevice for each. This is a required call before device specific Primitives can be used.
def get_fitted_lv1_prim(self, reqef, bitcount): res = self._fitted_lv1_prim_cache.get(reqef) if res: return res prim = self.get_best_lv1_prim(reqef, bitcount) dispatcher = PrimitiveLv1Dispatcher(self, prim, reqef) self._fitted_lv1_prim_cache[reqef] = dispatcher return dispatcher
            request
 r          -  A  C  0  1
 e       - |?  !  !  !  !
 s       A |?  ✓  ✓  0  1   (Check this logic)
 u       C |?  m  ✓  0  1
 l       0 |?  M  M  0  !
 t       1 |?  M  M  !  1

 - = No Care    A = arbitrary    C = Constant    0 = ZERO    1 = ONE
 ! = ERROR      ? = NO CARE RESULT    ✓ = Pass data directly
 m = will require reconfiguring argument and using multiple of prim
 M = Requires using multiple of several prims to satisfy requirement
def _UserUpdateConfigValue(self, configKey, strDescriptor, isDir = True, dbConfigValue = None): newConfigValue = None if dbConfigValue is None: prompt = "Enter new {0} or 'x' to exit: ".format(strDescriptor) else: prompt = "Enter 'y' to use existing {0}, enter a new {0} or 'x' to exit: ".format(strDescriptor) while newConfigValue is None: response = goodlogging.Log.Input("CLEAR", prompt) if response.lower() == 'x': sys.exit(0) elif dbConfigValue is not None and response.lower() == 'y': newConfigValue = dbConfigValue elif not isDir: newConfigValue = response self._db.SetConfigValue(configKey, newConfigValue) else: if os.path.isdir(response): newConfigValue = os.path.abspath(response) self._db.SetConfigValue(configKey, newConfigValue) else: goodlogging.Log.Info("CLEAR", "{0} is not recognised as a directory".format(response)) return newConfigValue
Allow user to set or update config values in the database table. This is always called if no valid entry exists in the table already. Parameters ---------- configKey : string Name of config field. strDescriptor : string Description of config field. isDir : boolean [optional : default = True] Set to True if config value is expected to be a directory path. dbConfigValue : string [optional : default = None] The value of an existing entry for the given config field. Returns ---------- string New value for given config field in database.
def _GetConfigValue(self, configKey, strDescriptor, isDir = True): goodlogging.Log.Info("CLEAR", "Loading {0} from database:".format(strDescriptor)) goodlogging.Log.IncreaseIndent() configValue = self._db.GetConfigValue(configKey) if configValue is None: goodlogging.Log.Info("CLEAR", "No {0} exists in database".format(strDescriptor)) configValue = self._UserUpdateConfigValue(configKey, strDescriptor, isDir) else: goodlogging.Log.Info("CLEAR", "Got {0} {1} from database".format(strDescriptor, configValue)) if not isDir or os.path.isdir(configValue): goodlogging.Log.Info("CLEAR", "Using {0} {1}".format(strDescriptor, configValue)) goodlogging.Log.DecreaseIndent() return configValue else: goodlogging.Log.Info("CLEAR", "Exiting... {0} is not recognised as a directory".format(configValue)) sys.exit(0)
Get configuration value from database table. If no value found user will be prompted to enter one. Parameters ---------- configKey : string Name of config field. strDescriptor : string Description of config field. isDir : boolean [optional : default = True] Set to True if config value is expected to be a directory path. Returns ---------- string Value for given config field in database.
def _UserUpdateSupportedFormats(self, origFormatList = []): formatList = list(origFormatList) inputDone = None while inputDone is None: prompt = "Enter new format (e.g. .mp4, .avi), " \ "'r' to reset format list, " \ "'f' to finish or " \ "'x' to exit: " response = goodlogging.Log.Input("CLEAR", prompt) if response.lower() == 'x': sys.exit(0) elif response.lower() == 'f': inputDone = 1 elif response.lower() == 'r': formatList = [] else: if response is not None: if(response[0] != '.'): response = '.' + response formatList.append(response) formatList = set(formatList) origFormatList = set(origFormatList) if formatList != origFormatList: self._db.PurgeSupportedFormats() for fileFormat in formatList: self._db.AddSupportedFormat(fileFormat) return formatList
Add supported formats to database table. Always called if the database table is empty. User can build a list of entries to add to the database table (one entry at a time). Once finished they select the finish option and all entries will be added to the table. They can reset the list at any time before finishing. Parameters ---------- origFormatList : list [optional : default = []] List of original formats from database table. Returns ---------- string List of updated formats from database table.
def _GetSupportedFormats(self): goodlogging.Log.Info("CLEAR", "Loading supported formats from database:") goodlogging.Log.IncreaseIndent() formatList = self._db.GetSupportedFormats() if formatList is None: goodlogging.Log.Info("CLEAR", "No supported formats exist in database") formatList = self._UserUpdateSupportedFormats() else: goodlogging.Log.Info("CLEAR", "Got supported formats from database: {0}".format(formatList)) goodlogging.Log.Info("CLEAR", "Using supported formats: {0}".format(formatList)) goodlogging.Log.DecreaseIndent() return formatList
Get supported format values from database table. If no values found user will be prompted to enter values for this table. Returns ---------- string List of supported formats from database table.
def _UserUpdateIgnoredDirs(self, origIgnoredDirs = []): ignoredDirs = list(origIgnoredDirs) inputDone = None while inputDone is None: prompt = "Enter new directory to ignore (e.g. DONE), " \ "'r' to reset directory list, " \ "'f' to finish or " \ "'x' to exit: " response = goodlogging.Log.Input("CLEAR", prompt) if response.lower() == 'x': sys.exit(0) elif response.lower() == 'f': inputDone = 1 elif response.lower() == 'r': ignoredDirs = [] else: if response is not None: ignoredDirs.append(response) ignoredDirs = set(ignoredDirs) origIgnoredDirs = set(origIgnoredDirs) if ignoredDirs != origIgnoredDirs: self._db.PurgeIgnoredDirs() for ignoredDir in ignoredDirs: self._db.AddIgnoredDir(ignoredDir) return list(ignoredDirs)
Add ignored directories to database table. Always called if the database table is empty. User can build a list of entries to add to the database table (one entry at a time). Once finished they select the finish option and all entries will be added to the table. They can reset the list at any time before finishing. Parameters ---------- origIgnoredDirs : list [optional : default = []] List of original ignored directories from database table. Returns ---------- string List of updated ignored directories from database table.
def _GetIgnoredDirs(self): goodlogging.Log.Info("CLEAR", "Loading ignored directories from database:") goodlogging.Log.IncreaseIndent() ignoredDirs = self._db.GetIgnoredDirs() if ignoredDirs is None: goodlogging.Log.Info("CLEAR", "No ignored directories exist in database") ignoredDirs = self._UserUpdateIgnoredDirs() else: goodlogging.Log.Info("CLEAR", "Got ignored directories from database: {0}".format(ignoredDirs)) if self._archiveDir not in ignoredDirs: ignoredDirs.append(self._archiveDir) goodlogging.Log.Info("CLEAR", "Using ignored directories: {0}".format(ignoredDirs)) goodlogging.Log.DecreaseIndent() return ignoredDirs
Get ignored directories values from database table. If no values found user will be prompted to enter values for this table. Returns ---------- string List of ignored directories from database table.
def _GetDatabaseConfig(self): goodlogging.Log.Seperator() goodlogging.Log.Info("CLEAR", "Getting configuration variables...") goodlogging.Log.IncreaseIndent() # SOURCE DIRECTORY if self._sourceDir is None: self._sourceDir = self._GetConfigValue('SourceDir', 'source directory') # TV DIRECTORY if self._inPlaceRename is False and self._tvDir is None: self._tvDir = self._GetConfigValue('TVDir', 'tv directory') # ARCHIVE DIRECTORY self._archiveDir = self._GetConfigValue('ArchiveDir', 'archive directory', isDir = False) # SUPPORTED FILE FORMATS self._supportedFormatsList = self._GetSupportedFormats() # IGNORED DIRECTORIES self._ignoredDirsList = self._GetIgnoredDirs() goodlogging.Log.NewLine() goodlogging.Log.Info("CLEAR", "Configuration is:") goodlogging.Log.IncreaseIndent() goodlogging.Log.Info("CLEAR", "Source directory = {0}".format(self._sourceDir)) goodlogging.Log.Info("CLEAR", "TV directory = {0}".format(self._tvDir)) goodlogging.Log.Info("CLEAR", "Supported formats = {0}".format(self._supportedFormatsList)) goodlogging.Log.Info("CLEAR", "Ignored directory list = {0}".format(self._ignoredDirsList)) goodlogging.Log.ResetIndent()
Get all configuration from database. This includes values from the Config table as well as populating lists for supported formats and ignored directories from their respective database tables.
def _GetSupportedFilesInDir(self, fileDir, fileList, supportedFormatList, ignoreDirList): goodlogging.Log.Info("CLEAR", "Parsing file directory: {0}".format(fileDir)) if os.path.isdir(fileDir) is True: for globPath in glob.glob(os.path.join(fileDir, '*')): if util.FileExtensionMatch(globPath, supportedFormatList): newFile = tvfile.TVFile(globPath) if newFile.GetShowDetails(): fileList.append(newFile) elif os.path.isdir(globPath): if(os.path.basename(globPath) in ignoreDirList): goodlogging.Log.Info("CLEAR", "Skipping ignored directory: {0}".format(globPath)) else: self._GetSupportedFilesInDir(globPath, fileList, supportedFormatList, ignoreDirList) else: goodlogging.Log.Info("CLEAR", "Ignoring unsupported file or folder: {0}".format(globPath)) else: goodlogging.Log.Info("CLEAR", "Invalid non-directory path given to parse")
Recursively get all supported files given a root search directory. Supported file extensions are given as a list, as are any directories which should be ignored. The result will be appended to the given file list argument. Parameters ---------- fileDir : string Path to root of directory tree to search. fileList : string List to add any found files to. supportedFormatList : list List of supported file extensions. ignoreDirList : list List of directories to ignore.
def Run(self): self._GetArgs() goodlogging.Log.Info("CLEAR", "Using database: {0}".format(self._databasePath)) self._db = database.RenamerDB(self._databasePath) if self._dbPrint or self._dbUpdate: goodlogging.Log.Seperator() self._db.PrintAllTables() if self._dbUpdate: goodlogging.Log.Seperator() self._db.ManualUpdateTables() self._GetDatabaseConfig() if self._enableExtract: goodlogging.Log.Seperator() extractFileList = [] goodlogging.Log.Info("CLEAR", "Parsing source directory for compressed files") goodlogging.Log.IncreaseIndent() extract.GetCompressedFilesInDir(self._sourceDir, extractFileList, self._ignoredDirsList) goodlogging.Log.DecreaseIndent() goodlogging.Log.Seperator() extract.Extract(extractFileList, self._supportedFormatsList, self._archiveDir, self._skipUserInputExtract) goodlogging.Log.Seperator() tvFileList = [] goodlogging.Log.Info("CLEAR", "Parsing source directory for compatible files") goodlogging.Log.IncreaseIndent() self._GetSupportedFilesInDir(self._sourceDir, tvFileList, self._supportedFormatsList, self._ignoredDirsList) goodlogging.Log.DecreaseIndent() tvRenamer = renamer.TVRenamer(self._db, tvFileList, self._archiveDir, guideName = 'EPGUIDES', tvDir = self._tvDir, inPlaceRename = self._inPlaceRename, forceCopy = self._crossSystemCopyEnabled, skipUserInput = self._skipUserInputRename) tvRenamer.Run()
Main entry point for ClearManager class. Does the following steps: - Parse script arguments. - Optionally print or update database tables. - Get all configuration settings from database. - Optionally parse directory for file extraction. - Recursively parse source directory for files matching supported format list. - Call renamer.TVRenamer with file list.
def _merge_prims(prims, *, debug=False, stagenames=None, stages=None): if isinstance(prims, FrameSequence): merged_prims = FrameSequence(prims._chain) else: merged_prims = [] working_prim = prims[0] i = 1 logging_tmp = [] while i < len(prims): tmp = prims[i] res = working_prim.merge(tmp) if res is not None: working_prim = res if debug:#pragma: no cover logging_tmp.append( [p.snapshot() for p in merged_prims+[working_prim]]) else: merged_prims.append(working_prim) working_prim = tmp i += 1 merged_prims.append(working_prim) if debug:#pragma: no cover stages.append(logging_tmp) stagenames.append("Merge intermediate states") return merged_prims
Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class. Used by a CommandQueue during compilation and optimization of Primitives. Args: prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together. debug: A boolean for if debug information should be generated. stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True. stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True. Returns: A list or FrameSequence (the same type as prims) of the combined Primitives or Frames.
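A standalone sketch of the greedy pairwise merge described above; the merge rule (folding consecutive integers into ranges) is made up purely for illustration, whereas the real code merges Primitive/Frame objects through their own merge() methods.

def merge_adjacent(items, merge):
    # Keep folding the next item into the working one while merge() succeeds.
    merged, working = [], items[0]
    for nxt in items[1:]:
        res = merge(working, nxt)
        if res is not None:
            working = res
        else:
            merged.append(working)
            working = nxt
    merged.append(working)
    return merged

def merge_run(a, b):
    # Toy rule: append b to a run of consecutive integers, else refuse to merge.
    start, end = a if isinstance(a, tuple) else (a, a)
    return (start, b) if b == end + 1 else None

print(merge_adjacent([1, 2, 3, 7, 8, 10], merge_run))  # [(1, 3), (7, 8), 10]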
def flush(self): self.stages = [] self.stagenames = [] if not self.queue: return if self.print_statistics:#pragma: no cover print("LEN OF QUENE", len(self)) t = time() if self._chain._collect_compiler_artifacts: self._compile(debug=True, stages=self.stages, stagenames=self.stagenames) else: self._compile() if self.debug: print("ABOUT TO EXEC", self.queue)#pragma: no cover if self.print_statistics:#pragma: no cover print("COMPILE TIME", time()-t) print("TOTAL BITS OF ALL PRIMS", sum( (p.count for p in self.queue if hasattr(p, 'count')))) t = time() self._chain._controller._execute_primitives(self.queue) if self.print_statistics: print("EXECUTE TIME", time()-t)#pragma: no cover self.queue = [] self._chain._sm.state = self._fsm.state
Force the queue of Primitives to compile, execute on the Controller, and fulfill promises with the data returned.
def Ping(self, request, context): status = processor_pb2.Status() status.message='alive' return status
Invoke the Server health endpoint :param request: Empty :param context: the request context :return: Status message 'alive'
def Process(self, request, context): logger.debug(request) message = Message.__from_protobuf_message__(request) sig = getfullargspec(self.handler_function) if len(sig.args) == 2: result = self.handler_function(message.payload, message.headers) elif len(sig.args) == 1: result = self.handler_function(message.payload) else: context.set_code(grpc.StatusCode.INTERNAL) context.set_details('wrong number of arguments for handler function - must be 1 or 2') raise RuntimeError('wrong number of arguments for handler function - must be 1 or 2') if self.component_type == StreamComponent.PROCESSOR: if type(result) == Message: return result.__to_protobuf_message__() else: headers = MessageHeaders() headers.copy(message.headers) return Message(result, headers).__to_protobuf_message__()
Invoke the Grpc Processor, delegating to the handler_function. If the handler_function has a single argument, pass the Message payload. If two arguments, pass the payload and headers as positional arguments: handler_function(payload, headers). If the handler function return is not of type(Message), create a new Message using the original header values (new id and timestamp). :param request: the message :param context: the request context :return: response message
def step_impl(context): expected_lines = context.text.split('\n') assert len(expected_lines) == len(context.output) for expected, actual in zip(expected_lines, context.output): print('--\n\texpected: {}\n\tactual: {}'.format(expected, actual)) assert expected == actual
Compares text as written to the log output
def _ParseShowList(self, checkOnly=False): showTitleList = [] showIDList = [] csvReader = csv.reader(self._allShowList.splitlines()) for rowCnt, row in enumerate(csvReader): if rowCnt == 0: # Get header column index for colCnt, column in enumerate(row): if column == 'title': titleIndex = colCnt if column == self.ID_LOOKUP_TAG: lookupIndex = colCnt else: try: showTitleList.append(row[titleIndex]) showIDList.append(row[lookupIndex]) except UnboundLocalError: goodlogging.Log.Fatal("EPGUIDE", "Error detected in EPGUIDES allshows csv content") else: if checkOnly and rowCnt > 1: return True self._showTitleList = showTitleList self._showIDList = showIDList return True
Read self._allShowList as csv file and make list of titles and IDs. Parameters ---------- checkOnly : boolean [optional : default = False] If checkOnly is True this will only check to ensure the column headers can be extracted correctly.
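The header-indexed CSV parsing described above, reduced to a standalone sketch with made-up data; the real lookup column name comes from self.ID_LOOKUP_TAG, so 'tvrage' here is only a placeholder.

import csv

raw = "title,directory,tvrage\nSome Show,someshow,123\nOther Show,othershow,456"

titles, ids = [], []
for rowCnt, row in enumerate(csv.reader(raw.splitlines())):
    if rowCnt == 0:
        # locate the columns of interest from the header row
        titleIndex = row.index('title')
        lookupIndex = row.index('tvrage')
    else:
        titles.append(row[titleIndex])
        ids.append(row[lookupIndex])

print(titles, ids)  # ['Some Show', 'Other Show'] ['123', '456']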
def _GetAllShowList(self): today = datetime.date.today().strftime("%Y%m%d") saveFile = '_epguides_' + today + '.csv' saveFilePath = os.path.join(self._saveDir, saveFile) if os.path.exists(saveFilePath): # Load data previous saved to file with open(saveFilePath, 'r') as allShowsFile: self._allShowList = allShowsFile.read() else: # Download new list from EPGUIDES and strip any leading or trailing whitespace self._allShowList = util.WebLookup(self.ALLSHOW_IDLIST_URL).strip() if self._ParseShowList(checkOnly=True): # Save to file to avoid multiple url requests in same day with open(saveFilePath, 'w') as allShowsFile: goodlogging.Log.Info("EPGUIDE", "Adding new EPGUIDES file: {0}".format(saveFilePath), verbosity=self.logVerbosity) allShowsFile.write(self._allShowList) # Delete old copies of this file globPattern = '_epguides_????????.csv' globFilePath = os.path.join(self._saveDir, globPattern) for filePath in glob.glob(globFilePath): if filePath != saveFilePath: goodlogging.Log.Info("EPGUIDE", "Removing old EPGUIDES file: {0}".format(filePath), verbosity=self.logVerbosity) os.remove(filePath)
Populates self._allShowList with the epguides all show info. On the first lookup for a day the information will be loaded from the epguides url. This will be saved to local file _epguides_YYYYMMDD.csv and any old files will be removed. Subsequent accesses for the same day will read this file.
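The per-day cache-file naming and stale-file cleanup pattern used above, shown in isolation; a temporary directory stands in for self._saveDir and the download step is skipped.

import datetime
import glob
import os
import tempfile

saveDir = tempfile.mkdtemp()
today = datetime.date.today().strftime("%Y%m%d")
savePath = os.path.join(saveDir, '_epguides_' + today + '.csv')

# write today's cache file (in the real code this is the downloaded show list)
with open(savePath, 'w') as allShowsFile:
    allShowsFile.write('title,tvrage\n')

# remove cached copies left over from previous days
for filePath in glob.glob(os.path.join(saveDir, '_epguides_????????.csv')):
    if filePath != savePath:
        os.remove(filePath)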
def _GetShowID(self, showName): self._GetTitleList() self._GetIDList() for index, showTitle in enumerate(self._showTitleList): if showName == showTitle: return self._showIDList[index] return None
Get epguides show id for a given show name.

Attempts to match the given show name against a show title in self._showTitleList and, if found, returns the corresponding entry in self._showIDList.

Parameters
----------
showName : string
    Show name to get show ID for.

Returns
----------
string or None
    If a show id is found this will be returned, otherwise None is returned.
def _ExtractDataFromShowHtml(self, html):
    htmlLines = html.splitlines()
    for count, line in enumerate(htmlLines):
        if line.strip() == r'<pre>':
            startLine = count+1
        if line.strip() == r'</pre>':
            endLine = count

    try:
        dataList = htmlLines[startLine:endLine]
        dataString = '\n'.join(dataList)
        return dataString.strip()
    except UnboundLocalError:
        # startLine or endLine was never set because no <pre> block was found
        raise Exception("Show content not found - check EPGuides html formatting")
Extracts csv show data from epguides html source. Parameters ---------- html : string Block of html text Returns ---------- string Show data extracted from html text in csv format.
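A self-contained sketch of the <pre>-block extraction on a made-up html snippet, mirroring the line-scanning approach used above.

html = "<html>\n<body>\n<pre>\nseason,episode,title\n1,1,Pilot\n</pre>\n</body>\n</html>"

htmlLines = html.splitlines()
startLine = endLine = None
for count, line in enumerate(htmlLines):
    if line.strip() == '<pre>':
        startLine = count + 1   # data starts on the line after <pre>
    elif line.strip() == '</pre>':
        endLine = count         # data ends on the line before </pre>

print('\n'.join(htmlLines[startLine:endLine]).strip())
# season,episode,title
# 1,1,Pilot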
def _GetEpisodeName(self, showID, season, episode): # Load data for showID from dictionary showInfo = csv.reader(self._showInfoDict[showID].splitlines()) for rowCnt, row in enumerate(showInfo): if rowCnt == 0: # Get header column index for colCnt, column in enumerate(row): if column == 'season': seasonIndex = colCnt if column == 'episode': episodeIndex = colCnt if column == 'title': titleIndex = colCnt else: # Iterate rows until matching season and episode found try: int(row[seasonIndex]) int(row[episodeIndex]) except ValueError: # Skip rows which don't provide integer season or episode numbers pass else: if int(row[seasonIndex]) == int(season) and int(row[episodeIndex]) == int(episode): goodlogging.Log.Info("EPGUIDE", "Episode name is {0}".format(row[titleIndex]), verbosity=self.logVerbosity) return row[titleIndex] return None
Get episode name from epguides show info.

Parameters
----------
showID : string
    Identifier matching show in epguides.

season : int
    Season number.

episode : int
    Episode number.

Returns
----------
string or None
    If an episode name is found this is returned, otherwise the return value is None.
def ShowNameLookUp(self, string):
    goodlogging.Log.Info("EPGUIDES", "Looking up show name match for string '{0}' in guide".format(string), verbosity=self.logVerbosity)
    self._GetTitleList()
    showName = util.GetBestMatch(string, self._showTitleList)
    return showName
Attempts to find the best match for the given string in the list of epguides show titles. If this list has not previously been generated it will be generated first.

Parameters
----------
string : string
    String to find show name match against.

Returns
----------
string
    Show name which best matches input string.
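util.GetBestMatch is not shown in this file; a rough stand-in based on difflib might look like the following, although the real helper may behave differently (for example by prompting the user when several titles are close).

import difflib

def get_best_match(string, candidates):
    # pick the single closest candidate above a similarity threshold, else None
    matches = difflib.get_close_matches(string, candidates, n=1, cutoff=0.6)
    return matches[0] if matches else None

print(get_best_match('the offic', ['The Office', 'The Wire']))  # The Office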
def EpisodeNameLookUp(self, showName, season, episode): goodlogging.Log.Info("EPGUIDE", "Looking up episode name for {0} S{1}E{2}".format(showName, season, episode), verbosity=self.logVerbosity) goodlogging.Log.IncreaseIndent() showID = self._GetShowID(showName) if showID is not None: try: self._showInfoDict[showID] except KeyError: goodlogging.Log.Info("EPGUIDE", "Looking up info for new show: {0}(ID:{1})".format(showName, showID), verbosity=self.logVerbosity) urlData = util.WebLookup(self.EPISODE_LOOKUP_URL, {self.EP_LOOKUP_TAG: showID}) self._showInfoDict[showID] = self._ExtractDataFromShowHtml(urlData) else: goodlogging.Log.Info("EPGUIDE", "Reusing show info previous obtained for: {0}({1})".format(showName, showID), verbosity=self.logVerbosity) finally: episodeName = self._GetEpisodeName(showID, season, episode) goodlogging.Log.DecreaseIndent() return episodeName goodlogging.Log.DecreaseIndent()
Get the episode name corresponding to the given show name, season number and episode number.

Parameters
----------
showName : string
    Name of TV show. This must match an entry in the epguides title list (this can be achieved by calling ShowNameLookUp first).

season : int
    Season number.

episode : int
    Episode number.

Returns
----------
string or None
    If an episode name can be found it is returned, otherwise the return value is None.
def clone(cls, srcpath, destpath): # Mercurial will not create intermediate directories for clones. try: os.makedirs(destpath) except OSError as e: if not e.errno == errno.EEXIST: raise cmd = [HG, 'clone', '--quiet', '--noupdate', srcpath, destpath] subprocess.check_call(cmd) return cls(destpath)
Clone an existing repository to a new bare repository.
def create(cls, path): cmd = [HG, 'init', path] subprocess.check_call(cmd) return cls(path)
Create a new repository
def private_path(self): path = os.path.join(self.path, '.hg', '.private') try: os.mkdir(path) except OSError as e: if e.errno != errno.EEXIST: raise return path
Get the path to a directory which can be used to store arbitrary data This directory should not conflict with any of the repository internals. The directory should be created if it does not already exist.
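The idempotent create-if-missing pattern used above, in isolation; repeated calls are harmless because EEXIST is swallowed while other OS errors still propagate.

import errno
import os
import tempfile

def ensure_dir(path):
    try:
        os.mkdir(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    return path

base = tempfile.mkdtemp()
private = os.path.join(base, '.private')
ensure_dir(private)
ensure_dir(private)  # second call is a no-op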
def bookmarks(self): cmd = [HG, 'bookmarks'] output = self._command(cmd).decode(self.encoding, 'replace') if output.startswith('no bookmarks set'): return [] results = [] for line in output.splitlines(): m = bookmarks_rx.match(line) assert m, 'unexpected output: ' + line results.append(m.group('name')) return results
Get list of bookmarks
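bookmarks_rx is defined elsewhere in the module; a plausible equivalent for typical `hg bookmarks` output is sketched here against sample text, so the exact pattern is an assumption.

import re

bookmarks_rx = re.compile(r'^\s*\*?\s*(?P<name>\S+)\s+\d+:[0-9a-f]+')

output = (" * default                   3:0d2a9cba5e24\n"
          "   feature-x                 5:9f1c2ab3d4e5")

names = []
for line in output.splitlines():
    m = bookmarks_rx.match(line)
    if m:
        names.append(m.group('name'))

print(names)  # ['default', 'feature-x']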
def content(self): if not self._content: self._content = self._read() return self._content
Get the file contents. This property is cached. The file is only read once.
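The cache-on-first-access pattern behind this property, reduced to a standalone class. The original checks truthiness (if not self._content), which would also re-read files whose content is empty; the sketch below uses an explicit None check instead, which is a design choice rather than a faithful copy.

class CachedFile:
    def __init__(self, path):
        self.path = path
        self._content = None

    @property
    def content(self):
        # read the file only once, then serve the cached string
        if self._content is None:
            with open(self.path, 'r') as handle:
                self._content = handle.read()
        return self._content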
def config(self): conf = config.Configuration() for namespace in self.namespaces: if not hasattr(conf, namespace): if not self._strict: continue raise exc.NamespaceNotRegistered( "The namespace {0} is not registered.".format(namespace) ) name = getattr(conf, namespace) for item, value in compat.iteritems(self.items(namespace)): if not hasattr(name, item): if not self._strict: continue raise exc.OptionNotRegistered( "The option {0} is not registered.".format(item) ) setattr(name, item, value) return conf
Get a Configuration object from the file contents.
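A simplified, hypothetical illustration of the strict/non-strict mapping performed above: unknown namespaces or options are skipped when strict checking is off and raise otherwise. The real code works on a config.Configuration object and raises the library's NamespaceNotRegistered/OptionNotRegistered exceptions; KeyError is used here only to keep the sketch self-contained.

class Namespace(object):
    pass

def apply_items(conf, namespace, items, strict=True):
    if not hasattr(conf, namespace):
        if not strict:
            return
        raise KeyError("namespace not registered: " + namespace)
    target = getattr(conf, namespace)
    for item, value in items.items():
        if not hasattr(target, item):
            if not strict:
                continue
            raise KeyError("option not registered: " + item)
        setattr(target, item, value)

conf = Namespace()
conf.db = Namespace()
conf.db.host = None              # a "registered" option
apply_items(conf, 'db', {'host': 'localhost', 'port': 5432}, strict=False)
print(conf.db.host)              # localhost ('port' was skipped as unregistered)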
def _read(self): with open(self.path, 'r') as file_handle: content = file_handle.read() # Py27 INI config parser chokes if the content provided is not unicode. # All other versions seems to work appropriately. Forcing the value to # unicode here in order to resolve this issue. return compat.unicode(content)
Open the file and return its contents.
async def ask(self, body, quick_replies=None, options=None, user=None): await self.send_text_message_to_all_interfaces( recipient=user, text=body, quick_replies=quick_replies, options=options, ) return any.Any()
simple ask with predefined quick replies :param body: :param quick_replies: (optional) in form of {'title': <message>, 'payload': <any json>} :param options: :param user: :return:
async def say(self, body, user, options): return await self.send_text_message_to_all_interfaces( recipient=user, text=body, options=options)
say something to user

:param body:
:param user:
:param options:
:return:
async def send_audio(self, url, user, options=None): tasks = [interface.send_audio(user, url, options) for _, interface in self.interfaces.items()] return [body for body in await asyncio.gather(*tasks)]
send audio message :param url: link to the audio file :param user: target user :param options: :return:
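The fan-out/gather pattern used by send_audio (and the other send_* helpers), shown standalone with dummy interfaces standing in for the real ones.

import asyncio

class DummyInterface:
    def __init__(self, name):
        self.name = name

    async def send_audio(self, user, url, options=None):
        return {'interface': self.name, 'user': user, 'url': url}

async def send_audio_everywhere(interfaces, user, url):
    # one task per registered interface, awaited concurrently
    tasks = [interface.send_audio(user, url) for _, interface in interfaces.items()]
    return await asyncio.gather(*tasks)

interfaces = {'fb': DummyInterface('fb'), 'telegram': DummyInterface('telegram')}
results = asyncio.get_event_loop().run_until_complete(
    send_audio_everywhere(interfaces, 'user-1', 'http://example.com/a.mp3'))
print(results)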
async def send_text_message_to_all_interfaces(self, *args, **kwargs): logger.debug('async_send_text_message_to_all_interfaces') tasks = [interface.send_text_message(*args, **kwargs) for _, interface in self.interfaces.items()] logger.debug(' tasks') logger.debug(tasks) res = [body for body in await asyncio.gather(*tasks)] logger.debug(' res') logger.debug(res) return res
TODO: we should know which interface the user came from so that the right interface can be chosen; for now the message is sent to all interfaces.

:param args:
:param kwargs:
:return:
def connect(self, protocolFactory): deferred = self._startProcess() deferred.addCallback(self._connectRelay, protocolFactory) deferred.addCallback(self._startRelay) return deferred
Start a process and connect a protocol to it.
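Assuming Twisted is available, the Deferred-chaining style used by connect() can be illustrated in isolation: each callback receives the previous callback's result and returns the value passed to the next one. The names connect_relay and start_relay below are placeholders, not the real private methods.

from twisted.internet import defer

def connect_relay(process, protocol_factory):
    # pair the started "process" with a freshly built protocol
    return (process, protocol_factory())

def start_relay(args):
    process, protocol = args
    return protocol

d = defer.Deferred()
d.addCallback(connect_relay, dict)   # dict stands in for a protocol factory
d.addCallback(start_relay)
d.callback('fake-process')           # fires the chain with the "started process"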
def _startProcess(self): connectedDeferred = defer.Deferred() processProtocol = RelayProcessProtocol(connectedDeferred) self.inductor.execute(processProtocol, *self.inductorArgs) return connectedDeferred
Use the inductor to start the process we want to relay data from.