def df(self):
    data = self._final_data()
    if len(data) == 0:
        adf = pd.DataFrame(columns=[self.index.name, self.name])
        return adf.set_index(self.index.name)
    adf = pd.DataFrame(data)
    if len(adf.columns) != 2:
        msg = "Symbol ({}) needs to be cached prior to building a Dataframe"
        msg = msg.format(self.name)
        raise Exception(msg)
    adf.columns = [self.index.name, self.name]
    return self._finish_df(adf, 'FINAL')

Note: this accessor is read-only. If an application accesses it more than
once, the result should be copied.

Returns
-------
DataFrame of the symbol's final data.
def datatable_df(self):
    data = self._all_datatable_data()
    adf = pd.DataFrame(data)
    adf.columns = self.dt_all_cols
    return self._finish_df(adf, 'ALL')

Returns the DataFrame representation of the symbol's datatable.
def _init_datatable(self):
    try:
        self.datatable = Table(self.name, Base.metadata, autoload=True)
    except NoSuchTableError:
        print("Creating datatable, cause it doesn't exist")
        self.datatable = self._datatable_factory()
        self.datatable.create()
    self.datatable_exists = True
Instantiates the .datatable attribute, pointing to a table in the database that stores all the cached data
def _datatable_factory(self):
    feed_cols = ['feed{0:03d}'.format(i + 1) for i in range(self.n_feeds)]
    feed_cols = ['override_feed000'] + feed_cols + ['failsafe_feed999']
    ind_sqlatyp = indexingtypes[self.index.indimp].sqlatyp
    dat_sqlatyp = datadefs[self.dtype.datadef].sqlatyp
    atbl = Table(self.name, Base.metadata,
                 Column('indx', ind_sqlatyp, primary_key=True),
                 Column('final', dat_sqlatyp),
                 *(Column(fed_col, dat_sqlatyp) for fed_col in feed_cols),
                 extend_existing=True)
    self.dt_feed_cols = feed_cols[:]
    self.dt_all_cols = ['indx', 'final'] + feed_cols[:]
    return atbl
creates a SQLAlchemy Table object with the appropriate number of columns given the number of feeds
def update_handle(self, chkpnt_settings):
    # Note: for now, this function is nearly identical to the Symbol
    # version. When augmenting, be careful to change the right one.
    objs = object_session(self)
    # override with anything passed in
    for checkpoint in chkpnt_settings:
        if checkpoint in FeedHandle.__table__.columns:
            settings = chkpnt_settings[checkpoint]
            setattr(self.handle, checkpoint, settings)
    objs.commit()

Update a feed's handle checkpoint settings.

:param chkpnt_settings: dict, a dictionary where the keys are strings
    representing individual handle checkpoint names for a Feed
    (eg. api_failure, feed_type, monounique...). See
    FeedHandle.__table__.columns for the current list. The values can be
    either integers or BitFlags.
:return: None
def add_tags(self, tags):
    if isinstance(tags, (str, unicode)):
        tags = [tags]
    objs = object_session(self)
    tmps = [FeedTag(tag=t, feed=self) for t in tags]
    objs.add_all(tmps)
    objs.commit()
add a tag or tags to a Feed
def _fetch_dimensions(self, dataset):
    yield Dimension(u"region",
                    label="municipality or county",
                    datatype="region",
                    dialect="arbetsmiljoverket")
    yield Dimension(u"period",
                    label="Year or month")

Declaring available dimensions like this is not mandatory, but nice,
especially if they differ from dataset to dataset. If you are using a
built-in datatype, you can specify the dialect you are expecting, to have
values normalized. This scraper will look for Swedish month names
(e.g. 'Januari'), but return them according to the Statscraper standard
('january').
def _fetch_itemslist(self, item):
    if item.is_root:
        for c in ["Arbetsolycka", "Arbetssjukdom"]:
            yield Collection(c, blob=(c, None, None))
    else:
        c = item.id
        for r in [u"kommun", u"län"]:
            for p in [u"år", u"månad"]:
                yield Dataset(u"%s-%s-%s" % (c, r, p),
                              blob=(c, r, p),
                              label=u"%s, antal per %s och %s" % (c, r, p))

We define two collections:

- Number of work injuries ("Arbetsolycka")
- Number of work-related diseases ("Arbetssjukdom")

Each contains four datasets:

- Per municipality and year
- Per county and year
- Per municipality and month
- Per county and month
def make_log_record_output(category, level, message,
                           format=None, datefmt=None, **kwargs):
    if not category or (category == "__ROOT__"):
        category = "root"
    levelname = logging.getLevelName(level)
    record_data = dict(name=category, levelname=levelname, msg=message)
    record_data.update(kwargs)
    record = logging.makeLogRecord(record_data)
    formatter = logging.Formatter(format, datefmt=datefmt)
    return formatter.format(record)

Create the output for a log record, as performed by the :mod:`logging`
module.

:param category: Name of the logger (as string or None).
:param level: Log level (as number).
:param message: Log message to use.
:returns: Log record output (as string)
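A minimal usage sketch (the format string here is illustrative, not a
project default)::

    import logging

    output = make_log_record_output("foo.bar", logging.ERROR, "Hello",
                                    format="%(name)s:%(levelname)s:%(message)s")
    # -> "foo.bar:ERROR:Hello"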
def step_I_create_logrecords_with_table(context):
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    for row in context.table.rows:
        category = row["category"]
        if category == "__ROOT__":
            category = None
        level = LogLevel.parse_type(row["level"])
        message = row["message"]
        make_log_record(category, level, message)

Step definition that creates one or more log records by using a table.

.. code-block:: gherkin

    When I create log records with:
        | category | level | message       |
        | foo      | ERROR | Hello Foo     |
        | foo.bar  | WARN  | Hello Foo.Bar |

Table description
-----------------

| Column   | Type     | Required | Description                  |
| category | string   | yes      | Category (or logger) to use. |
| level    | LogLevel | yes      | Log level to use.            |
| message  | string   | yes      | Log message to use.          |

.. code-block:: python

    import logging
    from behave.configuration import LogLevel
    for row in table.rows:
        logger = logging.getLogger(row.category)
        level = LogLevel.parse_type(row.level)
        logger.log(level, row.message)
def step_I_create_logrecord_with_table(context):
    assert context.table, "REQUIRE: context.table"
    assert len(context.table.rows) == 1, "REQUIRE: table.row.size == 1"
    step_I_create_logrecords_with_table(context)

Create a log record by using a table to provide the parts.

.. seealso:: :func:`step_I_create_logrecords_with_table()`
def step_command_output_should_contain_log_records(context):
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    format = getattr(context, "log_record_format",
                     context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        context.execute_steps(u'''
            Then the command output should contain:
                """
                {expected_output}
                """
            '''.format(expected_output=output))

Verifies that the command output contains the specified log records
(in any order).

.. code-block:: gherkin

    Then the command output should contain the following log records:
        | category | level   | message |
        | bar      | CURRENT | xxx     |
def step_command_output_should_not_contain_log_records(context):
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    format = getattr(context, "log_record_format",
                     context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        context.execute_steps(u'''
            Then the command output should not contain:
                """
                {expected_output}
                """
            '''.format(expected_output=output))

Verifies that the command output does not contain the specified log records
(in any order).

.. code-block:: gherkin

    Then the command output should not contain the following log records:
        | category | level   | message |
        | bar      | CURRENT | xxx     |
def step_command_output_should_contain_log_records_from_categories(context):
    assert context.table, "REQUIRE: context.table"
    context.table.require_column("category")
    record_schema = context.log_record_row_schema
    LogRecordTable.annotate_with_row_schema(context.table, record_schema)
    step_command_output_should_contain_log_records(context)
    context.table.remove_columns(["level", "message"])

Verifies that the command output contains the specified log records
(in any order).

.. code-block:: gherkin

    Given I define a log record schema:
        | category | level | message         |
        | root     | ERROR | __LOG_MESSAGE__ |
    Then the command output should contain log records from categories:
        | category |
        | bar      |
def step_command_output_should_not_contain_log_records_from_categories(context):
    assert context.table, "REQUIRE: context.table"
    context.table.require_column("category")
    record_schema = context.log_record_row_schema
    LogRecordTable.annotate_with_row_schema(context.table, record_schema)
    step_command_output_should_not_contain_log_records(context)
    context.table.remove_columns(["level", "message"])

Verifies that the command output does not contain log records from the
provided log categories (in any order).

.. code-block:: gherkin

    Given I define the log record schema:
        | category | level | message         |
        | root     | ERROR | __LOG_MESSAGE__ |
    Then the command output should not contain log records from categories:
        | category |
        | bar      |
def step_file_should_contain_log_records(context, filename):
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    format = getattr(context, "log_record_format",
                     context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        context.text = output
        step_file_should_contain_multiline_text(context, filename)

Verifies that the file contains the specified log records (in any order).

.. code-block:: gherkin

    Then the file "xxx.log" should contain the log records:
        | category | level   | message |
        | bar      | CURRENT | xxx     |
def step_file_should_not_contain_log_records(context, filename):
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    format = getattr(context, "log_record_format",
                     context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        context.text = output
        step_file_should_not_contain_multiline_text(context, filename)

Verifies that the file does not contain the specified log records
(in any order).

.. code-block:: gherkin

    Then the file "xxx.log" should not contain the log records:
        | category | level   | message |
        | bar      | CURRENT | xxx     |
def step_use_log_record_configuration(context):
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["property", "value"])
    for row in context.table.rows:
        property_name = row["property"]
        value = row["value"]
        if property_name == "format":
            context.log_record_format = value
        elif property_name == "datefmt":
            context.log_record_datefmt = value
        else:
            raise KeyError("Unknown property=%s" % property_name)

Define log record configuration parameters.

.. code-block:: gherkin

    Given I use the log record configuration:
        | property | value |
        | format   |       |
        | datefmt  |       |
def annotate_with_row_schema(table, row_schema):
    for column, value in row_schema.items():
        if column not in table.headings:
            table.add_column(column, default_value=value)

Annotate/extend a table of log-records with additional columns from the
log-record schema if columns are missing.

:param table: Table w/ log-records (as :class:`behave.model.Table`)
:param row_schema: Log-record row schema (as dict).
def smart_decode(binary, errors="strict"):
    d = chardet.detect(binary)
    encoding = d["encoding"]
    confidence = d["confidence"]
    text = binary.decode(encoding, errors=errors)
    return text, encoding, confidence

Automatically find the right codec to decode binary data to string.

:param binary: binary data
:param errors: one of 'strict', 'ignore' and 'replace'
:return: a (text, encoding, confidence) tuple
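A quick usage sketch (requires the ``chardet`` package; the sample bytes
and the detected values are illustrative)::

    raw = u"héllo wörld".encode("utf-8")
    text, encoding, confidence = smart_decode(raw)
    print(text, encoding, confidence)  # e.g. "héllo wörld", "utf-8", 0.94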
def decode(self, binary, url, encoding=None, errors="strict"):
    if encoding is None:
        domain = util.get_domain(url)
        if domain in self.domain_encoding_table:
            encoding = self.domain_encoding_table[domain]
            html = binary.decode(encoding, errors=errors)
        else:
            html, encoding, confidence = smart_decode(binary,
                                                      errors=errors)
            # cache domain name and encoding
            self.domain_encoding_table[domain] = encoding
    else:
        html = binary.decode(encoding, errors=errors)
    return html

Decode binary to string.

:param binary: binary content of a http request.
:param url: endpoint of the request.
:param encoding: manually specify the encoding.
:param errors: error handling method.
:return: str
def modify_number_pattern(number_pattern, **kwargs):
    params = ['pattern', 'prefix', 'suffix', 'grouping',
              'int_prec', 'frac_prec', 'exp_prec', 'exp_plus']
    for param in params:
        if param in kwargs:
            continue
        kwargs[param] = getattr(number_pattern, param)
    return NumberPattern(**kwargs)
Modifies a number pattern by specified keyword arguments.
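A sketch of how this might be used with Babel number patterns
(``frac_prec`` is the (min, max) fraction-digit pair on
:class:`babel.numbers.NumberPattern`)::

    from babel import Locale

    locale = Locale.parse('en_US')
    pattern = locale.decimal_formats.get(None)
    fixed = modify_number_pattern(pattern, frac_prec=(2, 2))
    print(fixed.apply(3.14159, locale))  # -> "3.14"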
def format_currency_field(__, prec, number, locale):
    locale = Locale.parse(locale)
    currency = get_territory_currencies(locale.territory)[0]
    if prec is None:
        pattern, currency_digits = None, True
    else:
        prec = int(prec)
        pattern = locale.currency_formats['standard']
        pattern = modify_number_pattern(pattern, frac_prec=(prec, prec))
        currency_digits = False
    return format_currency(number, currency, pattern, locale=locale,
                           currency_digits=currency_digits)
Formats a currency field.
def format_decimal_field(__, prec, number, locale):
    prec = 0 if prec is None else int(prec)
    if number < 0:
        prec += 1
    return format(number, u'0%dd' % prec)

Formats a decimal field:

.. sourcecode::

    1234 ('D') -> 1234
    -1234 ('D6') -> -001234
def format_float_field(__, prec, number, locale):
    format_ = u'0.'
    if prec is None:
        format_ += u'#' * NUMBER_DECIMAL_DIGITS
    else:
        format_ += u'0' * int(prec)
    pattern = parse_pattern(format_)
    return pattern.apply(number, locale)
Formats a fixed-point field.
def format_number_field(__, prec, number, locale):
    prec = NUMBER_DECIMAL_DIGITS if prec is None else int(prec)
    locale = Locale.parse(locale)
    pattern = locale.decimal_formats.get(None)
    return pattern.apply(number, locale, force_frac=(prec, prec))
Formats a number field.
def format_percent_field(__, prec, number, locale):
    prec = PERCENT_DECIMAL_DIGITS if prec is None else int(prec)
    locale = Locale.parse(locale)
    pattern = locale.percent_formats.get(None)
    return pattern.apply(number, locale, force_frac=(prec, prec))
Formats a percent field.
def format_hexadecimal_field(spec, prec, number, locale):
    if number < 0:
        # Take two's complement.
        number &= (1 << (8 * int(math.log(-number, 1 << 8) + 1))) - 1
    format_ = u'0%d%s' % (int(prec or 0), spec)
    return format(number, format_)

Formats a hexadecimal field.
def format_field(self, value, format_spec):
    if format_spec:
        spec, arg = format_spec[0], format_spec[1:]
        arg = arg or None
    else:
        spec = arg = None
    return self._format_field(spec, arg, value, self.numeric_locale)
Format specifiers are described in :func:`format_field` which is a static function.
def delegate(attribute_name, method_names):
    call_attribute_method = partial(_call_delegated_method, attribute_name)

    def decorate(class_):
        for method in method_names:
            setattr(class_, method,
                    partialmethod(call_attribute_method, method))
        return class_

    return decorate
Decorator factory to delegate methods to an attribute. Decorate a class to map every method in `method_names` to the attribute `attribute_name`.
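A usage sketch; ``_call_delegated_method`` is not shown in the snippet
above, so a plausible forwarding helper is assumed here::

    from functools import partial, partialmethod

    # Assumed helper: forward the call to the named attribute.
    def _call_delegated_method(attribute_name, self, method_name,
                               *args, **kwargs):
        return getattr(getattr(self, attribute_name),
                       method_name)(*args, **kwargs)

    @delegate('inner', ['append', 'pop'])
    class Wrapper:
        def __init__(self):
            self.inner = []  # append()/pop() calls are forwarded here

    w = Wrapper()
    w.append(42)    # equivalent to w.inner.append(42)
    print(w.pop())  # -> 42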
def prepare_query(query):
    for name in query:
        value = query[name]
        # None is sent as an empty string.
        if value is None:
            query[name] = ""
        # Booleans are sent as 0 or 1.
        elif isinstance(value, bool):
            query[name] = int(value)
        # XXX shouldn't this just check for basestring instead?
        elif isinstance(value, dict):
            raise ValueError("Invalid query data type %r"
                             % type(value).__name__)

Prepare a query object for the RAPI.

RAPI has lots of curious rules for coercing values. This function operates
on dicts in-place and has no return value.

@type query: dict
@param query: Query arguments
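A small illustration of the in-place coercion (the keys are made up)::

    query = {"dry-run": True, "tag": None, "count": 3}
    prepare_query(query)
    print(query)  # -> {'dry-run': 1, 'tag': '', 'count': 3}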
def itemgetters(*args):
    f = itemgetter(*args)

    def inner(l):
        return [f(x) for x in l]

    return inner
Get a handful of items from an iterable. This is just map(itemgetter(...), iterable) with a list comprehension.
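For example, with illustrative data::

    get_name_age = itemgetters("name", "age")
    rows = [{"name": "ada", "age": 36}, {"name": "alan", "age": 41}]
    print(get_name_age(rows))  # -> [('ada', 36), ('alan', 41)]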
def create_container(self, container, **kwargs):
    try:
        LOG.debug('create_container() with %s is success.', self.driver)
        return self.driver.create_container(container, **kwargs)
    except DriverException as e:
        LOG.exception('create_container() with %s raised '
                      'an exception %s.', self.driver, e)

Create container

:param container(string): container name (Container is equivalent to
    Bucket term in Amazon).
:param **kwargs(dict): extend args for specific driver.
def delete_container(self, container):
    try:
        LOG.debug('delete_container() with %s is success.', self.driver)
        return self.driver.delete_container(container)
    except DriverException as e:
        LOG.exception('delete_container() with %s raised '
                      'an exception %s.', self.driver, e)

Delete container

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
def stat_container(self, container):
    LOG.debug('stat_container() with %s is success.', self.driver)
    return self.driver.stat_container(container)

Stat container metadata

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
def update_container(self, container, metadata, **kwargs):
    LOG.debug('update_container() with %s is success.', self.driver)
    return self.driver.update_container(container, metadata, **kwargs)

Update container metadata

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver.
def upload_object(self, container, obj, contents,
                  content_length=None, metadata=None, **kwargs):
    try:
        LOG.debug('upload_object() with %s is success.', self.driver)
        return self.driver.upload_object(container, obj,
                                         contents=contents,
                                         content_length=content_length,
                                         metadata=metadata,
                                         **kwargs)
    except DriverException as e:
        LOG.exception('upload_object() with %s raised '
                      'an exception %s.', self.driver, e)

Upload object

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param obj: object name (Object is equivalent to Key term in Amazon).
:param contents: object content.
:param content_length(int): content length.
:param metadata(dict): additional information.
:param **kwargs(dict): extend args for specific driver.
def stat_object(self, container, obj):
    LOG.debug('stat_object() with %s is success.', self.driver)
    return self.driver.stat_object(container, obj)

Stat object metadata

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param obj: object name (Object is equivalent to Key term in Amazon).
def delete_object(self, container, obj, **kwargs):
    try:
        LOG.debug('delete_object() with %s is success.', self.driver)
        return self.driver.delete_object(container, obj, **kwargs)
    except DriverException as e:
        LOG.exception('delete_object() with %s raised '
                      'an exception %s.', self.driver, e)

Delete object in container

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param obj: object name (Object is equivalent to Key term in Amazon).
def list_container_objects(self, container, prefix=None, delimiter=None):
    LOG.debug('list_container_objects() with %s is success.', self.driver)
    return self.driver.list_container_objects(container, prefix, delimiter)

List container objects

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param prefix: prefix query
:param delimiter: string to delimit the queries on
def update_object(self, container, obj, metadata, **kwargs):
    try:
        LOG.debug('update_object() with %s is success.', self.driver)
        return self.driver.update_object(container, obj, metadata,
                                         **kwargs)
    except DriverException as e:
        LOG.exception('update_object() with %s raised '
                      'an exception %s.', self.driver, e)

Update object metadata

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param obj: object name (Object is equivalent to Key term in Amazon).
:param metadata(dict): additional metadata to include in the request.
def copy_object(self, container, obj, metadata=None,
                destination=None, **kwargs):
    try:
        LOG.debug('copy_object() with %s is success.', self.driver)
        return self.driver.copy_object(container, obj, metadata=metadata,
                                       destination=destination, **kwargs)
    except DriverException as e:
        LOG.exception('copy_object() with %s raised '
                      'an exception %s.', self.driver, e)

Copy object

:param container: container name (Container is equivalent to Bucket term
    in Amazon).
:param obj: object name (Object is equivalent to Key term in Amazon).
:param destination: The container and object name of the destination
    object in the form of /container/object; if None, the copy will use
    the source as the destination.
:param metadata(dict): additional metadata(headers) to include in the
    request
:param **kwargs(dict): extend args for specific driver.
def permission_required(*actions, obj=None, raise_exception=False):
    def checker(user):
        ok = False
        if user.is_authenticated() and check_perms(user, actions, [obj]):
            ok = True
        if raise_exception and not ok:
            raise PermissionDenied
        else:
            return ok

    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if checker(request.user):
                return view_func(request, *args, **kwargs)
        return _wrapped_view

    return decorator
Permission checking decorator -- works like the ``permission_required`` decorator in the default Django authentication system, except that it takes a sequence of actions to check, an object must be supplied, and the user must have permission to perform all of the actions on the given object for the permissions test to pass. *Not actually sure how useful this is going to be: in any case where obj is not None, it's going to be tricky to get the object into the decorator. Class-based views are definitely best here...*
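A usage sketch for a plain function view (the model, object, and action
names are hypothetical; as the note above says, obtaining the object at
decoration time is the awkward part)::

    parcel = Parcel.objects.get(pk=1)  # illustrative object

    @permission_required('parcel.view', 'parcel.edit', obj=parcel,
                         raise_exception=True)
    def edit_parcel(request):
        ...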
def get_path_fields(cls, base=[]):
    pfs = []
    for pf in cls.TutelaryMeta.path_fields:
        if pf == 'pk':
            pfs.append(base + ['pk'])
        else:
            f = cls._meta.get_field(pf)
            if isinstance(f, models.ForeignKey):
                pfs += get_path_fields(f.target_field.model,
                                       base=base + [pf])
            else:
                pfs.append(base + [f.name])
    return pfs
Get object fields used for calculation of django-tutelary object paths.
def get_perms_object(obj, action):
    def get_one(pf):
        if isinstance(pf, str):
            return pf
        else:
            return str(reduce(lambda o, f: getattr(o, f), pf, obj))
    return Object([get_one(pf) for pf in obj.__class__.TutelaryMeta.pfs])
Get the django-tutelary path for an object, based on the fields listed in ``TutelaryMeta.pfs``.
def make_get_perms_object(perms_objs):
    def retfn(obj, action):
        if action in perms_objs:
            if perms_objs[action] is None:
                return None
            else:
                return get_perms_object(
                    getattr(obj, perms_objs[action]), action)
        else:
            return get_perms_object(obj, action)
    return retfn
Make a function to delegate permission object rendering to some other (foreign key) field of an object.
def _getArrays(items, attr, defaultValue):
    arrays = dict([(key, []) for key in attr])
    for item in items:
        for key in attr:
            arrays[key].append(getattr(item, key, defaultValue))
    for key in [_ for _ in viewkeys(arrays)]:
        arrays[key] = numpy.array(arrays[key])
    return arrays

Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.

:param attr: list of item attributes that should be added to the returned
    array.
:param defaultValue: if an item is missing an attribute, the
    "defaultValue" is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
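An illustrative sketch with made-up items (any objects with attributes
work)::

    from collections import namedtuple

    Peak = namedtuple('Peak', ['mz', 'intensity'])
    items = [Peak(100.5, 1e4), Peak(200.1, 5e3)]
    arrays = _getArrays(items, attr=['mz', 'intensity'], defaultValue=None)
    print(arrays['mz'])  # -> array([100.5, 200.1])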
def _getItems(container, containerKeys=None, sort=False, reverse=False,
              selector=lambda item: True):
    if containerKeys is None:
        containerKeys = [_ for _ in viewkeys(container)]
    else:
        containerKeys = aux.toList(containerKeys)
    if sort:
        sortIdentifier = list()
        for containerKey in containerKeys:
            for identifier in [_ for _ in viewkeys(container[containerKey])]:
                item = container[containerKey][identifier]
                if selector(item):
                    try:
                        sortIdentifier.append((getattr(item, sort),
                                               containerKey, identifier))
                    except AttributeError:
                        pass
        sortIdentifier.sort(key=ITEMGETTER(0), reverse=reverse)
        for _, containerKey, identifier in sortIdentifier:
            yield container[containerKey][identifier]
    else:
        for containerKey in containerKeys:
            for identifier in [_ for _ in viewkeys(container[containerKey])]:
                item = container[containerKey][identifier]
                if selector(item):
                    yield item

Generator that yields filtered and/or sorted items from the specified
"container".

:param container: The container has to be a dictionary of dictionaries
    that contain some kind of items. Depending on the specified parameters
    all or a subset of these items are yielded.
    ``{containerKey1: {key1: item1, key2: item2, ...}, ...}``
:param containerKeys: valid keys of the "container", if None all keys are
    considered.
:type containerKeys: a single dictionary key or a list of keys
:param sort: if "sort" is specified the returned list of items is sorted
    according to the item attribute specified by "sort", if the attribute
    is not present the item is skipped.
:param reverse: bool, ``True`` reverses the sort order
:param selector: a function which is called with each item and returns
    True (include item) or False (discard item). If not specified all
    items are returned.
:returns: items from container that passed the selector function
def _containerSetPath(container, folderpath, specfiles):
    if not os.path.exists(folderpath):
        warntext = 'Error while calling "_containerSetPath()": The '\
                   'specified directory "%s" does not exist!' % (folderpath, )
        warnings.warn(warntext)
    for specfile in specfiles:
        if specfile in container.info:
            container.info[specfile]['path'] = folderpath
        else:
            warntext = 'Error while calling "_containerSetPath()": The '\
                       'specfile "%s" is not present in the container!'\
                       % (specfile, )
            warnings.warn(warntext)

Helper function for :class:`MsrunContainer`, :class:`SiiContainer` and
:class:`FiContainer`. Changes the folderpath of the specified specfiles in
container.info: ``container.info[specfile]['path'] = folderpath``.

:param container: a container like class that has an attribute ``.info``
:param folderpath: a filedirectory
:param specfiles: a list of ms-run names
def _mzmlListAttribToTuple(oldList):
    newList = list()
    for oldParamList in oldList:
        newParamList = [tuple(param) for param in oldParamList]
        newList.append(newParamList)
    return newList

Turns the param entries of elements in a list of elements into tuples,
used in :func:`MzmlScan._fromJSON()` and :func:`MzmlPrecursor._fromJSON()`.

.. note:: only intended for a list of elements that contain params. For
    example the mzML element ``selectedIonList`` or ``scanWindowList``.

:param oldList: [[paramList, paramList, ...], ...]
:returns: [[paramTuple, paramTuple, ...], ...]
def addMsrunContainers(mainContainer, subContainer):
    typeToContainer = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic',
                       'sai': 'saic', 'si': 'sic'}
    for specfile in subContainer.info:
        if specfile in mainContainer.info:
            continue
        mainContainer.addSpecfile(specfile,
                                  subContainer.info[specfile]['path'])
        for datatype, status in listitems(subContainer.info[specfile]['status']):
            if not status:
                continue
            datatypeContainer = typeToContainer[datatype]
            dataTypeContainer = getattr(mainContainer, datatypeContainer)
            subContainerData = getattr(subContainer,
                                       datatypeContainer)[specfile]
            dataTypeContainer[specfile] = subContainerData
            mainContainer.info[specfile]['status'][datatype] = True

Adds the complete content of all specfile entries from the subContainer to
the mainContainer. However, if a specfile of ``subContainer.info`` is
already present in ``mainContainer.info`` its contents are not added to
the mainContainer.

:param mainContainer: :class:`MsrunContainer`
:param subContainer: :class:`MsrunContainer`

.. warning:: does not generate new items; all items added to the
    ``mainContainer`` are still present in the ``subContainer`` and
    changes made to elements of one container also affect the elements of
    the other one (i.e. elements share the same memory location).
def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False,
              selector=None, defaultValue=None):
    selector = (lambda si: True) if selector is None else selector
    attr = attr if attr is not None else []
    attr = set(['id', 'specfile'] + aux.toList(attr))
    items = self.getItems(specfiles, sort, reverse, selector)
    return _getArrays(items, attr, defaultValue)

Return a condensed array of data selected from :class:`Si` instances from
``self.sic`` for fast and convenient data processing.

:param attr: list of :class:`Si` item attributes that should be added to
    the returned array. The attributes "id" and "specfile" are always
    included, in combination they serve as a unique id.
:param defaultValue: if an item is missing an attribute, the
    "defaultValue" is added to the array instead.
:param specfiles: filenames of ms-run files, if specified return only
    items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
    according to the :class:`Si` attribute specified by "sort", if the
    attribute is not present the item is skipped.
:param reverse: bool, set True to reverse sort order
:param selector: a function which is called with each :class:`Si` item
    and has to return True (include item) or False (discard item).
    Default function is: ``lambda si: True``
:returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ...}
def getItems(self, specfiles=None, sort=False, reverse=False,
             selector=None):
    selector = (lambda si: True) if selector is None else selector
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    return _getItems(self.sic, specfiles, sort, reverse, selector)

Generator that yields filtered and/or sorted :class:`Si` instances from
``self.sic``.

:param specfiles: filenames of ms-run files - if specified return only
    items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
    according to the :class:`Si` attribute specified by "sort", if the
    attribute is not present the item is skipped.
:param reverse: bool, ``True`` reverses the sort order
:param selector: a function which is called with each ``Si`` item and
    returns True (include item) or False (discard item). Default function
    is: ``lambda si: True``
:returns: items from container that passed the selector function
def addSpecfile(self, specfiles, path):
    for specfile in aux.toList(specfiles):
        if specfile not in self.info:
            self._addSpecfile(specfile, path)
        else:
            warntext = 'Error while calling "MsrunContainer.addSpecfile()"'\
                       ': "%s" is already present in "MsrunContainer.info"'\
                       % (specfile, )
            warnings.warn(warntext)

Prepares the container for loading ``mrc`` files by adding specfile
entries to ``self.info``. Use :func:`MsrunContainer.load()` afterwards to
actually import the files.

:param specfiles: the name of an ms-run file or a list of names
:type specfiles: str or [str, str, ...]
:param path: filedirectory used for loading and saving ``mrc`` files
def _addSpecfile(self, specfile, path):
    datatypeStatus = {'rm': False, 'ci': False, 'smi': False,
                      'sai': False, 'si': False}
    self.info[specfile] = {'path': path, 'status': datatypeStatus}

Adds a new specfile entry to MsrunContainer.info. See also
:class:`MsrunContainer.addSpecfile()`.

:param specfile: the name of an ms-run file
:param path: filedirectory used for loading and saving ``mrc`` files
def setPath(self, folderpath, specfiles=None):
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    _containerSetPath(self, folderpath, specfiles)

Changes the folderpath of the specified specfiles. The folderpath is used
for saving and loading of ``mrc`` files.

:param specfiles: the name of an ms-run file or a list of names. If None
    all specfiles are selected.
:type specfiles: None, str, [str, str]
:param folderpath: a filedirectory
def removeData(self, specfiles=None, rm=False, ci=False, smi=False,
               sai=False, si=False):
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    # TODO: add a check if specfiles are present in the container
    typeToContainer = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic',
                       'sai': 'saic', 'si': 'sic'}
    datatypes = self._processDatatypes(rm, ci, smi, sai, si)
    for specfile in specfiles:
        for datatype in datatypes:
            datatypeContainer = typeToContainer[datatype]
            dataContainer = getattr(self, datatypeContainer)
            try:
                del dataContainer[specfile]
            except KeyError:
                pass
            finally:
                self.info[specfile]['status'][datatype] = False

Removes the specified datatypes of the specfiles from the msrunContainer.
To completely remove a specfile use :func:`MsrunContainer.removeSpecfile`,
which also removes the complete entry from ``self.info``.

:param specfiles: the name of an ms-run file or a list of names. If None
    all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to select ``self.rmc``
:param ci: bool, True to select ``self.cic``
:param smi: bool, True to select ``self.smic``
:param sai: bool, True to select ``self.saic``
:param si: bool, True to select ``self.sic``
def removeSpecfile(self, specfiles):
    for specfile in aux.toList(specfiles):
        for datatypeContainer in ['rmc', 'cic', 'smic', 'saic', 'sic']:
            dataContainer = getattr(self, datatypeContainer)
            try:
                del dataContainer[specfile]
            except KeyError:
                pass
        del self.info[specfile]

Completely removes the specified specfiles from the ``msrunContainer``.

:param specfiles: the name of an ms-run file or a list of names.
:type specfiles: str, [str, str]
def _processDatatypes(self, rm, ci, smi, sai, si):
    datatypes = list()
    for datatype, value in [('rm', rm), ('ci', ci), ('smi', smi),
                            ('sai', sai), ('si', si)]:
        if value:
            datatypes.append(datatype)
    return datatypes

Helper function that returns a list of datatype strings, depending on the
parameters' boolean values.

:param rm: bool, True to add ``rm``
:param ci: bool, True to add ``ci``
:param smi: bool, True to add ``smi``
:param sai: bool, True to add ``sai``
:param si: bool, True to add ``si``
:returns: [datatype1, ...]
def _writeCic(self, filelike, specfile, compress):
    aux.writeBinaryItemContainer(filelike, self.cic[specfile], compress)

Writes the ``.cic`` container entry of the specified specfile to the
``mrc_cic`` format. For details see
:func:`maspy.auxiliary.writeBinaryItemContainer()`

:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
def _writeSaic(self, filelike, specfile, compress):
    aux.writeBinaryItemContainer(filelike, self.saic[specfile], compress)

Writes the ``.saic`` container entry of the specified specfile to the
``mrc_saic`` format. For details see
:func:`maspy.auxiliary.writeBinaryItemContainer()`

:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
def _writeSmic(self, filelike, specfile, compress):
    aux.writeJsonZipfile(filelike, self.smic[specfile], compress)

Writes the ``.smic`` container entry of the specified specfile to the
``mrc_smic`` format. For details see
:func:`maspy.auxiliary.writeJsonZipfile()`

:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
def _writeSic(self, filelike, specfile, compress):
    aux.writeJsonZipfile(filelike, self.sic[specfile], compress)

Writes the ``.sic`` container entry of the specified specfile to the
``mrc_sic`` format. For details see
:func:`maspy.auxiliary.writeJsonZipfile()`

:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
def _writeRmc(self, filelike, specfile):
    xmlString = ETREE.tostring(self.rmc[specfile], pretty_print=True)
    filelike.write(xmlString)

Writes the ``.rmc`` container entry of the specified specfile as a
human-readable and pretty formatted xml string.

:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
def _reprJSON(self):
    return {'__Ci__': (self.id, self.specfile, self.dataProcessingRef,
                       self.precursor, self.product, self.params,
                       self.attrib, self.arrayInfo)}

Returns a JSON serializable representation of a ``Ci`` class instance. Use
:func:`maspy.core.Ci._fromJSON()` to generate a new ``Ci`` instance from
the return value.

:returns: a JSON serializable python object
def _fromJSON(cls, jsonobject):
    newInstance = cls(jsonobject[0], jsonobject[1])
    attribDict = {}
    attribDict['dataProcessingRef'] = jsonobject[2]
    attribDict['precursor'] = jsonobject[3]
    attribDict['product'] = jsonobject[4]
    attribDict['params'] = [tuple(param) for param in jsonobject[5]]
    attribDict['attrib'] = jsonobject[6]
    attribDict['arrayInfo'] = dict()
    for arrayType, jsonEntry in viewitems(jsonobject[7]):
        arrayEntry = {'dataProcessingRef': jsonEntry['dataProcessingRef'],
                      'params': [tuple(_) for _ in jsonEntry['params']]}
        attribDict['arrayInfo'][arrayType] = arrayEntry
    for key, value in viewitems(attribDict):
        setattr(newInstance, key, value)
    return newInstance

Generates a new instance of :class:`maspy.core.Ci` from a decoded JSON
object (as generated by :func:`maspy.core.Ci._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Ci`
def jsonHook(encoded):
    if '__Ci__' in encoded:
        return Ci._fromJSON(encoded['__Ci__'])
    elif '__MzmlProduct__' in encoded:
        return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
    elif '__MzmlPrecursor__' in encoded:
        return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
    else:
        return encoded

Custom JSON decoder that allows construction of a new ``Ci`` instance from
a decoded JSON object.

:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of these objects: :class:`Ci`,
    :class:`MzmlProduct`, :class:`MzmlPrecursor`
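A usage sketch (``someCi`` stands for any ``Ci`` instance; the JSON string
would normally be read from a file written with the matching encoder)::

    import json

    serialized = json.dumps(someCi._reprJSON())
    restored = json.loads(serialized, object_hook=jsonHook)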
def _fromJSON(cls, jsonobject):
    newInstance = cls(jsonobject[0], jsonobject[1])
    for arrayType, jsonEntry in viewitems(jsonobject[2]):
        arrayEntry = {'dataProcessingRef': jsonEntry['dataProcessingRef'],
                      'params': [tuple(_) for _ in jsonEntry['params']]}
        newInstance.arrayInfo[arrayType] = arrayEntry
    return newInstance

Generates a new instance of :class:`maspy.core.Sai` from a decoded JSON
object (as generated by :func:`maspy.core.Sai._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Sai`
def _reprJSON(self):
    return {'__Smi__': (self.id, self.specfile, self.attributes,
                        self.params, self.scanListParams, self.scanList,
                        self.precursorList, self.productList)}

Returns a JSON serializable representation of a ``Smi`` class instance.
Use :func:`maspy.core.Smi._fromJSON()` to generate a new ``Smi`` instance
from the return value.

:returns: a JSON serializable python object
def _fromJSON(cls, jsonobject):
    newInstance = cls(None, None)
    attribDict = {}
    attribDict['id'] = jsonobject[0]
    attribDict['specfile'] = jsonobject[1]
    attribDict['attributes'] = jsonobject[2]
    attribDict['params'] = [tuple(param) for param in jsonobject[3]]
    attribDict['scanListParams'] = [tuple(param)
                                    for param in jsonobject[4]]
    attribDict['scanList'] = jsonobject[5]
    attribDict['precursorList'] = jsonobject[6]
    attribDict['productList'] = jsonobject[7]
    for key, value in viewitems(attribDict):
        setattr(newInstance, key, value)
    return newInstance

Generates a new instance of :class:`maspy.core.Smi` from a decoded JSON
object (as generated by :func:`maspy.core.Smi._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Smi`
def jsonHook(encoded):
    if '__Smi__' in encoded:
        return Smi._fromJSON(encoded['__Smi__'])
    elif '__MzmlScan__' in encoded:
        return MzmlScan._fromJSON(encoded['__MzmlScan__'])
    elif '__MzmlProduct__' in encoded:
        return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
    elif '__MzmlPrecursor__' in encoded:
        return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
    else:
        return encoded

Custom JSON decoder that allows construction of a new ``Smi`` instance
from a decoded JSON object.

:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of these objects: :class:`Smi`,
    :class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor`
def _fromJSON(cls, jsonobject):
    newInstance = cls(None, None)
    newInstance.__dict__.update(jsonobject)
    return newInstance

Generates a new instance of :class:`maspy.core.Si` from a decoded JSON
object (as generated by :func:`maspy.core.Si._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`Si`
def _fromJSON(cls, jsonobject):
    scanWindowList = _mzmlListAttribToTuple(jsonobject[0])
    params = [tuple(param) for param in jsonobject[1]]
    return cls(scanWindowList, params)

Generates a new instance of :class:`maspy.core.MzmlScan` from a decoded
JSON object (as generated by :func:`maspy.core.MzmlScan._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan`
def _fromJSON(cls, jsonobject):
    isolationWindow = [tuple(param) for param in jsonobject]
    return cls(isolationWindow)

Generates a new instance of :class:`maspy.core.MzmlProduct` from a decoded
JSON object (as generated by :func:`maspy.core.MzmlProduct._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlProduct`
def _reprJSON(self):
    return {'__MzmlPrecursor__': (self.spectrumRef, self.activation,
                                  self.isolationWindow,
                                  self.selectedIonList)}

Returns a JSON serializable representation of a ``MzmlPrecursor`` class
instance. Use :func:`maspy.core.MzmlPrecursor._fromJSON()` to generate a
new ``MzmlPrecursor`` instance from the return value.

:returns: a JSON serializable python object
def _fromJSON(cls, jsonobject):
    spectrumRef = jsonobject[0]
    activation = [tuple(param) for param in jsonobject[1]]
    isolationWindow = [tuple(param) for param in jsonobject[2]]
    selectedIonList = _mzmlListAttribToTuple(jsonobject[3])
    return cls(spectrumRef, activation, isolationWindow, selectedIonList)

Generates a new instance of :class:`maspy.core.MzmlPrecursor` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlPrecursor._reprJSON()`).

:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlPrecursor`
def getItems(self, specfiles=None, sort=False, reverse=False,
             selector=None):
    selector = (lambda sii: sii.isValid) if selector is None else selector
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    return _getListItems(self.container, specfiles, sort, reverse, selector)

Generator that yields filtered and/or sorted :class:`Sii` instances from
``self.container``.

:param specfiles: filenames of ms-run files - if specified return only
    items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
    according to the :class:`Sii` attribute specified by "sort", if the
    attribute is not present the item is skipped.
:param reverse: bool, ``True`` reverses the sort order
:param selector: a function which is called with each ``Sii`` item and
    has to return True (include item) or False (discard item). By default
    only items with ``Sii.isValid == True`` are returned.
:returns: items from container that passed the selector function
def getValidItem(self, specfile, identifier):
    for item in self.container[specfile][identifier]:
        if item.isValid:
            return item
    else:
        return None

Returns a ``Sii`` instance from ``self.container`` if it is valid; if all
elements of ``self.container[specfile][identifier]`` are
``Sii.isValid == False`` then ``None`` is returned.

:param specfile: a ms-run file name
:param identifier: item identifier ``Sii.id``
:returns: ``Sii`` or ``None``
def _addSpecfile(self, specfile, path):
    self.info[specfile] = {'path': path, 'qcAttr': None, 'qcCutoff': None,
                           'qcLargerBetter': None, 'rankAttr': None,
                           'rankLargerBetter': None}
    self.container[specfile] = dict()

Adds a new specfile entry to SiiContainer.info. See also
:class:`SiiContainer.addSpecfile()`.

:param specfile: the name of an ms-run file
:param path: filedirectory for loading and saving the ``siic`` files
def removeSpecfile(self, specfiles):
    for specfile in aux.toList(specfiles):
        del self.container[specfile]
        del self.info[specfile]

Completely removes the specified specfiles from the ``SiiContainer``.

:param specfiles: the name of an ms-run file or a list of names.
def save(self, specfiles=None, compress=True, path=None):
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "SiiContainer.save()": "%s" '\
                       'is not present in "SiiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
            continue
        else:
            path = self.info[specfile]['path'] if path is None else path
            with aux.PartiallySafeReplace() as msr:
                filename = specfile + '.siic'
                filepath = aux.joinpath(path, filename)
                with msr.open(filepath, mode='w+b') as openfile:
                    self._writeContainer(openfile, specfile, compress)

Writes the specified specfiles to ``siic`` files on the hard disk.

.. note:: If ``.save()`` is called and no ``siic`` files are present in
    the specified path new files are generated, otherwise old files are
    replaced.

:param specfiles: the name of an ms-run file or a list of names. If None
    all specfiles are selected.
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``siic`` files are written. By
    default the parameter is set to ``None`` and the filedirectory is read
    from ``self.info[specfile]['path']``
def addSiInfo(self, msrunContainer, specfiles=None,
              attributes=['obsMz', 'rt', 'charge']):
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "SiiContainer.addSiInfo()": '\
                       '"%s" is not present in "SiiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
        elif specfile not in msrunContainer.info:
            warntext = 'Error while calling "SiiContainer.addSiInfo()": '\
                       '"%s" is not present in "MsrunContainer.info"'\
                       % (specfile, )
            warnings.warn(warntext)
        else:
            for identifier in self.container[specfile]:
                si = msrunContainer.sic[specfile][identifier]
                for sii in self.container[specfile][identifier]:
                    for attribute in attributes:
                        setattr(sii, attribute,
                                getattr(si, attribute, None))

Transfer attributes to :class:`Sii` elements from the corresponding
:class:`Si` in :class:`MsrunContainer.sic <MsrunContainer>`. If an
attribute is not present in the ``Si`` the attribute value in the ``Sii``
is set to ``None``.

Attribute examples: 'obsMz', 'rt', 'charge', 'tic', 'iit', 'ms1Id'

:param msrunContainer: an instance of :class:`MsrunContainer` which has
    imported the corresponding specfiles
:param specfiles: the name of an ms-run file or a list of names. If None
    all specfiles are selected.
:param attributes: a list of ``Si`` attributes that should be transferred.
def getArrays(self, attr=None, specfiles=None, sort=False, reverse=False,
              selector=None, defaultValue=None):
    selector = (lambda fi: fi.isValid) if selector is None else selector
    attr = attr if attr is not None else []
    attr = set(['id', 'specfile'] + aux.toList(attr))
    items = self.getItems(specfiles, sort, reverse, selector)
    return _getArrays(items, attr, defaultValue)

Return a condensed array of data selected from :class:`Fi` instances from
``self.container`` for fast and convenient data processing.

:param attr: list of :class:`Fi` item attributes that should be added to
    the returned array. The attributes "id" and "specfile" are always
    included, in combination they serve as a unique id.
:param defaultValue: if an item is missing an attribute, the
    "defaultValue" is added to the array instead.
:param specfiles: filenames of ms-run files - if specified return only
    items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
    according to the :class:`Fi` attribute specified by "sort", if the
    attribute is not present the item is skipped.
:param reverse: bool, set True to reverse sort order
:param selector: a function which is called with each ``Fi`` item and has
    to return True (include item) or False (discard item). By default only
    items with ``Fi.isValid == True`` are returned.
:returns: {'attribute1': numpy.array(), 'attribute2': numpy.array(), ...}
def getItems(self, specfiles=None, sort=False, reverse=False,
             selector=None):
    selector = (lambda fi: fi.isValid) if selector is None else selector
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    return _getItems(self.container, specfiles, sort, reverse, selector)

Generator that yields filtered and/or sorted :class:`Fi` instances from
``self.container``.

:param specfiles: filenames of ms-run files - if specified return only
    items from those files
:type specfiles: str or [str, str, ...]
:param sort: if "sort" is specified the returned list of items is sorted
    according to the :class:`Fi` attribute specified by "sort", if the
    attribute is not present the item is skipped.
:param reverse: bool, ``True`` reverses the sort order
:param selector: a function which is called with each ``Fi`` item and has
    to return True (include item) or False (discard item). By default only
    items with ``Fi.isValid == True`` are returned.
:returns: items from container that passed the selector function
def _writeContainer(self, filelike, specfile, compress):
    aux.writeJsonZipfile(filelike, self.container[specfile],
                         compress=compress)

Writes the ``self.container`` entry of the specified specfile to the
``fic`` format.

:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression

.. note:: In addition it could also dump the ``self.info`` entry to the
    zipfile with the filename ``info``, but this is not used at the
    moment. For details see :func:`maspy.auxiliary.writeJsonZipfile()`
def load(self, specfiles=None):
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "FiContainer.load()": "%s" is'\
                       ' not present in "FiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
            continue
        else:
            fiPath = aux.joinpath(self.info[specfile]['path'],
                                  specfile + '.fic')
            with zipfile.ZipFile(fiPath, 'r') as containerZip:
                # Convert the zipfile data into a str object, necessary
                # since containerZip.read() returns a bytes object.
                jsonString = io.TextIOWrapper(containerZip.open('data'),
                                              encoding='utf-8').read()
                #infoString = io.TextIOWrapper(containerZip.open('info'),
                #                              encoding='utf-8').read()
                self.container[specfile] = json.loads(jsonString,
                                                      object_hook=Fi.jsonHook)

Imports the specified ``fic`` files from the hard disk.

:param specfiles: the name of an ms-run file or a list of names. If None
    all specfiles are selected.
:type specfiles: None, str, [str, str]
def removeAnnotation(self, specfiles=None):
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in aux.toList(specfiles):
        for item in viewvalues(self.container[specfile]):
            item.isMatched = False
            item.isAnnotated = False
            item.siIds = list()
            item.siiIds = list()
            item.peptide = None
            item.sequence = None
            item.bestScore = None

Remove all annotation information from :class:`Fi` elements.

:param specfiles: the name of an ms-run file or a list of names. If None
    all specfiles are selected.
:type specfiles: None, str, [str, str]
def as_dict(self):
    def conv(v):
        if isinstance(v, SerializableAttributesHolder):
            return v.as_dict()
        elif isinstance(v, list):
            return [conv(x) for x in v]
        elif isinstance(v, dict):
            return {x: conv(y) for (x, y) in v.items()}
        else:
            return v
    return {k.replace('_', '-'): conv(v)
            for (k, v) in self._attributes.items()}

Returns a JSON-serializable object representing this tree.
def from_json(cls, data):
    # Decode JSON string
    assert isinstance(data, str)
    data = json.loads(data)
    assert isinstance(data, dict)
    return cls.from_dict(data)
Decode a JSON string and inflate a node instance.
def _pred(aclass):
    isaclass = inspect.isclass(aclass)
    return isaclass and aclass.__module__ == _pred.__module__

:param aclass: the object to test
:return: boolean, True if "aclass" is a class defined in this module
def extract_keywords(func):
    if hasattr(func, 'im_func'):
        func = func.im_func
    try:
        return func.func_code.co_varnames[-len(func.func_defaults):]
    except (TypeError, ValueError, IndexError):
        return tuple()

Parses the keyword argument names from the given function.

:param func: <function>
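For example, on Python 2 (where ``func_code`` and ``func_defaults`` exist;
on Python 3 these became ``__code__`` and ``__defaults__``)::

    def connect(host, port=5432, timeout=30):
        pass

    print(extract_keywords(connect))  # -> ('port', 'timeout')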
def _get_adv_trans_stats(self, cmd, return_tdo=False):
    t = time()
    code, res = self.bulkCommand(b'\x03\x02%c\x00' % (0x80 | cmd), 10)
    if self._scanchain and self._scanchain._print_statistics:
        print("GET STATS TIME", time() - t)  # pragma: no cover
    if len(res) == 4:
        count = struct.unpack('<I', res)[0]
        return count
    elif len(res) == 8:
        written, read = struct.unpack('<II', res)
        return written, read
    return res
Utility function to fetch the transfer statistics for the last advanced transfer. Checking the stats appears to sync the controller. For details on the advanced transfer please refer to the documentation at http://diamondman.github.io/Adapt/cable_digilent_adept.html#bulk-requests
def jtag_enable(self):
    status, _ = self.bulkCommand(_BMSG_ENABLE_JTAG)
    if status == 0:
        self._jtagon = True
    elif status == 3:
        self._jtagon = True
        raise JTAGAlreadyEnabledError()
    else:
        raise JTAGEnableFailedError("Error enabling JTAG. "
                                    "Error code: %s." % status)

Enables JTAG output on the controller. JTAG operations executed before
this function is called will return useless data or fail.

Usage:
    >>> from proteusisc import getAttachedControllers, bitarray
    >>> c = getAttachedControllers()[0]
    >>> c.jtag_enable()
    >>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
    >>> c.jtag_disable()
def jtag_disable(self):
    if not self._jtagon:
        return
    status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
    if status == 0:
        self._jtagon = False
    elif status == 3:
        raise JTAGControlError("Error Code %s" % status)
    self.close_handle()

Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.

Usage:
    >>> from proteusisc import getAttachedControllers, bitarray
    >>> c = getAttachedControllers()[0]
    >>> c.jtag_enable()
    >>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
    >>> c.jtag_disable()
def write_tms_bits(self, data, return_tdo=False, TDI=False):
    self._check_jtag()
    self._update_scanchain(data)
    self.bulkCommandDefault(_BMSG_WRITE_TMS % (
        return_tdo, TDI, len(data).to_bytes(4, 'little')))
    self.bulkWriteData(build_byte_align_buff(data).tobytes()[::-1])
    tdo_bits = self._read_tdo(len(data)) if return_tdo else None
    self._get_adv_trans_stats(0x0B, return_tdo)
    return tdo_bits

Command controller to write TMS data (with constant TDI bit) to the
physical scan chain. Optionally return TDO bits sent back from the scan
chain.

Args:
    data - bits to send over TMS line of scan chain (bitarray)
    return_tdo (bool) - return the device's bitarray response
    TDI (bool) - whether TDI should send a bitarray of all 0's of same
        length as `data` (i.e. False) or all 1's (i.e. True)

Returns:
    None by default, or the (bitarray) response of the device after
    receiving data, if return_tdo is True.

Usage:
    >>> from proteusisc import getAttachedControllers, bitarray
    >>> c = getAttachedControllers()[0]
    >>> c.jtag_enable()
    >>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
    >>> c.jtag_disable()
def write_tdi_bits(self, buff, return_tdo=False, TMS=True):
    self._check_jtag()
    tms_bits = bitarray([TMS] * len(buff))
    self._update_scanchain(tms_bits)
    self.bulkCommandDefault(_BMSG_WRITE_TDI % (
        return_tdo, TMS, len(buff).to_bytes(4, 'little')))
    self.bulkWriteData(build_byte_align_buff(buff).tobytes()[::-1])
    tdo_bits = self._read_tdo(len(buff)) if return_tdo else None
    self._get_adv_trans_stats(0x08, return_tdo)
    return tdo_bits

Command controller to write TDI data (with constant TMS bit) to the
physical scan chain. Optionally return TDO bits sent back from the scan
chain.

Args:
    buff - bits to send over TDI line of scan chain (bitarray)
    return_tdo (bool) - return the device's bitarray response
    TMS (bool) - whether TMS should send a bitarray of all 0's of same
        length as `buff` (i.e. False) or all 1's (i.e. True)

Returns:
    None by default, or the (bitarray) response of the device after
    receiving data, if return_tdo is True.

Usage:
    >>> from proteusisc import getAttachedControllers, bitarray
    >>> c = getAttachedControllers()[0]
    >>> c.jtag_enable()
    >>> c.write_tdi_bits(bitarray("11111"), return_tdo=True)
    >>> c.jtag_disable()
def read_tdo_bits(self, count, TMS=True, TDI=False):
    self._check_jtag()
    self._update_scanchain(bool(TMS))
    self.bulkCommandDefault(
        _BMSG_READ_TDO % (TMS, TDI, count.to_bytes(4, 'little')))
    res = self._read_tdo(count)
    self._get_adv_trans_stats(_BMSG_READ_TDO[2], True)
    return res

Command controller to issue [count] bit transfers to the physical scan
chain, with a constant TMS and TDI value, and read back the returned TDO
bits.

Args:
    count (int) - Number of bits to read from TDO and write to TMS/TDI
    TMS (bool) - constant value to write to TMS for each bit read from
        TDO.
    TDI (bool) - constant value to write to TDI for each bit read from
        TDO.

Returns:
    The response (bitarray) from the physical scan chain's TDO line.

Usage:
    >>> from proteusisc import getAttachedControllers
    >>> c = getAttachedControllers()[0]
    >>> c.jtag_enable()
    >>> data = c.read_tdo_bits(32)
    >>> c.jtag_disable()
def _readFastaFile(filepath):
    processSequences = lambda i: ''.join([s.rstrip() for s in i]).rstrip('*')
    processHeaderLine = lambda line: line[1:].rstrip()
    with io.open(filepath) as openfile:
        # Iterate through lines until the first header is encountered
        try:
            line = next(openfile)
            while line[0] != '>':
                line = next(openfile)
            header = processHeaderLine(line)
            sequences = list()
        except StopIteration:
            errorText = 'File does not contain fasta entries.'
            raise maspy.errors.FileFormatError(errorText)
        for line in openfile:
            if line[0] == '>':
                yield header, processSequences(sequences)
                header = processHeaderLine(line)
                sequences = list()
            else:
                sequences.append(line)
        # Yield the last entry
        if sequences:
            yield header, processSequences(sequences)

Reads a FASTA file and yields tuples of 'header' and 'sequence' entries.

:param filepath: file path of the FASTA file
:yields: FASTA entries in the format ('header', 'sequence'). The 'header'
    string does not contain the '>' and trailing white spaces. The
    'sequence' string does not contain trailing white spaces, a '*' at the
    end of the sequence is removed.

See also :func:`importProteinDatabase` and
:func:`maspy.peptidemethods.digestInSilico`.
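A usage sketch (the file path is illustrative)::

    for header, sequence in _readFastaFile('proteins.fasta'):
        print(header, len(sequence))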
def _extractFastaHeader(fastaHeader, parser=None, forceId=False):
    if parser is None:
        try:
            headerInfo = pyteomics.fasta.parse(fastaHeader)
        except pyteomics.auxiliary.PyteomicsError as pyteomicsError:
            # If forceId is set True, use the whole header as an id
            if forceId:
                headerInfo = {'id': fastaHeader}
            else:
                raise pyteomicsError
    else:
        headerInfo = parser(fastaHeader)
    return headerInfo

Parses a fasta header and returns the extracted information in a
dictionary. Unless a custom parser is specified, a ``Pyteomics`` function
is used, which provides parsers for the formats of UniProtKB, UniRef,
UniParc and UniMES (UniProt Metagenomic and Environmental Sequences),
described at `www.uniprot.org <http://www.uniprot.org/help/fasta-headers>`_.

:param fastaHeader: str, protein entry header from a fasta file
:param parser: a function that takes a fastaHeader string and returns a
    dictionary, containing at least the key "id". If None the parser
    function from pyteomics ``pyteomics.fasta.parse()`` is used.
:param forceId: bool, if True and no id can be extracted from the fasta
    header the whole header sequence is used as a protein id instead of
    raising an exception.
:returns: dict, describing a fasta header
def fastaParseSgd(header):
    rePattern = r'([\S]+)\s([\S]+).+(".+")'
    ID, name, description = re.match(rePattern, header).groups()
    info = {'id': ID, 'name': name, 'description': description}
    return info
Custom parser for fasta headers in the SGD format, see www.yeastgenome.org. :param header: str, protein entry header from a fasta file :returns: dict, parsed header
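An illustrative call with a made-up SGD-style header (the description
keeps its surrounding quotes)::

    info = fastaParseSgd('YGR192C TDH3 "Glyceraldehyde-3-phosphate '
                         'dehydrogenase"')
    print(info['id'], info['name'])  # -> YGR192C TDH3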