def cmp_sccs(sccs1, sccs2):
"""Order SCOP concise classification strings (sccs).
a.4.5.1 < a.4.5.11 < b.1.1.1
A sccs (e.g. a.4.5.11) compactly represents a domain's classification.
The letter represents the class, and the numbers are the fold,
superfamily, and family, respectively.
"""
s1 = sccs1.split(".")
s2 = sccs2.split(".")
c1, c2 = s1[0], s2[0]
if c1 < c2:
return -1
if c1 > c2:
return +1
for c1, c2 in zip(s1[1:], s2[1:]):
i1 = int(c1)
i2 = int(c2)
if i1 < i2:
return -1
if i1 > i2:
return +1
n1 = len(s1)
n2 = len(s2)
if n1 < n2:
return -1
if n1 > n2:
return +1
return 0
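A quick usage sketch (assuming ``cmp_sccs`` is in scope): old-style comparators plug into ``sorted`` via ``functools.cmp_to_key``.
from functools import cmp_to_key

sccs_list = ["b.1.1.1", "a.4.5.11", "a.4.5.1"]
print(sorted(sccs_list, key=cmp_to_key(cmp_sccs)))
# ['a.4.5.1', 'a.4.5.11', 'b.1.1.1']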
def parse_domain(term):
"""Convert an ASTRAL header string into a Scop domain.
An ASTRAL (http://astral.stanford.edu/) header contains a concise
description of a SCOP domain. A very similar format is used when a
Domain object is converted into a string. The Domain returned by this
method contains most of the SCOP information, but it will not be located
within the SCOP hierarchy (i.e., the parent node will be None). The
description is composed of the SCOP protein and species descriptions.
A typical ASTRAL header looks like --
>d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}
"""
m = _domain_re.match(term)
if not m:
raise ValueError("Domain: " + term)
dom = Domain()
dom.sid = m.group(1)
dom.sccs = m.group(2)
dom.residues = Residues.Residues(m.group(3))
if not dom.residues.pdbid:
dom.residues.pdbid = dom.sid[1:5]
dom.description = m.group(4).strip()
return dom
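For example, feeding the header from the docstring back through the function (this sketch assumes Biopython is installed, where ``parse_domain`` lives in ``Bio.SCOP``):
from Bio.SCOP import parse_domain

dom = parse_domain(">d1tpt_1 a.46.2.1 (1-70) Thymidine phosphorylase {Escherichia coli}")
print(dom.sid)   # d1tpt_1
print(dom.sccs)  # a.46.2.1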
def search(
pdb=None,
key=None,
sid=None,
disp=None,
dir=None,
loc=None,
cgi="http://scop.mrc-lmb.cam.ac.uk/legacy/search.cgi",
**keywds,
):
"""Access SCOP search and return a handle to the results.
Access search.cgi and return a handle to the results. See the
online help file for an explanation of the parameters:
http://scop.mrc-lmb.cam.ac.uk/legacy/help.html
Raises an IOError if there's a network error.
"""
params = {"pdb": pdb, "key": key, "sid": sid, "disp": disp, "dir": dir, "loc": loc}
variables = {}
for k, v in params.items():
if v is not None:
variables[k] = v
variables.update(keywds)
return _open(cgi, variables)
def _open(cgi, params=None, get=1):
"""Open a handle to SCOP and return it (PRIVATE).
Open a handle to SCOP. cgi is the URL for the cgi script to access.
params is a dictionary with the options to pass to it. get is a boolean
that describes whether a GET should be used.
"""
# Open a handle to SCOP.
if params is None:
params = {}
options = urlencode(params)
if get: # do a GET
if options:
cgi += "?" + options
handle = urlopen(cgi)
else: # do a POST
handle = urlopen(cgi, data=options)
return handle
def _list_from_csv(csv_string, caster=None):
"""Transform the given comma-separated string into a list (PRIVATE).
:param csv_string: comma-separated input string
:type csv_string: string
:param caster: function used to cast each item in the input string
to its intended type
:type caster: callable, accepts string, returns object
"""
if caster is None:
return [x for x in csv_string.split(",") if x]
else:
return [caster(x) for x in csv_string.split(",") if x]
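Two quick examples, assuming ``_list_from_csv`` is in scope; note that empty fields are silently dropped:
print(_list_from_csv("10,,21,30", caster=int))  # [10, 21, 30]
print(_list_from_csv("a,b,,c"))                 # ['a', 'b', 'c']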
def _reorient_starts(starts, blksizes, seqlen, strand):
"""Reorients block starts into the opposite strand's coordinates (PRIVATE).
:param starts: start coordinates
:type starts: list [int]
:param blksizes: block sizes
:type blksizes: list [int]
:param seqlen: sequence length
:type seqlen: int
:param strand: sequence strand
:type strand: int, choice of -1, 0, or 1
"""
if len(starts) != len(blksizes):
raise RuntimeError(
"Unequal start coordinates and block sizes list (%r vs %r)"
% (len(starts), len(blksizes))
)
# see: http://genome.ucsc.edu/goldenPath/help/blatSpec.html
# no need to reorient if it's already the positive strand
if strand >= 0:
return starts
else:
# the plus-oriented coordinate is calculated by this:
# plus_coord = length - minus_coord - block_size
return [seqlen - start - blksize for start, blksize in zip(starts, blksizes)]
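A small worked example (assuming ``_reorient_starts`` is in scope): on a 20-base sequence, a minus-strand block starting at 0 with size 5 maps to plus-strand start 20 - 0 - 5 = 15.
print(_reorient_starts([0, 10], [5, 5], 20, -1))  # [15, 5]
print(_reorient_starts([0, 10], [5, 5], 20, 1))   # [0, 10], returned unchanged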
def _is_protein(psl):
"""Validate if psl is protein (PRIVATE)."""
# check if query is protein or not
# adapted from http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
if len(psl["strand"]) == 2:
if psl["strand"][1] == "+":
return psl["tend"] == psl["tstarts"][-1] + 3 * psl["blocksizes"][-1]
elif psl["strand"][1] == "-":
return psl["tstart"] == psl["tsize"] - (
psl["tstarts"][-1] + 3 * psl["blocksizes"][-1]
)
return False
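To see the check in action, here is a contrived minimal PSL dictionary containing only the keys the function reads, describing a protein query on a plus-strand target:
psl = {"strand": "++", "tstarts": [0], "blocksizes": [10], "tend": 30}
print(_is_protein(psl))  # True, since 30 == 0 + 3 * 10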
def _calc_millibad(psl, is_protein):
"""Calculate millibad (PRIVATE)."""
# adapted from http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
size_mul = 3 if is_protein else 1
millibad = 0
qali_size = size_mul * (psl["qend"] - psl["qstart"])
tali_size = psl["tend"] - psl["tstart"]
ali_size = min(qali_size, tali_size)
if ali_size <= 0:
return 0
size_dif = qali_size - tali_size
size_dif = 0 if size_dif < 0 else size_dif
total = size_mul * (psl["matches"] + psl["repmatches"] + psl["mismatches"])
if total != 0:
millibad = (
1000
* (
psl["mismatches"] * size_mul
+ psl["qnuminsert"]
+ round(3 * log(1 + size_dif))
)
) / total
return millibad
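Note that the function relies on ``log`` imported from the ``math`` module. As a sanity check with a toy PSL dictionary (hypothetical values, only the keys the function reads): 90 matches and 10 mismatches over a gapless 100-base alignment give millibad 100, i.e. 90% identity.
psl = {
    "matches": 90, "repmatches": 0, "mismatches": 10,
    "qstart": 0, "qend": 100, "tstart": 0, "tend": 100,
    "qnuminsert": 0,
}
mb = _calc_millibad(psl, is_protein=False)
print(mb)                # 100.0
print(100.0 - mb * 0.1)  # 90.0, the percent identity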
def _calc_score(psl, is_protein):
"""Calculate score (PRIVATE)."""
# adapted from http://genome.ucsc.edu/FAQ/FAQblat.html#blat4
size_mul = 3 if is_protein else 1
return (
size_mul * (psl["matches"] + (psl["repmatches"] >> 1))
- size_mul * psl["mismatches"]
- psl["qnuminsert"]
- psl["tnuminsert"]
)
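Using the same kind of toy input, note that repmatches are halved by the right shift before being added:
psl = {"matches": 90, "repmatches": 2, "mismatches": 10,
       "qnuminsert": 1, "tnuminsert": 0}
print(_calc_score(psl, is_protein=False))  # (90 + 1) - 10 - 1 - 0 = 80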
def _create_hsp(hid, qid, psl):
"""Create high scoring pair object (PRIVATE)."""
# protein flag
is_protein = _is_protein(psl)
# strand
# if query is protein, strand is 0
if is_protein:
qstrand = 0
else:
qstrand = 1 if psl["strand"][0] == "+" else -1
# try to get hit strand, if it exists
try:
hstrand = 1 if psl["strand"][1] == "+" else -1
except IndexError:
hstrand = 1 # hit strand defaults to plus
blocksize_multiplier = 3 if is_protein else 1
# query block starts
qstarts = _reorient_starts(psl["qstarts"], psl["blocksizes"], psl["qsize"], qstrand)
# hit block starts
if len(psl["strand"]) == 2:
hstarts = _reorient_starts(
psl["tstarts"],
[blocksize_multiplier * i for i in psl["blocksizes"]],
psl["tsize"],
hstrand,
)
else:
hstarts = psl["tstarts"]
# set query and hit coords
# this assumes each block has no gaps (which seems to be the case)
assert len(qstarts) == len(hstarts) == len(psl["blocksizes"])
query_range_all = list(
zip(qstarts, [x + y for x, y in zip(qstarts, psl["blocksizes"])])
)
hit_range_all = list(
zip(
hstarts,
[x + y * blocksize_multiplier for x, y in zip(hstarts, psl["blocksizes"])],
)
)
# check length of sequences and coordinates, all must match
if "tseqs" in psl and "qseqs" in psl:
assert (
len(psl["tseqs"])
== len(psl["qseqs"])
== len(query_range_all)
== len(hit_range_all)
)
else:
assert len(query_range_all) == len(hit_range_all)
frags = []
# iterating over query_range_all, but hit_range_all works just as well
for idx, qcoords in enumerate(query_range_all):
hseqlist = psl.get("tseqs")
hseq = "" if not hseqlist else hseqlist[idx]
qseqlist = psl.get("qseqs")
qseq = "" if not qseqlist else qseqlist[idx]
frag = HSPFragment(hid, qid, hit=hseq, query=qseq)
# set molecule type
frag.molecule_type = "DNA"
# set coordinates
frag.query_start = qcoords[0]
frag.query_end = qcoords[1]
frag.hit_start = hit_range_all[idx][0]
frag.hit_end = hit_range_all[idx][1]
# and strands
frag.query_strand = qstrand
frag.hit_strand = hstrand
frags.append(frag)
# create hsp object
hsp = HSP(frags)
# check if start and end are set correctly
assert hsp.query_start == psl["qstart"]
assert hsp.query_end == psl["qend"]
assert hsp.hit_start == psl["tstart"]
assert hsp.hit_end == psl["tend"]
# and check block spans as well
hit_spans = [span / blocksize_multiplier for span in hsp.hit_span_all]
assert hit_spans == hsp.query_span_all == psl["blocksizes"]
# set its attributes
hsp.match_num = psl["matches"]
hsp.mismatch_num = psl["mismatches"]
hsp.match_rep_num = psl["repmatches"]
hsp.n_num = psl["ncount"]
hsp.query_gapopen_num = psl["qnuminsert"]
hsp.query_gap_num = psl["qbaseinsert"]
hsp.hit_gapopen_num = psl["tnuminsert"]
hsp.hit_gap_num = psl["tbaseinsert"]
hsp.ident_num = psl["matches"] + psl["repmatches"]
hsp.gapopen_num = psl["qnuminsert"] + psl["tnuminsert"]
hsp.gap_num = psl["qbaseinsert"] + psl["tbaseinsert"]
hsp.query_is_protein = is_protein
hsp.ident_pct = 100.0 - _calc_millibad(psl, is_protein) * 0.1
hsp.score = _calc_score(psl, is_protein)
# helper flag, for writing
hsp._has_hit_strand = len(psl["strand"]) == 2
return hsp
def _set_qresult_hits(qresult, hit_rows=()):
"""Append Hits without alignments into QueryResults (PRIVATE)."""
for hit_row in hit_rows:
hit_id, remainder = hit_row.split(" ", 1)
# TODO: parse hit and hsp properties properly; by dealing with:
# - any character in the description (brackets, spaces, etc.)
# - possible [f] or [r] presence (for frame info)
# - possible presence of E2() column
# - possible incomplete hit_id due to column length limit
# The current method only looks at the Hit ID, none of the things above
if hit_id not in qresult:
frag = HSPFragment(hit_id, qresult.id)
hsp = HSP([frag])
hit = Hit([hsp])
qresult.append(hit)
return qresult
def _set_hsp_seqs(hsp, parsed, program):
"""Set HSPs sequences (PRIVATE).
:param hsp: HSP whose properties will be set
:type hsp: HSP
:param parsed: parsed values of the HSP attributes
:type parsed: dictionary {string: object}
:param program: program name
:type program: string
"""
# get aligned sequences and check if they have equal lengths
start = 0
for seq_type in ("hit", "query"):
if "tfast" not in program:
pseq = parsed[seq_type]
# adjust start and end coordinates based on the amount of
# filler characters
start, stop = _get_aln_slice_coords(pseq)
start_adj = len(re.search(_RE_START_EXC, pseq["seq"]).group(0))
stop_adj = len(re.search(_RE_END_EXC, pseq["seq"]).group(0))
start = start + start_adj
stop = stop + start_adj - stop_adj
parsed[seq_type]["seq"] = pseq["seq"][start:stop]
if len(parsed["query"]["seq"]) != len(parsed["hit"]["seq"]):
raise ValueError(
"Length mismatch: %r %r"
% (len(parsed["query"]["seq"]), len(parsed["hit"]["seq"]))
)
if "similarity" in hsp.aln_annotation:
# only using 'start' since FASTA seems to have trimmed the 'excess'
# end part
hsp.aln_annotation["similarity"] = hsp.aln_annotation["similarity"][start:]
# hit or query works equally well here
assert len(hsp.aln_annotation["similarity"]) == len(parsed["hit"]["seq"])
# query and hit sequence types must be the same
assert parsed["query"]["_type"] == parsed["hit"]["_type"]
type_val = parsed["query"]["_type"] # hit works fine too
molecule_type = "DNA" if type_val == "D" else "protein"
setattr(hsp.fragment, "molecule_type", molecule_type)
for seq_type in ("hit", "query"):
# get and set start and end coordinates
start = int(parsed[seq_type]["_start"])
end = int(parsed[seq_type]["_stop"])
setattr(hsp.fragment, seq_type + "_start", min(start, end) - 1)
setattr(hsp.fragment, seq_type + "_end", max(start, end))
# set seq and molecule type
setattr(hsp.fragment, seq_type, parsed[seq_type]["seq"])
if molecule_type != "protein":
# get strand from coordinate; start <= end is plus
# start > end is minus
if start <= end:
setattr(hsp.fragment, seq_type + "_strand", 1)
else:
setattr(hsp.fragment, seq_type + "_strand", -1)
else:
setattr(hsp.fragment, seq_type + "_strand", 0)
def _get_aln_slice_coords(parsed_hsp):
"""Get HSPs sequences (PRIVATE).
To get the actual pairwise alignment sequences, we must first
translate the un-gapped sequence based coordinates into positions
in the gapped sequence (which may have a flanking region shown
using leading - characters). To date, I have never seen any
trailing flanking region shown in the m10 file, but the
following code should also cope with that.
Note that this code seems to work fine even when the "sq_offset"
entries are present as a result of using the -X command line option.
"""
seq = parsed_hsp["seq"]
seq_stripped = seq.strip("-")
disp_start = int(parsed_hsp["_display_start"])
start = int(parsed_hsp["_start"])
stop = int(parsed_hsp["_stop"])
if start <= stop:
start = start - disp_start
stop = stop - disp_start + 1
else:
start = disp_start - start
stop = disp_start - stop + 1
stop += seq_stripped.count("-")
if not (0 <= start and start < stop and stop <= len(seq_stripped)):
raise ValueError(
"Problem with sequence start/stop,\n%s[%i:%i]\n%s"
% (seq, start, stop, parsed_hsp)
)
return start, stop
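A worked example under assumed inputs: a display sequence with two leading flanking dashes and one internal gap, whose ungapped coordinates run from 10 to 14. The returned pair indexes into the dash-stripped sequence.
parsed_hsp = {
    "seq": "--ACG-TA",     # two flanking dashes, one internal gap
    "_display_start": 10,  # ungapped coordinate of the first shown residue
    "_start": 10,
    "_stop": 14,           # five aligned residues at positions 10..14
}
print(_get_aln_slice_coords(parsed_hsp))  # (0, 6)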
def getattr_str(obj, attr, fmt=None, fallback="?"):
"""Return string of the given object's attribute.
Defaults to the given fallback value if attribute is not present.
"""
try:
value = getattr(obj, attr)
except AttributeError:
return fallback
if fmt is None:
return str(value)
return fmt % value
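For instance, with a throwaway object:
class Dummy:
    evalue = 1e-05

print(getattr_str(Dummy, "evalue", fmt="%.2g"))  # 1e-05
print(getattr_str(Dummy, "bitscore"))            # ? (the fallback)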
def read_forward(handle):
"""Read through whitespaces, return the first non-whitespace line."""
while True:
line = handle.readline()
# if line is empty or line has characters and stripping does not remove
# them, return the line
if (not line) or (line and line.strip()):
return line
def get_processor(format, mapping):
"""Return the object to process the given format according to the mapping.
:param format: format name
:type format: string, lower case
:param mapping: mapping of format name and its processor object
:type mapping: dictionary {string: object}
"""
# map file format to iterator name
try:
obj_info = mapping[format]
except KeyError:
# handle the errors with helpful messages
if format is None:
raise ValueError("Format required (lower case string)") from None
elif not isinstance(format, str):
raise TypeError("Need a string for the file format (lower case)") from None
elif format != format.lower():
raise ValueError("Format string %r should be lower case" % format) from None
else:
raise ValueError(
"Unknown format %r. Supported formats are %r"
% (format, "', '".join(mapping))
) from None
mod_name, obj_name = obj_info
mod = __import__("Bio.SearchIO.%s" % mod_name, fromlist=[""])
return getattr(mod, obj_name)
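The error paths can be exercised without touching the import machinery; the mapping entry below is illustrative only:
mapping = {"blast-xml": ("BlastIO", "BlastXmlParser")}  # illustrative entry
try:
    get_processor("BLAST-XML", mapping)
except ValueError as err:
    print(err)  # Format string 'BLAST-XML' should be lower case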
def singleitem(attr=None, doc=""):
"""Property for fetching attribute from first entry of container.
Returns a property that fetches the given attribute from
the first item in a SearchIO container object.
"""
def getter(self):
if len(self._items) > 1:
raise ValueError("More than one HSPFragment objects found in HSP")
if attr is None:
return self._items[0]
return getattr(self._items[0], attr)
return property(fget=getter, doc=doc)
def allitems(attr=None, doc=""):
"""Property for fetching attribute from all entries of container.
Returns a property that fetches the given attributes from
all items in a SearchIO container object.
"""
def getter(self):
if attr is None:
return self._items
return [getattr(frag, attr) for frag in self._items]
return property(fget=getter, doc=doc)
def fullcascade(attr, doc=""):
"""Return a getter property with a cascading setter.
This is similar to ``optionalcascade``, but for SearchIO containers that have
at least one item (HSP). The getter always retrieves the attribute
value from the first item. If the items hold more than one distinct attribute value,
an error will be raised. The setter behaves like ``partialcascade``, except
that it only sets attributes to items in the object, not the object itself.
"""
def getter(self):
return getattr(self._items[0], attr)
def setter(self, value):
for item in self:
setattr(item, attr, value)
return property(fget=getter, fset=setter, doc=doc)
def optionalcascade(cont_attr, item_attr, doc=""):
"""Return a getter property with a cascading setter.
This is used for the ``id`` and ``description`` properties of the container
objects with zero or more items. These items have their own private
attributes that store the query and/or hit ID and description. When the
container has zero items, attribute values are always retrieved from the
container's attribute. Otherwise, the first item's attribute is used.
To keep the container items' query and/or hit ID and description in-sync,
the setter cascades any new value given to the items' values.
"""
def getter(self):
if self._items:
# don't use self._items here, so QueryResult can use this property
# as well (the underlying dict is not integer-indexable)
return getattr(self[0], item_attr)
else:
return getattr(self, cont_attr)
def setter(self, value):
setattr(self, cont_attr, value)
for item in self:
setattr(item, item_attr, value)
return property(fget=getter, fset=setter, doc=doc)
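A minimal sketch of the cascading behavior, using stand-in container and item classes written solely to satisfy the property's expectations (both are hypothetical, not SearchIO classes):
class Item:
    def __init__(self, id):
        self.id = id

class Container:
    id = optionalcascade("_id", "id", doc="container/item id")

    def __init__(self, items, id=None):
        self._items = items
        self._id = id

    def __getitem__(self, idx):
        return self._items[idx]

    def __iter__(self):
        return iter(self._items)

c = Container([Item("a"), Item("b")], id="fallback")
print(c.id)     # 'a', taken from the first item
c.id = "new"
print(c[1].id)  # 'new', cascaded to every item
print(Container([], id="fallback").id)  # 'fallback', no items present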
def fragcascade(attr, seq_type, doc=""):
"""Return a getter property with cascading setter, for HSPFragment objects.
Similar to ``partialcascade``, but for HSPFragment objects and acts on ``query``
or ``hit`` properties of the object if they are not None.
"""
assert seq_type in ("hit", "query")
attr_name = f"_{seq_type}_{attr}"
def getter(self):
return getattr(self, attr_name)
def setter(self, value):
setattr(self, attr_name, value)
seq = getattr(self, seq_type)
if seq is not None:
setattr(seq, attr, value)
return property(fget=getter, fset=setter, doc=doc)
def removesuffix(string, suffix):
"""Remove the suffix from the string, if it exists."""
# This method is a compatibility wrapper for Python 3.8. It should be
# removed when support for Python 3.8 is dropped. At the time of writing,
# 3.8 is already the oldest supported version.
major, minor, *_ = sys.version_info
if major == 3 and minor == 8:
if suffix and string.endswith(suffix):
return string[: -len(suffix)]
return string
return string.removesuffix(suffix)
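Behavior matches ``str.removesuffix`` on every supported interpreter:
print(removesuffix("query.fasta", ".fasta"))  # query
print(removesuffix("query.fasta", ".txt"))    # query.fasta, unchanged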
def parse(handle, format=None, **kwargs):
"""Iterate over search tool output file as QueryResult objects.
Arguments:
- handle - Handle to the file, or the filename as a string.
- format - Lower case string denoting one of the supported formats.
- kwargs - Format-specific keyword arguments.
This function is used to iterate over each query in a given search output
file:
>>> from Bio import SearchIO
>>> qresults = SearchIO.parse('Blast/mirna.xml', 'blast-xml')
>>> qresults
<generator object ...>
>>> for qresult in qresults:
... print("Search %s has %i hits" % (qresult.id, len(qresult)))
...
Search 33211 has 100 hits
Search 33212 has 44 hits
Search 33213 has 95 hits
Depending on the file format, ``parse`` may also accept additional keyword
argument(s) that modifies the behavior of the format parser. Here is a
simple example, where the keyword argument enables parsing of a commented
BLAST tabular output file:
>>> from Bio import SearchIO
>>> for qresult in SearchIO.parse('Blast/mirna.tab', 'blast-tab', comments=True):
... print("Search %s has %i hits" % (qresult.id, len(qresult)))
...
Search 33211 has 100 hits
Search 33212 has 44 hits
Search 33213 has 95 hits
"""
# get the iterator object and do error checking
iterator = get_processor(format, _ITERATOR_MAP)
# HACK: force BLAST XML decoding to use utf-8
handle_kwargs = {}
if format == "blast-xml":
handle_kwargs["encoding"] = "utf-8"
# and start iterating
with as_handle(handle, **handle_kwargs) as source_file:
generator = iterator(source_file, **kwargs)
yield from generator
def read(handle, format=None, **kwargs):
"""Turn a search output file containing one query into a single QueryResult.
- handle - Handle to the file, or the filename as a string.
- format - Lower case string denoting one of the supported formats.
- kwargs - Format-specific keyword arguments.
``read`` is used for parsing search output files containing exactly one query:
>>> from Bio import SearchIO
>>> qresult = SearchIO.read('Blast/xml_2226_blastp_004.xml', 'blast-xml')
>>> print("%s %s" % (qresult.id, qresult.description))
...
gi|11464971:4-101 pleckstrin [Mus musculus]
If the given handle has no results, an exception will be raised:
>>> from Bio import SearchIO
>>> qresult = SearchIO.read('Blast/tab_2226_tblastn_002.txt', 'blast-tab')
Traceback (most recent call last):
...
ValueError: No query results found in handle
Similarly, if the given handle has more than one result, an exception will
be raised:
>>> from Bio import SearchIO
>>> qresult = SearchIO.read('Blast/tab_2226_tblastn_001.txt', 'blast-tab')
Traceback (most recent call last):
...
ValueError: More than one query result found in handle
Like ``parse``, ``read`` may also accept keyword argument(s) depending on the
search output file format.
"""
query_results = parse(handle, format, **kwargs)
try:
query_result = next(query_results)
except StopIteration:
raise ValueError("No query results found in handle") from None
try:
next(query_results)
raise ValueError("More than one query result found in handle")
except StopIteration:
pass
return query_result
def to_dict(qresults, key_function=None):
"""Turn a QueryResult iterator or list into a dictionary.
- qresults - Iterable returning QueryResult objects.
- key_function - Optional callback function which when given a
QueryResult object should return a unique key for the
dictionary. Defaults to using .id of the result.
This function enables access of QueryResult objects from a single search
output file using its identifier.
>>> from Bio import SearchIO
>>> qresults = SearchIO.parse('Blast/wnts.xml', 'blast-xml')
>>> search_dict = SearchIO.to_dict(qresults)
>>> list(search_dict)
['gi|195230749:301-1383', 'gi|325053704:108-1166', ..., 'gi|53729353:216-1313']
>>> search_dict['gi|156630997:105-1160']
QueryResult(id='gi|156630997:105-1160', 5 hits)
By default, the dictionary key is the QueryResult's string ID. This may be
changed by supplying a callback function that returns the desired identifier.
Here is an example using a function that removes the 'gi|' part in the
beginning of the QueryResult ID.
>>> from Bio import SearchIO
>>> qresults = SearchIO.parse('Blast/wnts.xml', 'blast-xml')
>>> key_func = lambda qresult: qresult.id.split('|')[1]
>>> search_dict = SearchIO.to_dict(qresults, key_func)
>>> list(search_dict)
['195230749:301-1383', '325053704:108-1166', ..., '53729353:216-1313']
>>> search_dict['156630997:105-1160']
QueryResult(id='gi|156630997:105-1160', 5 hits)
Note that the callback function does not change the QueryResult's ID value.
It only changes the key value used to retrieve the associated QueryResult.
As this function loads all QueryResult objects into memory, it may be
unsuitable for dealing with files containing many queries. In that case, it
is recommended that you use either ``index`` or ``index_db``.
Since Python 3.7, the default dict class maintains key order, meaning
this dictionary will reflect the order of records given to it. For
CPython and PyPy, this was already implemented for Python 3.6, so
effectively you can always assume the record order is preserved.
"""
def _default_key_function(rec):
return rec.id
if key_function is None:
key_function = _default_key_function
qdict = {}
for qresult in qresults:
key = key_function(qresult)
if key in qdict:
raise ValueError("Duplicate key %r" % key)
qdict[key] = qresult
return qdict
def index(filename, format=None, key_function=None, **kwargs):
"""Indexes a search output file and returns a dictionary-like object.
- filename - string giving name of file to be indexed
- format - Lower case string denoting one of the supported formats.
- key_function - Optional callback function which when given a
QueryResult should return a unique key for the dictionary.
- kwargs - Format-specific keyword arguments.
Index returns a pseudo-dictionary object with QueryResult objects as its
values and a string identifier as its keys. The function is mainly useful
for dealing with large search output files, as it enables access to any
given QueryResult object much faster than using parse or read.
Index works by storing in-memory the start locations of all queries in a
file. When a user requests access to the query, this function will jump
to its start position, parse the whole query, and return it as a
QueryResult object:
>>> from Bio import SearchIO
>>> search_idx = SearchIO.index('Blast/wnts.xml', 'blast-xml')
>>> search_idx
SearchIO.index('Blast/wnts.xml', 'blast-xml', key_function=None)
>>> sorted(search_idx)
['gi|156630997:105-1160', 'gi|195230749:301-1383', ..., 'gi|53729353:216-1313']
>>> search_idx['gi|195230749:301-1383']
QueryResult(id='gi|195230749:301-1383', 5 hits)
>>> search_idx.close()
If the file is BGZF compressed, this is detected automatically. Ordinary
GZIP files are not supported:
>>> from Bio import SearchIO
>>> search_idx = SearchIO.index('Blast/wnts.xml.bgz', 'blast-xml')
>>> search_idx
SearchIO.index('Blast/wnts.xml.bgz', 'blast-xml', key_function=None)
>>> search_idx['gi|195230749:301-1383']
QueryResult(id='gi|195230749:301-1383', 5 hits)
>>> search_idx.close()
You can supply a custom callback function to alter the default identifier
string. This function should accept as its input the QueryResult ID string
and return a modified version of it.
>>> from Bio import SearchIO
>>> key_func = lambda id: id.split('|')[1]
>>> search_idx = SearchIO.index('Blast/wnts.xml', 'blast-xml', key_func)
>>> search_idx
SearchIO.index('Blast/wnts.xml', 'blast-xml', key_function=<function <lambda> at ...>)
>>> sorted(search_idx)
['156630997:105-1160', ..., '371502086:108-1205', '53729353:216-1313']
>>> search_idx['156630997:105-1160']
QueryResult(id='gi|156630997:105-1160', 5 hits)
>>> search_idx.close()
Note that the callback function does not change the QueryResult's ID value.
It only changes the key value used to retrieve the associated QueryResult.
"""
if not isinstance(filename, str):
raise TypeError("Need a filename (not a handle)")
from Bio.File import _IndexedSeqFileDict
proxy_class = get_processor(format, _INDEXER_MAP)
repr = f"SearchIO.index({filename!r}, {format!r}, key_function={key_function!r})"
return _IndexedSeqFileDict(
proxy_class(filename, **kwargs), key_function, repr, "QueryResult"
)
def index_db(index_filename, filenames=None, format=None, key_function=None, **kwargs):
"""Indexes several search output files into an SQLite database.
- index_filename - The SQLite filename.
- filenames - List of strings specifying file(s) to be indexed, or when
indexing a single file this can be given as a string.
(optional if reloading an existing index, but must match)
- format - Lower case string denoting one of the supported formats.
(optional if reloading an existing index, but must match)
- key_function - Optional callback function which when given a
QueryResult identifier string should return a unique
key for the dictionary.
- kwargs - Format-specific keyword arguments.
The ``index_db`` function is similar to ``index`` in that it indexes the start
position of all queries from search output files. The main difference is
instead of storing these indices in-memory, they are written to disk as an
SQLite database file. This allows the indices to persist between Python
sessions. This enables access to any queries in the file without any
indexing overhead, provided it has been indexed at least once.
>>> from Bio import SearchIO
>>> idx_filename = ":memory:" # Use a real filename, this is in RAM only!
>>> db_idx = SearchIO.index_db(idx_filename, 'Blast/mirna.xml', 'blast-xml')
>>> sorted(db_idx)
['33211', '33212', '33213']
>>> db_idx['33212']
QueryResult(id='33212', 44 hits)
>>> db_idx.close()
``index_db`` can also index multiple files and store them in the same
database, making it easier to group multiple search files and access them
from a single interface.
>>> from Bio import SearchIO
>>> idx_filename = ":memory:" # Use a real filename, this is in RAM only!
>>> files = ['Blast/mirna.xml', 'Blast/wnts.xml']
>>> db_idx = SearchIO.index_db(idx_filename, files, 'blast-xml')
>>> sorted(db_idx)
['33211', '33212', '33213', 'gi|156630997:105-1160', ..., 'gi|53729353:216-1313']
>>> db_idx['33212']
QueryResult(id='33212', 44 hits)
>>> db_idx.close()
One common example where this is helpful is if you had a large set of
query sequences (say ten thousand) which you split into ten query files
of one thousand sequences each in order to run as ten separate BLAST jobs
on a cluster. You could use ``index_db`` to index the ten BLAST output
files together for seamless access to all the results as one dictionary.
Note that ':memory:' rather than an index filename tells SQLite to hold
the index database in memory. This is useful for quick tests, but using
the Bio.SearchIO.index(...) function instead would use less memory.
BGZF compressed files are supported, and detected automatically. Ordinary
GZIP compressed files are not supported.
See also Bio.SearchIO.index(), Bio.SearchIO.to_dict(), and the Python module
glob which is useful for building lists of files.
"""
# cast filenames to list if it's a string
# (can we check if it's a string or a generator?)
if isinstance(filenames, str):
filenames = [filenames]
from Bio.File import _SQLiteManySeqFilesDict
repr = f"SearchIO.index_db({index_filename!r}, filenames={filenames!r}, {format!r}, key_function={key_function!r})"
def proxy_factory(format, filename=None):
"""Given a filename returns proxy object, else boolean if format OK."""
if filename:
return get_processor(format, _INDEXER_MAP)(filename, **kwargs)
else:
return format in _INDEXER_MAP
return _SQLiteManySeqFilesDict(
index_filename, filenames, proxy_factory, format, key_function, repr
)
def write(qresults, handle, format=None, **kwargs):
"""Write QueryResult objects to a file in the given format.
- qresults - An iterator returning QueryResult objects or a single
QueryResult object.
- handle - Handle to the file, or the filename as a string.
- format - Lower case string denoting one of the supported formats.
- kwargs - Format-specific keyword arguments.
The ``write`` function writes QueryResult object(s) into the given output
handle / filename. You can supply it with a single QueryResult object or an
iterable returning one or more QueryResult objects. In both cases, the
function will return a tuple of four values: the number of QueryResult, Hit,
HSP, and HSPFragment objects it writes to the output file::
from Bio import SearchIO
qresults = SearchIO.parse('Blast/mirna.xml', 'blast-xml')
SearchIO.write(qresults, 'results.tab', 'blast-tab')
<stdout> (3, 239, 277, 277)
The output of different formats may be adjusted using the format-specific
keyword arguments. Here is an example that writes BLAT PSL output file with
a header::
from Bio import SearchIO
qresults = SearchIO.parse('Blat/psl_34_001.psl', 'blat-psl')
SearchIO.write(qresults, 'results.tab', 'blat-psl', header=True)
<stdout> (2, 13, 22, 26)
"""
# turn qresults into an iterator if it's a single QueryResult object
if isinstance(qresults, QueryResult):
qresults = iter([qresults])
else:
qresults = iter(qresults)
# get the writer object and do error checking
writer_class = get_processor(format, _WRITER_MAP)
# write to the handle
with as_handle(handle, "w") as target_file:
writer = writer_class(target_file, **kwargs)
# count how many qresults, hits, and hsps
qresult_count, hit_count, hsp_count, frag_count = writer.write_file(qresults)
return qresult_count, hit_count, hsp_count, frag_count
def convert(in_file, in_format, out_file, out_format, in_kwargs=None, out_kwargs=None):
"""Convert between two search output formats, return number of records.
- in_file - Handle to the input file, or the filename as string.
- in_format - Lower case string denoting the format of the input file.
- out_file - Handle to the output file, or the filename as string.
- out_format - Lower case string denoting the format of the output file.
- in_kwargs - Dictionary of keyword arguments for the input function.
- out_kwargs - Dictionary of keyword arguments for the output function.
The convert function is a shortcut function for ``parse`` and ``write``. It has
the same return type as ``write``. Format-specific arguments may be passed to
the convert function, but only as dictionaries.
Here is an example of using ``convert`` to convert from a BLAST+ XML file
into a tabular file with comments::
from Bio import SearchIO
in_file = 'Blast/mirna.xml'
in_fmt = 'blast-xml'
out_file = 'results.tab'
out_fmt = 'blast-tab'
out_kwarg = {'comments': True}
SearchIO.convert(in_file, in_fmt, out_file, out_fmt, out_kwargs=out_kwarg)
<stdout> (3, 239, 277, 277)
Given that different search output files provide different statistics and
different levels of detail, the convert function is limited to converting
between formats that share the same statistics, and to output formats with
the same level of detail or less.
For example, converting from a BLAST+ XML output to a HMMER table file
is not possible, as these are two search programs with different kinds of
statistics. In theory, you may provide the necessary values required by the
HMMER table file (e.g. conditional e-values, envelope coordinates, etc).
However, these values are likely to hold little meaning as they are not true
HMMER-computed values.
Another example is converting from BLAST+ XML to a BLAST+ tabular file. This
is possible, as BLAST+ XML provides all the values necessary to create a
BLAST+ tabular file. However, the reverse conversion may not be possible:
the XML file covers details that are not found in a tabular file (e.g. the
lambda and kappa values).
"""
if in_kwargs is None:
in_kwargs = {}
if out_kwargs is None:
out_kwargs = {}
qresults = parse(in_file, in_format, **in_kwargs)
return write(qresults, out_file, out_format, **out_kwargs)
def _compute_gapopen_num(hsp):
"""Return the number of gap openings in the given HSP (PRIVATE)."""
gapopen = 0
for seq_type in ("query", "hit"):
seq = str(getattr(hsp, seq_type).seq)
gapopen += len(re.findall(_RE_GAPOPEN, seq))
return gapopen
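The ``_RE_GAPOPEN`` pattern itself is not shown in this section, so the sketch below assumes a pattern matching a residue immediately followed by a gap character, and uses bare namespaces as stand-ins for the HSP and sequence objects:
import re
from types import SimpleNamespace

_RE_GAPOPEN = re.compile(r"\w-")  # assumed pattern, not the module's definition

hsp = SimpleNamespace(
    query=SimpleNamespace(seq="ACG--TA"),
    hit=SimpleNamespace(seq="AC-GTTA"),
)
print(_compute_gapopen_num(hsp))  # 2, one gap opening per sequence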
def _augment_blast_hsp(hsp, attr):
"""Calculate the given HSP attribute, for writing (PRIVATE)."""
if not hasattr(hsp, attr) and not attr.endswith("_pct"):
# aln_span is number of identical matches + mismatches + gaps
if attr == "aln_span":
hsp.aln_span = hsp.ident_num + hsp.mismatch_num + hsp.gap_num
# ident and gap requires the num values to be computed first
elif attr.startswith("ident"):
setattr(hsp, attr, hsp.aln_span - hsp.mismatch_num - hsp.gap_num)
elif attr.startswith("gap"):
setattr(hsp, attr, hsp.aln_span - hsp.ident_num - hsp.mismatch_num)
elif attr == "mismatch_num":
setattr(hsp, attr, hsp.aln_span - hsp.ident_num - hsp.gap_num)
elif attr == "gapopen_num":
if not hasattr(hsp, "query") or not hasattr(hsp, "hit"):
raise AttributeError
hsp.gapopen_num = _compute_gapopen_num(hsp)
# if the attr is a percent value, calculate it
if attr == "ident_pct":
hsp.ident_pct = hsp.ident_num / hsp.aln_span * 100
elif attr == "pos_pct":
hsp.pos_pct = hsp.pos_num / hsp.aln_span * 100
elif attr == "gap_pct":
hsp.gap_pct = hsp.gap_num / hsp.aln_span * 100
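A quick demonstration with a bare namespace object and hypothetical counts: given identities, mismatches, and gaps, the alignment span and then the identity percentage can be back-filled.
from types import SimpleNamespace

hsp = SimpleNamespace(ident_num=90, mismatch_num=8, gap_num=2)
_augment_blast_hsp(hsp, "aln_span")
print(hsp.aln_span)   # 100 = 90 + 8 + 2
_augment_blast_hsp(hsp, "ident_pct")
print(hsp.ident_pct)  # 90.0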
def _extract_ids_and_descs(raw_id, raw_desc):
"""Extract IDs, descriptions, and raw ID from raw values (PRIVATE).
Given values of the ``Hit_id`` and ``Hit_def`` elements, this function
returns a tuple of three elements: all IDs, all descriptions, and the
BLAST-generated ID. The BLAST-generated ID is set to ``None`` if no
BLAST-generated IDs are present.
"""
ids = []
descs = []
blast_gen_id = raw_id
if raw_id.startswith("gnl|BL_ORD_ID|"):
id_desc_line = raw_desc
else:
id_desc_line = raw_id + " " + raw_desc
# create a list of lists, each list containing an ID and description
# or just an ID, if description is not present
id_desc_pairs = [
re.split(_RE_ID_DESC_PATTERN, x, maxsplit=1)
for x in re.split(_RE_ID_DESC_PAIRS_PATTERN, id_desc_line)
]
# make sure empty descriptions are added as empty strings
# also, we return lists for compatibility reasons between Py2 and Py3
for pair in id_desc_pairs:
if len(pair) != 2:
pair.append("")
ids.append(pair[0])
descs.append(pair[1])
return (ids, descs, blast_gen_id)
def _flip_codons(codon_seq, target_seq):
"""Flips the codon characters from one seq to another (PRIVATE)."""
a, b = "", ""
for char1, char2 in zip(codon_seq, target_seq):
# no need to do anything if the codon seq line has nothing
if char1 == " ":
a += char1
b += char2
else:
a += char2
b += char1
return a, b
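The swap is easiest to see with placeholder characters; wherever the codon line is non-blank, the two lines trade characters:
a, b = _flip_codons("  xyz ", "ABCDEF")
print(repr(a))  # '  CDE '
print(repr(b))  # 'ABxyzF'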
def _get_block_coords(parsed_seq, row_dict, has_ner=False):
"""Return a list of start, end coordinates for each given block in the sequence (PRIVATE)."""
start = 0
coords = []
if not has_ner:
splitter = _RE_EXON
else:
splitter = _RE_NER
# use the query line for reference
seq = parsed_seq[row_dict["query"]]
for block in re.split(splitter, seq):
start += seq[start:].find(block)
end = start + len(block)
coords.append((start, end))
return coords
def _get_inter_coords(coords, strand=1):
"""Return list of pairs covering intervening ranges (PRIVATE).
From the given pairs of coordinates, returns a list of pairs
covering the intervening ranges.
"""
# adapted from Python's itertools guide
# if strand is -1, adjust coords to the ends and starts are chained
if strand == -1:
sorted_coords = [(max(a, b), min(a, b)) for a, b in coords]
inter_coords = list(chain(*sorted_coords))[1:-1]
return list(zip(inter_coords[1::2], inter_coords[::2]))
else:
inter_coords = list(chain(*coords))[1:-1]
return list(zip(inter_coords[::2], inter_coords[1::2]))
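Note that the function relies on ``chain`` from ``itertools``. On the plus strand, three blocks yield the two gaps between them:
blocks = [(0, 10), (15, 20), (25, 30)]
print(_get_inter_coords(blocks))  # [(10, 15), (20, 25)]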
def _stitch_rows(raw_rows):
"""Stitches together the parsed alignment rows and returns them in a list (PRIVATE)."""
# deal with possible codon surprise!
# (i.e. alignments with codons using cdna2genome model)
# by creating additional rows to contain the codons
try:
max_len = max(len(x) for x in raw_rows)
for row in raw_rows:
assert len(row) == max_len
except AssertionError:
for idx, row in enumerate(raw_rows):
if len(row) != max_len:
# codons must be present in the query and hit (so +2)
assert len(row) + 2 == max_len
# add additional empty lines to contain codons
raw_rows[idx] = [" " * len(row[0])] + row + [" " * len(row[0])]
cmbn_rows = []
for idx, row in enumerate(raw_rows[0]):
cmbn_row = "".join(aln_row[idx] for aln_row in raw_rows)
cmbn_rows.append(cmbn_row)
# the real aligned sequence is always the 'outer' one, so we want
# to flip them with their 'inner' pairs
if len(cmbn_rows) == 5:
# flip query sequence
cmbn_rows[0], cmbn_rows[1] = _flip_codons(cmbn_rows[0], cmbn_rows[1])
# flip hit sequence
cmbn_rows[4], cmbn_rows[3] = _flip_codons(cmbn_rows[4], cmbn_rows[3])
return cmbn_rows
def _get_row_dict(row_len, model):
"""Return a dictionary of row indices for parsing alignment blocks (PRIVATE)."""
idx = {}
# 3 lines, usually in dna vs dna models
if row_len == 3:
idx["query"] = 0
idx["midline"] = 1
idx["hit"] = 2
idx["qannot"], idx["hannot"] = None, None
# 4 lines, in protein vs dna models or dna vs protein models
# TODO: currently we check this from the model string; is there
# a better way to do it?
elif row_len == 4:
if "protein2" in model:
idx["query"] = 0
idx["midline"] = 1
idx["hit"] = 2
idx["hannot"] = 3
idx["qannot"] = None
elif "2protein" in model:
idx["query"] = 1
idx["midline"] = 2
idx["hit"] = 3
idx["hannot"] = None
idx["qannot"] = 0
else:
raise ValueError("Unexpected model: " + model)
# 5 lines, translated dna vs translated dna
elif row_len == 5:
# set sequence indexes
idx["qannot"] = 0
idx["query"] = 1
idx["midline"] = 2
idx["hit"] = 3
idx["hannot"] = 4
else:
raise ValueError("Unexpected row count in alignment block: %i" % row_len)
return idx |
Return a list of dictionaries of sequences split by the coordinates (PRIVATE). | def _get_blocks(rows, coords, idx):
"""Return a list of dictionaries of sequences split by the coordinates (PRIVATE)."""
for idx_name in ("query", "hit", "midline", "qannot", "hannot"):
assert idx_name in idx
blocks = []
for start, end in coords:
block = {}
# get seqs according to index
block["query"] = rows[idx["query"]][start:end]
block["hit"] = rows[idx["hit"]][start:end]
block["similarity"] = rows[idx["midline"]][start:end]
if idx["qannot"] is not None:
block["query_annotation"] = rows[idx["qannot"]][start:end]
if idx["hannot"] is not None:
block["hit_annotation"] = rows[idx["hannot"]][start:end]
blocks.append(block)
return blocks |
Get a dictionary of split codon locations relative to each fragment end (PRIVATE). | def _get_scodon_moves(tmp_seq_blocks):
"""Get a dictionary of split codon locations relative to each fragment end (PRIVATE)."""
scodon_moves = {"query": [], "hit": []}
for seq_type in scodon_moves:
scoords = []
for block in tmp_seq_blocks:
# check both ends of the sequence for residues in curly braces
m_start = re.search(_RE_SCODON_START, block[seq_type])
m_end = re.search(_RE_SCODON_END, block[seq_type])
if m_start:
m_start = len(m_start.group(1))
scoords.append((m_start, 0))
else:
scoords.append((0, 0))
if m_end:
m_end = len(m_end.group(1))
scoords.append((0, m_end))
else:
scoords.append((0, 0))
scodon_moves[seq_type] = scoords
return scodon_moves |
Remove curly braces (split codon markers) from the given sequences (PRIVATE). | def _clean_blocks(tmp_seq_blocks):
"""Remove curly braces (split codon markers) from the given sequences (PRIVATE)."""
seq_blocks = []
for seq_block in tmp_seq_blocks:
for line_name in seq_block:
seq_block[line_name] = (
seq_block[line_name].replace("{", "").replace("}", "")
)
seq_blocks.append(seq_block)
return seq_blocks |
Return the length of introns between fragments (PRIVATE). | def _comp_intron_lens(seq_type, inter_blocks, raw_inter_lens):
"""Return the length of introns between fragments (PRIVATE)."""
# set opposite type, for setting introns
opp_type = "hit" if seq_type == "query" else "query"
# list of flags to denote if an intron follows a block
# it reads e.g. this line:
# "ATGTT{TT} >>>> Target Intron 1 >>>> {G}TGTGTGTACATT"
# and sets the opposing sequence type's intron (since this
# line is present on the opposite sequence type line)
has_intron_after = ["Intron" in x[seq_type] for x in inter_blocks]
assert len(has_intron_after) == len(raw_inter_lens)
# create list containing coord adjustments incorporating
# intron lengths
inter_lens = []
for flag, parsed_len in zip(has_intron_after, raw_inter_lens):
if flag:
# joint introns
if all(parsed_len[:2]):
# intron len is [0] if opp_type is query, otherwise it's [1]
intron_len = (
int(parsed_len[0]) if opp_type == "query" else int(parsed_len[1])
)
# single hit/query introns
elif parsed_len[2]:
intron_len = int(parsed_len[2])
else:
raise ValueError("Unexpected intron parsing result: %r" % parsed_len)
else:
intron_len = 0
inter_lens.append(intron_len)
return inter_lens |
Fill the block coordinates of the given hsp dictionary (PRIVATE). | def _comp_coords(hsp, seq_type, inter_lens):
"""Fill the block coordinates of the given hsp dictionary (PRIVATE)."""
assert seq_type in ("hit", "query")
# manually fill the first coord
seq_step = 1 if hsp["%s_strand" % seq_type] >= 0 else -1
fstart = hsp["%s_start" % seq_type]
# fend is fstart + number of residues in the sequence, minus gaps
fend = (
fstart
+ len(hsp[seq_type][0].replace("-", "").replace(">", "").replace("<", ""))
* seq_step
)
coords = [(fstart, fend)]
# and start from the second block, after the first inter seq
for idx, block in enumerate(hsp[seq_type][1:]):
bstart = coords[-1][1] + inter_lens[idx] * seq_step
bend = bstart + seq_step * len(block.replace("-", ""))
coords.append((bstart, bend))
# adjust the coords so the smallest is [0], if strand is -1
# couldn't do this in the previous steps since we need the initial
# block ordering
if seq_step != 1:
for idx, coord in enumerate(coords):
coords[idx] = coords[idx][1], coords[idx][0]
return coords |
Compute positions of split codons, store in given HSP dictionary (PRIVATE). | def _comp_split_codons(hsp, seq_type, scodon_moves):
"""Compute positions of split codons, store in given HSP dictionary (PRIVATE)."""
scodons = []
for idx in range(len(scodon_moves[seq_type])):
pair = scodon_moves[seq_type][idx]
if not any(pair):
continue
else:
assert not all(pair)
a, b = pair
anchor_pair = hsp["%s_ranges" % seq_type][idx // 2]
strand = 1 if hsp["%s_strand" % seq_type] >= 0 else -1
if a:
func = max if strand == 1 else min
anchor = func(anchor_pair)
start_c, end_c = anchor + a * strand * -1, anchor
elif b:
func = min if strand == 1 else max
anchor = func(anchor_pair)
start_c, end_c = anchor + b * strand, anchor
scodons.append((min(start_c, end_c), max(start_c, end_c)))
return scodons |
Parse the vulgar components present in the hsp dictionary. | def parse_vulgar_comp(hsp, vulgar_comp):
"""Parse the vulgar components present in the hsp dictionary."""
# containers for block coordinates
qstarts = [hsp["query_start"]]
qends = []
hstarts = [hsp["hit_start"]]
hends = []
# containers for split codons
hsp["query_split_codons"] = []
hsp["hit_split_codons"] = []
# containers for ner blocks
hsp["query_ner_ranges"] = []
hsp["hit_ner_ranges"] = []
# sentinels for tracking query and hit positions
qpos = hsp["query_start"]
hpos = hsp["hit_start"]
# multiplier for determining sentinel movement
qmove = 1 if hsp["query_strand"] >= 0 else -1
hmove = 1 if hsp["hit_strand"] >= 0 else -1
vcomps = re.findall(_RE_VCOMP, vulgar_comp)
for idx, match in enumerate(vcomps):
label, qstep, hstep = match[0], int(match[1]), int(match[2])
# check for label, must be recognized
assert label in "MCGF53INS", "Unexpected vulgar label: %r" % label
# match, codon, or gaps
if label in "MCGS":
# if the previous comp is not an MCGS block, it's the
# start of a new block
if vcomps[idx - 1][0] not in "MCGS":
qstarts.append(qpos)
hstarts.append(hpos)
# other labels
# store the values in the hsp dict as a tuple of (start, stop)
# we're not doing anything if the label is in '53IN', as these
# basically tell us what the inter-block coordinates are and
# inter-block coordinates are automatically calculated by
# an HSP property
if label == "S":
# get start and stop from parsed values
qstart, hstart = qpos, hpos
qend = qstart + qstep * qmove
hend = hstart + hstep * hmove
# adjust the start-stop ranges
sqstart, sqend = min(qstart, qend), max(qstart, qend)
shstart, shend = min(hstart, hend), max(hstart, hend)
# split codons
# XXX: is it possible to have a frameshift that introduces
# a codon split? If so, this may need a different treatment..
hsp["query_split_codons"].append((sqstart, sqend))
hsp["hit_split_codons"].append((shstart, shend))
# move sentinels accordingly
qpos += qstep * qmove
hpos += hstep * hmove
# append to ends if the next comp is not an MCGS block or
# if it's the last comp
if idx == len(vcomps) - 1 or (
label in "MCGS" and vcomps[idx + 1][0] not in "MCGS"
):
qends.append(qpos)
hends.append(hpos)
# adjust coordinates
for seq_type in ("query_", "hit_"):
strand = hsp[seq_type + "strand"]
# switch coordinates if strand is < 0
if strand < 0:
# switch the starts and ends
hsp[seq_type + "start"], hsp[seq_type + "end"] = (
hsp[seq_type + "end"],
hsp[seq_type + "start"],
)
if seq_type == "query_":
qstarts, qends = qends, qstarts
else:
hstarts, hends = hends, hstarts
# set start and end ranges
hsp["query_ranges"] = list(zip(qstarts, qends))
hsp["hit_ranges"] = list(zip(hstarts, hends))
return hsp |
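A sketch of the expected input, assuming _RE_VCOMP captures (label, query step, hit step) triples; the vulgar string below is hand-made, and since G (gap) is an MCGS label all three components stay in a single block:

hsp = {
    "query_start": 0,
    "hit_start": 0,
    "query_strand": 1,
    "hit_strand": 1,
}
# 10-column match, 2-residue query-only gap, then a 5-column match
hsp = parse_vulgar_comp(hsp, "M 10 10 G 2 0 M 5 5")
assert hsp["query_ranges"] == [(0, 17)]
assert hsp["hit_ranges"] == [(0, 15)]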
Set the HSPFragment frames (PRIVATE). | def _set_frame(frag):
"""Set the HSPFragment frames (PRIVATE)."""
frag.hit_frame = (frag.hit_start % 3 + 1) * frag.hit_strand
frag.query_frame = (frag.query_start % 3 + 1) * frag.query_strand |
Select a valid amino acid sequence given a 3-letter code input (PRIVATE).
This function takes a single three-letter amino acid sequence and the phase
of the sequence to return the longest intact amino acid sequence possible.
Parts of the input sequence before and after the selected sequence are also
returned.
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _make_triplets
>>> _make_triplets('GlyThrSerAlaPro')
('', ['Gly', 'Thr', 'Ser', 'Ala', 'Pro'], '')
>>> _make_triplets('yThrSerAla', phase=1)
('y', ['Thr', 'Ser', 'Ala'], '')
>>> _make_triplets('yThrSerAlaPr', phase=1)
('y', ['Thr', 'Ser', 'Ala'], 'Pr') | def _make_triplets(seq, phase=0):
"""Select a valid amino acid sequence given a 3-letter code input (PRIVATE).
This function takes a single three-letter amino acid sequence and the phase
of the sequence to return the longest intact amino acid sequence possible.
Parts of the input sequence before and after the selected sequence are also
returned.
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _make_triplets
>>> _make_triplets('GlyThrSerAlaPro')
('', ['Gly', 'Thr', 'Ser', 'Ala', 'Pro'], '')
>>> _make_triplets('yThrSerAla', phase=1)
('y', ['Thr', 'Ser', 'Ala'], '')
>>> _make_triplets('yThrSerAlaPr', phase=1)
('y', ['Thr', 'Ser', 'Ala'], 'Pr')
"""
pre = seq[:phase]
np_seq = seq[phase:]
non_triplets = len(np_seq) % 3
post = "" if not non_triplets else np_seq[-1 * non_triplets :]
intacts = [np_seq[3 * i : 3 * (i + 1)] for i in range(len(np_seq) // 3)]
return pre, intacts, post |
Return the letter coordinate of the given list of fragments (PRIVATE).
This function takes a list of three-letter amino acid sequences and
returns a list of coordinates for each fragment had all the input
sequences been flattened.
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _get_fragments_coord
>>> _get_fragments_coord(['Thr', 'Ser', 'Ala'])
[0, 3, 6]
>>> _get_fragments_coord(['Thr', 'SerAlaPro', 'GlyLeu'])
[0, 3, 12]
>>> _get_fragments_coord(['Thr', 'SerAlaPro', 'GlyLeu', 'Cys'])
[0, 3, 12, 18] | def _get_fragments_coord(frags):
"""Return the letter coordinate of the given list of fragments (PRIVATE).
This function takes a list of three-letter amino acid sequences and
returns a list of coordinates for each fragment had all the input
sequences been flattened.
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _get_fragments_coord
>>> _get_fragments_coord(['Thr', 'Ser', 'Ala'])
[0, 3, 6]
>>> _get_fragments_coord(['Thr', 'SerAlaPro', 'GlyLeu'])
[0, 3, 12]
>>> _get_fragments_coord(['Thr', 'SerAlaPro', 'GlyLeu', 'Cys'])
[0, 3, 12, 18]
"""
if not frags:
return []
# first fragment always starts from position 0
init = [0]
return reduce(lambda acc, frag: acc + [acc[-1] + len(frag)], frags[:-1], init) |
Return the phases of the given list of 3-letter amino acid fragments (PRIVATE).
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _get_fragments_phase
>>> _get_fragments_phase(['Thr', 'Ser', 'Ala'])
[0, 0, 0]
>>> _get_fragments_phase(['ThrSe', 'rAla'])
[0, 1]
>>> _get_fragments_phase(['ThrSe', 'rAlaLeu', 'ProCys'])
[0, 1, 0]
>>> _get_fragments_phase(['ThrSe', 'rAlaLeuP', 'roCys'])
[0, 1, 2]
>>> _get_fragments_phase(['ThrSe', 'rAlaLeuPr', 'oCys'])
[0, 1, 1] | def _get_fragments_phase(frags):
"""Return the phases of the given list of 3-letter amino acid fragments (PRIVATE).
This is an internal private function and is meant for parsing Exonerate's
three-letter amino acid output.
>>> from Bio.SearchIO.ExonerateIO._base import _get_fragments_phase
>>> _get_fragments_phase(['Thr', 'Ser', 'Ala'])
[0, 0, 0]
>>> _get_fragments_phase(['ThrSe', 'rAla'])
[0, 1]
>>> _get_fragments_phase(['ThrSe', 'rAlaLeu', 'ProCys'])
[0, 1, 0]
>>> _get_fragments_phase(['ThrSe', 'rAlaLeuP', 'roCys'])
[0, 1, 2]
>>> _get_fragments_phase(['ThrSe', 'rAlaLeuPr', 'oCys'])
[0, 1, 1]
"""
return [(3 - (x % 3)) % 3 for x in _get_fragments_coord(frags)] |
Transform 3-letter AA codes of input fragments to one-letter codes (PRIVATE).
Argument fraglist should be a list of HSPFragments objects. | def _adjust_aa_seq(fraglist):
"""Transform 3-letter AA codes of input fragments to one-letter codes (PRIVATE).
Argument fraglist should be a list of HSPFragments objects.
"""
custom_map = {"***": "*", "<->": "-"}
hsp_hstart = fraglist[0].hit_start
hsp_qstart = fraglist[0].query_start
frag_phases = _get_fragments_phase(fraglist)
for frag, phase in zip(fraglist, frag_phases):
assert frag.query_strand == 0 or frag.hit_strand == 0
# hit step may be -1 as we're aligning to DNA
hstep = 1 if frag.hit_strand >= 0 else -1
# set fragment phase
frag.phase = phase
# fragment should have a length that is a multiple of 3
# assert len(frag) % 3 == 0
qseq = str(frag.query.seq)
q_triplets_pre, q_triplets, q_triplets_post = _make_triplets(qseq, phase)
hseq = str(frag.hit.seq)
h_triplets_pre, h_triplets, h_triplets_post = _make_triplets(hseq, phase)
# get one letter codes
# and replace gap codon markers and termination characters
hseq1_pre = "X" if h_triplets_pre else ""
hseq1_post = "X" if h_triplets_post else ""
hseq1 = seq1("".join(h_triplets), custom_map=custom_map)
hstart = hsp_hstart + (len(hseq1_pre) * hstep)
hend = hstart + len(hseq1.replace("-", "")) * hstep
qseq1_pre = "X" if q_triplets_pre else ""
qseq1_post = "X" if q_triplets_post else ""
qseq1 = seq1("".join(q_triplets), custom_map=custom_map)
qstart = hsp_qstart + len(qseq1_pre)
qend = qstart + len(qseq1.replace("-", ""))
# replace the old frag sequences with the new ones
frag.hit = None
frag.query = None
frag.hit = hseq1_pre + hseq1 + hseq1_post
frag.query = qseq1_pre + qseq1 + qseq1_post
# set coordinates for the protein sequence
if frag.query_strand == 0:
frag.query_start, frag.query_end = qstart, qend
elif frag.hit_strand == 0:
frag.hit_start, frag.hit_end = hstart, hend
# update alignment annotation
# by turning them into list of triplets
for annot, annotseq in frag.aln_annotation.items():
pre, intact, post = _make_triplets(annotseq, phase)
frag.aln_annotation[annot] = (
list(filter(None, [pre])) + intact + list(filter(None, [post]))
)
# update values for next iteration
hsp_hstart, hsp_qstart = hend, qend
return fraglist |
Split one HSPFragment containing frame-shifted alignment into two (PRIVATE). | def _split_fragment(frag):
"""Split one HSPFragment containing frame-shifted alignment into two (PRIVATE)."""
# given an HSPFragment object with frameshift(s), this method splits it
# into fragments without frameshifts by sequentially chopping it off
# starting from the beginning
simil = frag.aln_annotation["similarity"]
# we should have at least 1 frame shift for splitting
assert simil.count("#") > 0
split_frags = []
qstep = 1 if frag.query_strand >= 0 else -1
hstep = 1 if frag.hit_strand >= 0 else -1
qpos = min(frag.query_range) if qstep >= 0 else max(frag.query_range)
hpos = min(frag.hit_range) if hstep >= 0 else max(frag.hit_range)
abs_pos = 0
# split according to hit, then query
while simil:
try:
shifts = re.search(_RE_SHIFTS, simil).group(1)
s_start = simil.find(shifts)
s_stop = s_start + len(shifts)
split = frag[abs_pos : abs_pos + s_start]
except AttributeError: # no '#' in simil, i.e. last frag
shifts = ""
s_start = 0
s_stop = len(simil)
split = frag[abs_pos:]
# coordinates for the split strand
qstart, hstart = qpos, hpos
qpos += (
len(split) - sum(split.query.seq.count(x) for x in ("-", "<", ">"))
) * qstep
hpos += (
len(split) - sum(split.hit.seq.count(x) for x in ("-", "<", ">"))
) * hstep
split.hit_start = min(hstart, hpos)
split.query_start = min(qstart, qpos)
split.hit_end = max(hstart, hpos)
split.query_end = max(qstart, qpos)
# account for frameshift length
abs_slice = slice(abs_pos + s_start, abs_pos + s_stop)
if len(frag.aln_annotation) == 2:
seqs = (frag[abs_slice].query.seq, frag[abs_slice].hit.seq)
elif len(frag.aln_annotation) == 3:
seqs = (
frag[abs_slice].aln_annotation["query_annotation"],
frag[abs_slice].aln_annotation["hit_annotation"],
)
if "#" in seqs[0]:
qpos += len(shifts) * qstep
elif "#" in seqs[1]:
hpos += len(shifts) * hstep
# set frame
_set_frame(split)
split_frags.append(split)
# set similarity string and absolute position for the next loop
simil = simil[s_stop:]
abs_pos += s_stop
return split_frags |
Return a list of HSP objects from the given parsed HSP values (PRIVATE). | def _create_hsp(hid, qid, hspd):
"""Return a list of HSP objects from the given parsed HSP values (PRIVATE)."""
frags = []
# we are iterating over query_ranges, but hit_ranges works just as well
for idx, qcoords in enumerate(hspd["query_ranges"]):
# get sequences, create object
hseqlist = hspd.get("hit")
hseq = "" if hseqlist is None else hseqlist[idx]
qseqlist = hspd.get("query")
qseq = "" if qseqlist is None else qseqlist[idx]
frag = HSPFragment(hid, qid, hit=hseq, query=qseq)
# coordinates
frag.query_start = qcoords[0]
frag.query_end = qcoords[1]
frag.hit_start = hspd["hit_ranges"][idx][0]
frag.hit_end = hspd["hit_ranges"][idx][1]
# alignment annotation
try:
aln_annot = hspd.get("aln_annotation", {})
for key, value in aln_annot.items():
frag.aln_annotation[key] = value[idx]
except IndexError:
pass
# strands
frag.query_strand = hspd["query_strand"]
frag.hit_strand = hspd["hit_strand"]
# and append the hsp object to the list
if frag.aln_annotation.get("similarity") is not None:
if "#" in frag.aln_annotation["similarity"]:
frags.extend(_split_fragment(frag))
continue
# try to set frame if there are translation in the alignment
if (
len(frag.aln_annotation) > 1
or frag.query_strand == 0
or ("vulgar_comp" in hspd and re.search(_RE_TRANS, hspd["vulgar_comp"]))
):
_set_frame(frag)
frags.append(frag)
# if the query is protein, we need to change the hit and query sequences
# from three-letter amino acid codes to one letter, and adjust their
# coordinates accordingly
if len(frags[0].aln_annotation) == 2: # 2 annotations == protein query
frags = _adjust_aa_seq(frags)
hsp = HSP(frags)
# set hsp-specific attributes
for attr in (
"score",
"hit_split_codons",
"query_split_codons",
"model",
"vulgar_comp",
"cigar_comp",
"molecule_type",
):
if attr in hspd:
setattr(hsp, attr, hspd[attr])
return hsp |
Parse the 'Query:' line of exonerate alignment outputs (PRIVATE). | def _parse_hit_or_query_line(line):
"""Parse the 'Query:' line of exonerate alignment outputs (PRIVATE)."""
try:
mark, id, desc = line.split(" ", 2)
except ValueError: # no desc
mark, id = line.split(" ", 1)
desc = ""
return id, desc |
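A quick sketch with a hand-made line; the leading mark ('Query:' or 'Target:') is discarded:

ident, desc = _parse_hit_or_query_line("Query: seqA sample description")
assert ident == "seqA"
assert desc == "sample description"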
Determine the strand from the description (PRIVATE).
Exonerate appends ``:[revcomp]`` (versions <= 2.2) or ``[revcomp]``
(versions > 2.2) to the query and/or hit description string. This function
outputs '-' if the description has such modifications or '+' if not. If the
query and/or hit is a protein sequence, a '.' is output instead.
Aside from the strand, the input description value is also returned. It is
returned unmodified if ``modify_desc`` is ``False``. Otherwise, the appended
``:[revcomp]`` or ``[revcomp]`` is removed. | def _get_strand_from_desc(desc, is_protein, modify_desc=True):
"""Determine the strand from the description (PRIVATE).
Exonerate appends ``:[revcomp]`` (versions <= 2.2) or ``[revcomp]``
(versions > 2.2) to the query and/or hit description string. This function
outputs '-' if the description has such modifications or '+' if not. If the
query and/or hit is a protein sequence, a '.' is output instead.
Aside from the strand, the input description value is also returned. It is
returned unmodified if ``modify_desc`` is ``False``. Otherwise, the appended
``:[revcomp]`` or ``[revcomp]`` is removed.
"""
if is_protein:
return ".", desc
suffix = ""
if desc.endswith("[revcomp]"):
suffix = ":[revcomp]" if desc.endswith(":[revcomp]") else "[revcomp]"
if not suffix:
return "+", desc
if modify_desc:
return "-", desc[: -len(suffix)]
return "-", desc |
Map hit to its identifier (PRIVATE).
Default hit key function for QueryResult.__init__ use. | def _hit_key_func(hit):
"""Map hit to its identifier (PRIVATE).
Default hit key function for QueryResult.__init__ use.
"""
return hit.id |
Return the string value of the given optional raw bytes tag value.
If the bytes value is None, return the given default value. | def _get_string_tag(opt_bytes_value, default=None):
"""Return the string value of the given an optional raw bytes tag value.
If the bytes value is None, return the given default value.
"""
if opt_bytes_value is None:
return default
try:
return opt_bytes_value.decode()
except UnicodeDecodeError:
return opt_bytes_value.decode(encoding=sys.getdefaultencoding()) |
Return an iterator for the Abi file format that yields trimmed SeqRecord objects (PRIVATE). | def _AbiTrimIterator(handle):
"""Return an iterator for the Abi file format that yields trimmed SeqRecord objects (PRIVATE)."""
return AbiIterator(handle, trim=True) |
Return directory contents (PRIVATE). | def _abi_parse_header(header, handle):
"""Return directory contents (PRIVATE)."""
# header structure (after ABIF marker):
# file version, tag name, tag number,
# element type code, element size, number of elements
# data size, data offset, handle (not file handle)
head_elem_size = header[4]
head_elem_num = header[5]
head_offset = header[7]
index = 0
while index < head_elem_num:
start = head_offset + index * head_elem_size
# add directory offset to tuple
# to handle directories with data size <= 4 bytes
handle.seek(start)
dir_entry = struct.unpack(_DIRFMT, handle.read(struct.calcsize(_DIRFMT))) + (
start,
)
index += 1
# only parse desired dirs
key = dir_entry[0].decode()
key += str(dir_entry[1])
tag_name = dir_entry[0].decode()
tag_number = dir_entry[1]
elem_code = dir_entry[2]
elem_num = dir_entry[4]
data_size = dir_entry[5]
data_offset = dir_entry[6]
tag_offset = dir_entry[8]
# if data size <= 4 bytes, data is stored inside tag
# so offset needs to be changed
if data_size <= 4:
data_offset = tag_offset + 20
handle.seek(data_offset)
data = handle.read(data_size)
yield tag_name, tag_number, _parse_tag_data(elem_code, elem_num, data) |
Trims the sequence using Richard Mott's modified trimming algorithm (PRIVATE).
Arguments:
- seq_record - SeqRecord object to be trimmed.
Trimmed bases are determined from their segment score, which is a
cumulative sum of each base's score. Base scores are calculated from
their quality values.
More about the trimming algorithm:
http://www.phrap.org/phredphrap/phred.html
http://resources.qiagenbioinformatics.com/manuals/clcgenomicsworkbench/650/Quality_trimming.html | def _abi_trim(seq_record):
"""Trims the sequence using Richard Mott's modified trimming algorithm (PRIVATE).
Arguments:
- seq_record - SeqRecord object to be trimmed.
Trimmed bases are determined from their segment score, which is a
cumulative sum of each base's score. Base scores are calculated from
their quality values.
More about the trimming algorithm:
http://www.phrap.org/phredphrap/phred.html
http://resources.qiagenbioinformatics.com/manuals/clcgenomicsworkbench/650/Quality_trimming.html
"""
start = False # flag for starting position of trimmed sequence
segment = 20 # minimum sequence length
trim_start = 0 # init start index
cutoff = 0.05 # default cutoff value for calculating base score
if len(seq_record) <= segment:
return seq_record
else:
# calculate base score
score_list = [
cutoff - (10 ** (qual / -10.0))
for qual in seq_record.letter_annotations["phred_quality"]
]
# calculate cumulative score
# if cumulative value < 0, set it to 0
# first value is set to 0, because of the assumption that
# the first base will always be trimmed out
cummul_score = [0]
for i in range(1, len(score_list)):
score = cummul_score[-1] + score_list[i]
if score < 0:
cummul_score.append(0)
else:
cummul_score.append(score)
if not start:
# trim_start = value when cumulative score is first > 0
trim_start = i
start = True
# trim_finish = index of highest cumulative score,
# marking the end of sequence segment with highest cumulative score
trim_finish = cummul_score.index(max(cummul_score))
return seq_record[trim_start:trim_finish] |
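The cumulative-score arithmetic can be sketched on a bare quality list, with hypothetical PHRED values and no SeqRecord involved:

cutoff = 0.05
quals = [3, 5, 20, 30, 40, 40, 35, 8, 2]  # hypothetical PHRED qualities
scores = [cutoff - 10 ** (q / -10.0) for q in quals]
cummul = [0]  # first base is assumed trimmed
trim_start, started = 0, False
for i in range(1, len(scores)):
    score = cummul[-1] + scores[i]
    if score < 0:
        cummul.append(0)
    else:
        cummul.append(score)
        if not started:
            trim_start = i  # first index where the running sum turns non-negative
            started = True
trim_finish = cummul.index(max(cummul))
assert quals[trim_start:trim_finish] == [20, 30, 40, 40]  # the high-quality core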
Return single data value (PRIVATE).
Arguments:
- elem_code - What kind of data
- elem_num - How many data points
- raw_data - abi file object from which the tags would be unpacked | def _parse_tag_data(elem_code, elem_num, raw_data):
"""Return single data value (PRIVATE).
Arguments:
- elem_code - What kind of data
- elem_num - How many data points
- raw_data - abi file object from which the tags would be unpacked
"""
if elem_code in _BYTEFMT:
# because '>1s' unpacks differently from '>s'
if elem_num == 1:
num = ""
else:
num = str(elem_num)
fmt = ">" + num + _BYTEFMT[elem_code]
assert len(raw_data) == struct.calcsize(fmt)
data = struct.unpack(fmt, raw_data)
# no need to use tuple if len(data) == 1
# also if data is date / time
if elem_code not in [10, 11] and len(data) == 1:
data = data[0]
# account for different data types
if elem_code == 2:
return data
elif elem_code == 10:
return str(datetime.date(*data))
elif elem_code == 11:
return str(datetime.time(*data[:3]))
elif elem_code == 13:
return bool(data)
elif elem_code == 18:
return data[1:]
elif elem_code == 19:
return data[:-1]
else:
return data
else:
return None |
Return SeqRecord objects from an ACE file.
This uses the Bio.Sequencing.Ace module to do the hard work. Note that
by iterating over the file in a single pass, we are forced to ignore any
WA, CT, RT or WR footer tags.
Ace files include the base quality for each position, which are taken
to be PHRED style scores. Just as if you had read in a FASTQ or QUAL file
using PHRED scores using Bio.SeqIO, these are stored in the SeqRecord's
letter_annotations dictionary under the "phred_quality" key.
>>> from Bio import SeqIO
>>> with open("Ace/consed_sample.ace") as handle:
... for record in SeqIO.parse(handle, "ace"):
... print("%s %s... %i" % (record.id, record.seq[:10], len(record)))
... print(max(record.letter_annotations["phred_quality"]))
Contig1 agccccgggc... 1475
90
However, ACE files do not include a base quality for any gaps in the
consensus sequence, and these are represented in Biopython with a quality
of zero. Using zero is perhaps misleading as there may be very strong
evidence to support the gap in the consensus. Previous versions of
Biopython therefore used None instead, but this complicated usage, and
prevented output of the gapped sequence as FASTQ format.
>>> from Bio import SeqIO
>>> with open("Ace/contig1.ace") as handle:
... for record in SeqIO.parse(handle, "ace"):
... print("%s ...%s..." % (record.id, record.seq[85:95]))
... print(record.letter_annotations["phred_quality"][85:95])
... print(max(record.letter_annotations["phred_quality"]))
Contig1 ...AGAGG-ATGC...
[57, 57, 54, 57, 57, 0, 57, 72, 72, 72]
90
Contig2 ...GAATTACTAT...
[68, 68, 68, 68, 68, 68, 68, 68, 68, 68]
90 | def AceIterator(source):
"""Return SeqRecord objects from an ACE file.
This uses the Bio.Sequencing.Ace module to do the hard work. Note that
by iterating over the file in a single pass, we are forced to ignore any
WA, CT, RT or WR footer tags.
Ace files include the base quality for each position, which are taken
to be PHRED style scores. Just as if you had read in a FASTQ or QUAL file
using PHRED scores using Bio.SeqIO, these are stored in the SeqRecord's
letter_annotations dictionary under the "phred_quality" key.
>>> from Bio import SeqIO
>>> with open("Ace/consed_sample.ace") as handle:
... for record in SeqIO.parse(handle, "ace"):
... print("%s %s... %i" % (record.id, record.seq[:10], len(record)))
... print(max(record.letter_annotations["phred_quality"]))
Contig1 agccccgggc... 1475
90
However, ACE files do not include a base quality for any gaps in the
consensus sequence, and these are represented in Biopython with a quality
of zero. Using zero is perhaps misleading as there may be very strong
evidence to support the gap in the consensus. Previous versions of
Biopython therefore used None instead, but this complicated usage, and
prevented output of the gapped sequence as FASTQ format.
>>> from Bio import SeqIO
>>> with open("Ace/contig1.ace") as handle:
... for record in SeqIO.parse(handle, "ace"):
... print("%s ...%s..." % (record.id, record.seq[85:95]))
... print(record.letter_annotations["phred_quality"][85:95])
... print(max(record.letter_annotations["phred_quality"]))
Contig1 ...AGAGG-ATGC...
[57, 57, 54, 57, 57, 0, 57, 72, 72, 72]
90
Contig2 ...GAATTACTAT...
[68, 68, 68, 68, 68, 68, 68, 68, 68, 68]
90
"""
for ace_contig in Ace.parse(source):
# Convert the ACE contig record into a SeqRecord...
consensus_seq_str = ace_contig.sequence
if "*" in consensus_seq_str:
# For consistency with most other file formats, map
# any * gaps into - gaps.
assert "-" not in consensus_seq_str
consensus_seq = Seq(consensus_seq_str.replace("*", "-"))
else:
consensus_seq = Seq(consensus_seq_str)
# TODO? - Base segments (BS lines) which indicate which read
# phrap has chosen to be the consensus at a particular position.
# Perhaps as SeqFeature objects?
# TODO - Supporting reads (RD lines, plus perhaps QA and DS lines)
# Perhaps as SeqFeature objects?
seq_record = SeqRecord(consensus_seq, id=ace_contig.name, name=ace_contig.name)
# Consensus base quality (BQ lines). Note that any gaps (originally
# as * characters) in the consensus do not get a quality entry, so
# we assign a quality of None (zero would be misleading as there may
# be excellent support for having a gap here).
quals = []
i = 0
for base in consensus_seq:
if base == "-":
quals.append(0)
else:
quals.append(ace_contig.quality[i])
i += 1
assert i == len(ace_contig.quality)
seq_record.letter_annotations["phred_quality"] = quals
yield seq_record |
Iterate over Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
For each record a tuple of two strings is returned, the FASTA title
line (without the leading '>' character), and the sequence (with any
whitespace removed). The title line is not divided up into an
identifier (the first word) and comment or description.
>>> with open("Fasta/dups.fasta") as handle:
... for values in SimpleFastaParser(handle):
... print(values)
...
('alpha', 'ACGTA')
('beta', 'CGTC')
('gamma', 'CCGCC')
('alpha (again - this is a duplicate entry to test the indexing code)', 'ACGTA')
('delta', 'CGCGC') | def SimpleFastaParser(handle):
"""Iterate over Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
For each record a tuple of two strings is returned, the FASTA title
line (without the leading '>' character), and the sequence (with any
whitespace removed). The title line is not divided up into an
identifier (the first word) and comment or description.
>>> with open("Fasta/dups.fasta") as handle:
... for values in SimpleFastaParser(handle):
... print(values)
...
('alpha', 'ACGTA')
('beta', 'CGTC')
('gamma', 'CCGCC')
('alpha (again - this is a duplicate entry to test the indexing code)', 'ACGTA')
('delta', 'CGCGC')
"""
# Skip any text before the first record (e.g. blank lines, comments)
for line in handle:
if line[0] == ">":
title = line[1:].rstrip()
break
else:
# no break encountered - probably an empty file
return
# Main logic
# Note, remove trailing whitespace, and any internal spaces
# (and any embedded \r which are possible in mangled files
# when not opened in universal read lines mode)
lines = []
for line in handle:
if line[0] == ">":
yield title, "".join(lines).replace(" ", "").replace("\r", "")
lines = []
title = line[1:].rstrip()
continue
lines.append(line.rstrip())
yield title, "".join(lines).replace(" ", "").replace("\r", "") |
Iterate over no-wrapping Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
Functionally the same as SimpleFastaParser but with a strict
interpretation of the FASTA format as exactly two lines per
record, the greater-than-sign identifier with description,
and the sequence with no line wrapping.
Any line wrapping will raise an exception, as will excess blank
lines (other than the special case of a zero-length sequence
as the second line of a record).
Examples
--------
This file uses two lines per FASTA record:
>>> with open("Fasta/aster_no_wrap.pro") as handle:
... for title, seq in FastaTwoLineParser(handle):
... print("%s = %s..." % (title, seq[:3]))
...
gi|3298468|dbj|BAA31520.1| SAMIPF = GGH...
This equivalent file uses line wrapping:
>>> with open("Fasta/aster.pro") as handle:
... for title, seq in FastaTwoLineParser(handle):
... print("%s = %s..." % (title, seq[:3]))
...
Traceback (most recent call last):
...
ValueError: Expected FASTA record starting with '>' character. Perhaps this file is using FASTA line wrapping? Got: 'MTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI' | def FastaTwoLineParser(handle):
"""Iterate over no-wrapping Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
Functionally the same as SimpleFastaParser but with a strict
interpretation of the FASTA format as exactly two lines per
record, the greater-than-sign identifier with description,
and the sequence with no line wrapping.
Any line wrapping will raise an exception, as will excess blank
lines (other than the special case of a zero-length sequence
as the second line of a record).
Examples
--------
This file uses two lines per FASTA record:
>>> with open("Fasta/aster_no_wrap.pro") as handle:
... for title, seq in FastaTwoLineParser(handle):
... print("%s = %s..." % (title, seq[:3]))
...
gi|3298468|dbj|BAA31520.1| SAMIPF = GGH...
This equivalent file uses line wrapping:
>>> with open("Fasta/aster.pro") as handle:
... for title, seq in FastaTwoLineParser(handle):
... print("%s = %s..." % (title, seq[:3]))
...
Traceback (most recent call last):
...
ValueError: Expected FASTA record starting with '>' character. Perhaps this file is using FASTA line wrapping? Got: 'MTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI'
"""
idx = -1 # for empty file
for idx, line in enumerate(handle):
if idx % 2 == 0: # title line
if line[0] != ">":
raise ValueError(
"Expected FASTA record starting with '>' character. "
"Perhaps this file is using FASTA line wrapping? "
f"Got: '{line}'"
)
title = line[1:].rstrip()
else: # sequence line
if line[0] == ">":
raise ValueError(
"Two '>' FASTA lines in a row. Missing sequence line "
"if this is strict two-line-per-record FASTA format. "
f"Have '>{title}' and '{line}'"
)
yield title, line.strip()
if idx == -1:
pass # empty file
elif idx % 2 == 0: # on a title line
raise ValueError(
"Missing sequence line at end of file if this is strict "
f"two-line-per-record FASTA format. Have title line '{line}'"
)
else:
assert line[0] != ">", "line[0] == '>' ; this should be impossible!" |
Turn a SeqRecord into a FASTA formatted string.
This is used internally by the SeqRecord's .format("fasta")
method and by the SeqIO.write(..., ..., "fasta") function. | def as_fasta(record):
"""Turn a SeqRecord into a FASTA formatted string.
This is used internally by the SeqRecord's .format("fasta")
method and by the SeqIO.write(..., ..., "fasta") function.
"""
id = _clean(record.id)
description = _clean(record.description)
if description and description.split(None, 1)[0] == id:
# The description includes the id at the start
title = description
elif description:
title = f"{id} {description}"
else:
title = id
assert "\n" not in title
assert "\r" not in title
lines = [f">{title}\n"]
data = _get_seq_string(record) # Catches sequence being None
assert "\n" not in data
assert "\r" not in data
for i in range(0, len(data), 60):
lines.append(data[i : i + 60] + "\n")
return "".join(lines) |
Turn a SeqRecord into a two-line FASTA formatted string.
This is used internally by the SeqRecord's .format("fasta-2line")
method and by the SeqIO.write(..., ..., "fasta-2line") function. | def as_fasta_2line(record):
"""Turn a SeqRecord into a two-line FASTA formatted string.
This is used internally by the SeqRecord's .format("fasta-2line")
method and by the SeqIO.write(..., ..., "fasta-2line") function.
"""
id = _clean(record.id)
description = _clean(record.description)
if description and description.split(None, 1)[0] == id:
# The description includes the id at the start
title = description
elif description:
title = f"{id} {description}"
else:
title = id
assert "\n" not in title
assert "\r" not in title
data = _get_seq_string(record) # Catches sequence being None
assert "\n" not in data
assert "\r" not in data
return f">{title}\n{data}\n" |
Read the specified number of bytes from the given handle. | def _read(handle, length):
"""Read the specified number of bytes from the given handle."""
data = handle.read(length)
if len(data) < length:
raise ValueError(f"Cannot read {length} bytes from handle")
return data |
Read a length-prefixed packet.
Parts of a GCK file are made of "packets" comprising of 4 bytes
giving the packet's size, followed by the packet's data.
There is no type tag. The type of a packet, and thus the type of data
it contains, is solely indicated by the position of the packet within
the GCK file. | def _read_packet(handle):
"""Read a length-prefixed packet.
Parts of a GCK file are made of "packets" comprising of 4 bytes
giving the packet's size, followed by the packet's data.
There is no type tag. The type of a packet, and thus the type of data
it contains, is solely indicated by the position of the packet within
the GCK file.
"""
length = _read(handle, 4)
length = unpack(">I", length)[0]
data = _read(handle, length)
return (data, length) |
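A sketch with an in-memory handle and hand-made packet bytes:

import io
from struct import pack

handle = io.BytesIO(pack(">I", 3) + b"abc")
data, length = _read_packet(handle)
assert (data, length) == (b"abc", 3)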
Read a Pascal string.
A Pascal string is one byte for length followed by the actual string. | def _read_pstring(handle):
"""Read a Pascal string.
A Pascal string is one byte for length followed by the actual string.
"""
length = _read(handle, 1)
length = unpack(">B", length)[0]
data = _read(handle, length).decode("ASCII")
return data |
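Likewise for the one-byte length prefix of a Pascal string:

import io
from struct import pack

handle = io.BytesIO(pack(">B", 5) + b"hello")
assert _read_pstring(handle) == "hello"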
Read a 32-bit Pascal string.
Similar to a Pascal string but length is encoded on 4 bytes. | def _read_p4string(handle):
"""Read a 32-bit Pascal string.
Similar to a Pascal string but length is encoded on 4 bytes.
"""
length = _read(handle, 4)
length = unpack(">I", length)[0]
data = _read(handle, length).decode("ASCII")
return data |
Check a segment line's tags for inconsistencies (PRIVATE). | def _check_tags(seq, tags):
"""Check a segment line's tags for inconsistencies (PRIVATE)."""
for tag in tags:
if tag[:2] == "LN":
# Sequence length
if len(seq) == 0:
# No sequence data, set the sequence length
seq._data = _UndefinedSequenceData(int(tag[5:]))
elif int(tag[5:]) != len(seq):
warnings.warn(
f"Segment line has incorrect length. Expected {tag[5:]} but got {len(seq)}.",
BiopythonWarning,
)
elif tag[:2] == "SH":
# SHA256 checksum
checksum = hashlib.sha256(str(seq).encode()).hexdigest()
if checksum.upper() != tag[5:]:
warnings.warn(
f"Segment line has incorrect checksum. Expected {tag[5:]} but got {checksum}.",
BiopythonWarning,
) |
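For example, a consistent LN tag and a matching uppercase SHA-256 digest pass both checks silently (the SH:H: hex form is assumed here, following GFA tag conventions):

import hashlib
from Bio.Seq import Seq

seq = Seq("ACGT")
digest = hashlib.sha256(b"ACGT").hexdigest().upper()
_check_tags(seq, ["LN:i:4", "SH:H:" + digest])  # no warnings raised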
Build an annotations dictionary from a list of tags (PRIVATE). | def _tags_to_annotations(tags):
"""Build an annotations dictionary from a list of tags (PRIVATE)."""
annotations = {}
for tag in tags:
parts = tag.split(":")
if len(parts) < 3:
raise ValueError(f"Segment line has invalid tag: {tag}.")
if re.fullmatch(r"[A-Za-z][A-Za-z0-9]", parts[0]) is None:
warnings.warn(
f"Tag has invalid name: {parts[0]}. Are they tab delimited?",
BiopythonWarning,
)
parts[2] = ":".join(parts[2:]) # tag value may contain : characters
annotations[parts[0]] = (parts[1], parts[2])
# Check type of the tag and raise warning on a mismatch. These RegExs
# are part of the 1.0 standard.
if parts[1] not in "AifZJHB":
warnings.warn(f"Tag has invalid type: {parts[1]}", BiopythonWarning)
elif parts[1] == "A" and re.fullmatch(r"[!-~]", parts[2]) is None:
warnings.warn(
f"Tag has incorrect type. Expected printable character, got {parts[2]}.",
BiopythonWarning,
)
elif parts[1] == "i" and re.fullmatch(r"[-+]?[0-9]+", parts[2]) is None:
warnings.warn(
f"Tag has incorrect type. Expected signed integer, got {parts[2]}.",
BiopythonWarning,
)
elif (
parts[1] == "f"
and re.fullmatch(r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?", parts[2])
is None
):
warnings.warn(
f"Tag has incorrect type. Expected float, got {parts[2]}.",
BiopythonWarning,
)
elif parts[1] == "Z" and re.fullmatch(r"[ !-~]+", parts[2]) is None:
warnings.warn(
f"Tag has incorrect type. Expected printable string, got {parts[2]}.",
BiopythonWarning,
)
elif parts[1] == "J" and re.fullmatch(r"[ !-~]+", parts[2]) is None:
warnings.warn(
f"Tag has incorrect type. Expected JSON excluding new-line and tab characters, got {parts[2]}.",
BiopythonWarning,
)
elif parts[1] == "H" and re.fullmatch(r"[0-9A-F]+", parts[2]) is None:
warnings.warn(
f"Tag has incorrect type. Expected byte array in hex format, got {parts[2]}.",
BiopythonWarning,
)
elif (
parts[1] == "B"
and re.fullmatch(
r"[cCsSiIf](,[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)+", parts[2]
)
is None
):
warnings.warn(
f"Tag has incorrect type. Expected array of integers or floats, got {parts[2]}.",
BiopythonWarning,
)
return annotations |
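A small sketch of the resulting mapping, using the standard LN (length) and KC (k-mer count) segment tags:

annotations = _tags_to_annotations(["LN:i:248", "KC:i:123"])
assert annotations == {"LN": ("i", "248"), "KC": ("i", "123")}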
Parser for GFA 1.x files.
Documentation: https://gfa-spec.github.io/GFA-spec/GFA1.html | def Gfa1Iterator(source):
"""Parser for GFA 1.x files.
Documentation: https://gfa-spec.github.io/GFA-spec/GFA1.html
"""
with as_handle(source) as handle:
for line in handle:
if line == "\n":
warnings.warn("GFA data has a blank line.", BiopythonWarning)
continue
fields = line.strip("\n").split("\t")
if fields[0] != "S":
continue
if len(fields) < 3:
raise ValueError(
f"Segment line must have name and sequence fields: {line}."
)
if fields[2] == "*":
seq = Seq(None, length=0)
else:
seq = Seq(fields[2])
tags = fields[3:]
_check_tags(seq, tags)
annotations = _tags_to_annotations(tags)
yield SeqRecord(seq, id=fields[1], name=fields[1], annotations=annotations) |
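A minimal usage sketch on an in-memory GFA 1 string; the header line is skipped and the one segment becomes a SeqRecord:

import io

gfa = "H\tVN:Z:1.0\nS\ts1\tACGT\tLN:i:4\n"
for record in Gfa1Iterator(io.StringIO(gfa)):
    print(record.id, record.seq, record.annotations["LN"])
# prints: s1 ACGT ('i', '4')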
Parser for GFA 2.0 files.
Documentation for version 2: https://gfa-spec.github.io/GFA-spec/GFA2.html | def Gfa2Iterator(source):
"""Parser for GFA 2.0 files.
Documentation for version 2: https://gfa-spec.github.io/GFA-spec/GFA2.html
"""
with as_handle(source) as handle:
for line in handle:
if line == "\n":
warnings.warn("GFA data has a blank line.", BiopythonWarning)
continue
fields = line.strip("\n").split("\t")
if fields[0] != "S":
continue
if len(fields) < 4:
raise ValueError(
f"Segment line must have name, length, and sequence fields: {line}."
)
try:
int(fields[2])
except ValueError:
raise ValueError(
f"Segment line must have an integer length: {line}."
) from None
if fields[3] == "*":
seq = Seq(None, length=0)
else:
seq = Seq(fields[3])
tags = fields[4:]
_check_tags(seq, tags)
annotations = _tags_to_annotations(tags)
yield SeqRecord(seq, id=fields[1], name=fields[1], annotations=annotations) |
Build a GenBank/EMBL position string (PRIVATE).
Use offset=1 to add one to convert a start position from python counting. | def _insdc_feature_position_string(pos, offset=0):
"""Build a GenBank/EMBL position string (PRIVATE).
Use offset=1 to add one to convert a start position from python counting.
"""
if isinstance(pos, SeqFeature.ExactPosition):
return "%i" % (pos + offset)
elif isinstance(pos, SeqFeature.WithinPosition):
# TODO - avoid private variables
return "(%i.%i)" % (
pos._left + offset,
pos._right + offset,
)
elif isinstance(pos, SeqFeature.BetweenPosition):
# TODO - avoid private variables
return "(%i^%i)" % (
pos._left + offset,
pos._right + offset,
)
elif isinstance(pos, SeqFeature.BeforePosition):
return "<%i" % (pos + offset)
elif isinstance(pos, SeqFeature.AfterPosition):
return ">%i" % (pos + offset)
elif isinstance(pos, SeqFeature.OneOfPosition):
return "one-of(%s)" % ",".join(
_insdc_feature_position_string(p, offset) for p in pos.position_choices
)
elif isinstance(pos, SeqFeature.Position):
raise NotImplementedError("Please report this as a bug in Biopython.")
else:
raise ValueError("Expected a SeqFeature position object.") |
Build a GenBank/EMBL location from a (Compound) SimpleLocation (PRIVATE).
There is a choice of how to show joins on the reverse complement strand,
GenBank used "complement(join(1,10),(20,100))" while EMBL used to use
"join(complement(20,100),complement(1,10))" instead (but appears to have
now adopted the GenBank convention). Notice that the order of the entries
is reversed! This function therefore uses the first form. In this situation
we expect the CompoundLocation and its parts to all be marked as
strand == -1, and to be in the order 19:100 then 0:10. | def _insdc_location_string(location, rec_length):
"""Build a GenBank/EMBL location from a (Compound) SimpleLocation (PRIVATE).
There is a choice of how to show joins on the reverse complement strand,
GenBank used "complement(join(1,10),(20,100))" while EMBL used to use
"join(complement(20,100),complement(1,10))" instead (but appears to have
now adopted the GenBank convention). Notice that the order of the entries
is reversed! This function therefore uses the first form. In this situation
we expect the CompoundLocation and its parts to all be marked as
strand == -1, and to be in the order 19:100 then 0:10.
"""
try:
parts = location.parts
# CompoundLocation
if location.strand == -1:
# Special case, put complement outside the join/order/... and reverse order
return "complement(%s(%s))" % (
location.operator,
",".join(
_insdc_location_string_ignoring_strand_and_subfeatures(
p, rec_length
)
for p in parts[::-1]
),
)
else:
return "%s(%s)" % (
location.operator,
",".join(_insdc_location_string(p, rec_length) for p in parts),
)
except AttributeError:
# SimpleLocation
loc = _insdc_location_string_ignoring_strand_and_subfeatures(
location, rec_length
)
if location.strand == -1:
return f"complement({loc})"
else:
return loc |
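A sketch on simple locations, assuming Biopython's SimpleLocation (FeatureLocation in older releases); a reverse-strand location gains the complement wrapper:

from Bio.SeqFeature import SimpleLocation

assert _insdc_location_string(SimpleLocation(0, 10, strand=1), 1000) == "1..10"
assert _insdc_location_string(SimpleLocation(0, 10, strand=-1), 1000) == "complement(1..10)"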
Fast GenBank to FASTA (PRIVATE). | def _genbank_convert_fasta(in_file, out_file):
"""Fast GenBank to FASTA (PRIVATE)."""
# We don't need to parse the features...
records = GenBankScanner().parse_records(in_file, do_features=False)
return SeqIO.write(records, out_file, "fasta") |
Fast EMBL to FASTA (PRIVATE). | def _embl_convert_fasta(in_file, out_file):
"""Fast EMBL to FASTA (PRIVATE)."""
# We don't need to parse the features...
records = EmblScanner().parse_records(in_file, do_features=False)
return SeqIO.write(records, out_file, "fasta") |
Use this to catch errors like the sequence being None (PRIVATE). | def _get_seq_string(record: SeqRecord) -> str:
"""Use this to catch errors like the sequence being None (PRIVATE)."""
if not isinstance(record, SeqRecord):
raise TypeError("Expected a SeqRecord object")
if record.seq is None:
raise TypeError(f"SeqRecord (id={record.id}) has None for its sequence.")
elif not isinstance(record.seq, (Seq, MutableSeq)):
raise TypeError(f"SeqRecord (id={record.id}) has an invalid sequence.")
return str(record.seq) |
Use this to avoid getting newlines in the output (PRIVATE). | def _clean(text: str) -> str:
"""Use this to avoid getting newlines in the output (PRIVATE)."""
return text.replace("\n", " ").replace("\r", " ") |
Return the one-letter amino acid code from the residue name.
Non-amino-acid residues are returned as "X". | def _res2aacode(residue, undef_code="X"):
"""Return the one-letter amino acid code from the residue name.
Non-amino-acid residues are returned as "X".
"""
if isinstance(residue, str):
return _aa3to1_dict.get(residue, undef_code)
return _aa3to1_dict.get(residue.resname, undef_code) |
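For example:

assert _res2aacode("ALA") == "A"
assert _res2aacode("HOH") == "X"  # water is not an amino acid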
Return SeqRecords from Structure objects.
Base function for sequence parsers that read structures Bio.PDB parsers.
Once a parser from Bio.PDB has been used to load a structure into a
Bio.PDB.Structure.Structure object, there is no difference in how the
sequence parser interprets the residue sequence. The functions in this
module may be used by SeqIO modules wishing to parse sequences from lists
of residues.
Calling functions must pass a Bio.PDB.Structure.Structure object.
See Bio.SeqIO.PdbIO.PdbAtomIterator and Bio.SeqIO.PdbIO.CifAtomIterator for
details. | def AtomIterator(pdb_id, structure):
"""Return SeqRecords from Structure objects.
Base function for sequence parsers that read structures Bio.PDB parsers.
Once a parser from Bio.PDB has been used to load a structure into a
Bio.PDB.Structure.Structure object, there is no difference in how the
sequence parser interprets the residue sequence. The functions in this
module may be used by SeqIO modules wishing to parse sequences from lists
of residues.
Calling functions must pass a Bio.PDB.Structure.Structure object.
See Bio.SeqIO.PdbIO.PdbAtomIterator and Bio.SeqIO.PdbIO.CifAtomIterator for
details.
"""
model = structure[0]
for chn_id, chain in sorted(model.child_dict.items()):
# HETATM mod. res. policy: remove mod if in sequence, else discard
residues = [
res
for res in chain.get_unpacked_list()
if _res2aacode(res.get_resname().upper()) != "X"
]
if not residues:
continue
# Identify missing residues in the structure
# (fill the sequence with 'X' residues in these regions)
gaps = []
rnumbers = [r.id[1] for r in residues]
for i, rnum in enumerate(rnumbers[:-1]):
if rnumbers[i + 1] != rnum + 1 and rnumbers[i + 1] != rnum:
# It's a gap!
gaps.append((i + 1, rnum, rnumbers[i + 1]))
if gaps:
res_out = []
prev_idx = 0
for i, pregap, postgap in gaps:
if postgap > pregap:
gapsize = postgap - pregap - 1
res_out.extend(_res2aacode(x) for x in residues[prev_idx:i])
prev_idx = i
res_out.append("X" * gapsize)
else:
warnings.warn(
"Ignoring out-of-order residues after a gap",
BiopythonParserWarning,
)
# Keep the normal part, drop the out-of-order segment
# (presumably modified or hetatm residues, e.g. 3BEG)
res_out.extend(_res2aacode(x) for x in residues[prev_idx:i])
break
else:
# Last segment
res_out.extend(_res2aacode(x) for x in residues[prev_idx:])
else:
# No gaps
res_out = [_res2aacode(x) for x in residues]
record_id = f"{pdb_id}:{chn_id}"
# ENH - model number in SeqRecord id if multiple models?
# id = "Chain%s" % str(chain.id)
# if len(structure) > 1 :
# id = ("Model%s|" % str(model.id)) + id
record = SeqRecord(Seq("".join(res_out)), id=record_id, description=record_id)
# TODO: Test PDB files with DNA and RNA too:
record.annotations["molecule_type"] = "protein"
record.annotations["model"] = model.id
record.annotations["chain"] = chain.id
record.annotations["start"] = int(rnumbers[0])
record.annotations["end"] = int(rnumbers[-1])
yield record |
Return SeqRecord objects for each chain in a PDB file.
Argument source is a file-like object or a path to a file.
The sequences are derived from the 3D structure (ATOM records), not the
SEQRES lines in the PDB file header.
Unrecognised three letter amino acid codes (e.g. "CSD") from HETATM entries
are converted to "X" in the sequence.
In addition to information from the PDB header (which is the same for all
records), the following chain specific information is placed in the
annotation:
record.annotations["residues"] = List of residue ID strings
record.annotations["chain"] = Chain ID (typically A, B ,...)
record.annotations["model"] = Model ID (typically zero)
Where amino acids are missing from the structure, as indicated by residue
numbering, the sequence is filled in with 'X' characters to match the size
of the missing region, and None is included as the corresponding entry in
the list record.annotations["residues"].
This function uses the Bio.PDB module to do most of the hard work. The
annotation information could be improved but this extra parsing should be
done in parse_pdb_header, not this module.
This gets called internally via Bio.SeqIO for the atom based interpretation
of the PDB file format:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("PDB/1A8O.pdb", "pdb-atom"):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A
Equivalently,
>>> with open("PDB/1A8O.pdb") as handle:
... for record in PdbAtomIterator(handle):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A | def PdbAtomIterator(source):
"""Return SeqRecord objects for each chain in a PDB file.
Argument source is a file-like object or a path to a file.
The sequences are derived from the 3D structure (ATOM records), not the
SEQRES lines in the PDB file header.
Unrecognised three letter amino acid codes (e.g. "CSD") from HETATM entries
are converted to "X" in the sequence.
In addition to information from the PDB header (which is the same for all
records), the following chain specific information is placed in the
annotation:
record.annotations["residues"] = List of residue ID strings
record.annotations["chain"] = Chain ID (typically A, B ,...)
record.annotations["model"] = Model ID (typically zero)
Where amino acids are missing from the structure, as indicated by residue
numbering, the sequence is filled in with 'X' characters to match the size
of the missing region, and None is included as the corresponding entry in
the list record.annotations["residues"].
This function uses the Bio.PDB module to do most of the hard work. The
annotation information could be improved but this extra parsing should be
done in parse_pdb_header, not this module.
This gets called internally via Bio.SeqIO for the atom based interpretation
of the PDB file format:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("PDB/1A8O.pdb", "pdb-atom"):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A
Equivalently,
>>> with open("PDB/1A8O.pdb") as handle:
... for record in PdbAtomIterator(handle):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A
"""
# TODO - Add record.annotations to the doctest, esp the residues (not working?)
# Only import PDB when needed, to avoid/delay NumPy dependency in SeqIO
from Bio.PDB import PDBParser
structure = PDBParser().get_structure(None, source)
pdb_id = structure.header["idcode"]
if not pdb_id:
warnings.warn(
"'HEADER' line not found; can't determine PDB ID.", BiopythonParserWarning
)
pdb_id = "????"
for record in AtomIterator(pdb_id, structure):
# The PDB header was loaded as a dictionary, so let's reuse it all
record.annotations.update(structure.header)
# ENH - add letter annotations -- per-residue info, e.g. numbers
yield record |
Return SeqRecord objects for each chain in an mmCIF file.
Argument source is a file-like object or a path to a file.
The sequences are derived from the _entity_poly_seq entries in the mmCIF
file, not the atoms of the 3D structure.
Specifically, these mmCIF records are handled: _pdbx_poly_seq_scheme and
_struct_ref_seq. The _pdbx_poly_seq records contain sequence information,
and the _struct_ref_seq records contain database cross-references.
See:
http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v40.dic/Categories/pdbx_poly_seq_scheme.html
and
http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Categories/struct_ref_seq.html
This gets called internally via Bio.SeqIO for the sequence-based
interpretation of the mmCIF file format:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("PDB/1A8O.cif", "cif-seqres"):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
... print(record.dbxrefs)
...
Record id 1A8O:A, chain A
['UNP:P12497', 'UNP:POL_HV1N5']
Equivalently,
>>> with open("PDB/1A8O.cif") as handle:
... for record in CifSeqresIterator(handle):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
... print(record.dbxrefs)
...
Record id 1A8O:A, chain A
['UNP:P12497', 'UNP:POL_HV1N5']
Note the chain is recorded in the annotations dictionary, and any mmCIF
_struct_ref_seq entries are recorded in the database cross-references list. | def CifSeqresIterator(source):
"""Return SeqRecord objects for each chain in an mmCIF file.
Argument source is a file-like object or a path to a file.
The sequences are derived from the _entity_poly_seq entries in the mmCIF
file, not the atoms of the 3D structure.
Specifically, these mmCIF records are handled: _pdbx_poly_seq_scheme and
_struct_ref_seq. The _pdbx_poly_seq records contain sequence information,
and the _struct_ref_seq records contain database cross-references.
See:
http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v40.dic/Categories/pdbx_poly_seq_scheme.html
and
http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Categories/struct_ref_seq.html
This gets called internally via Bio.SeqIO for the sequence-based
interpretation of the mmCIF file format:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("PDB/1A8O.cif", "cif-seqres"):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
... print(record.dbxrefs)
...
Record id 1A8O:A, chain A
['UNP:P12497', 'UNP:POL_HV1N5']
Equivalently,
>>> with open("PDB/1A8O.cif") as handle:
... for record in CifSeqresIterator(handle):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
... print(record.dbxrefs)
...
Record id 1A8O:A, chain A
['UNP:P12497', 'UNP:POL_HV1N5']
Note the chain is recorded in the annotations dictionary, and any mmCIF
_struct_ref_seq entries are recorded in the database cross-references list.
"""
# Only import PDB when needed, to avoid/delay NumPy dependency in SeqIO
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
chains = collections.defaultdict(list)
metadata = collections.defaultdict(list)
records = MMCIF2Dict(source)
# Explicitly convert records to list (See #1533).
# If an item is not present, use an empty list
for field in (
PDBX_POLY_SEQ_SCHEME_FIELDS + STRUCT_REF_SEQ_FIELDS + STRUCT_REF_FIELDS
):
if field not in records:
records[field] = []
elif not isinstance(records[field], list):
records[field] = [records[field]]
for asym_id, mon_id in zip(
records["_pdbx_poly_seq_scheme.asym_id"],
records["_pdbx_poly_seq_scheme.mon_id"],
):
mon_id_1l = _res2aacode(mon_id)
chains[asym_id].append(mon_id_1l)
# Build a dict of _struct_ref records, indexed by the id field:
struct_refs = {}
for ref_id, db_name, db_code, db_acc in zip(
records["_struct_ref.id"],
records["_struct_ref.db_name"],
records["_struct_ref.db_code"],
records["_struct_ref.pdbx_db_accession"],
):
struct_refs[ref_id] = {
"database": db_name,
"db_id_code": db_code,
"db_acc": db_acc,
}
# Look through _struct_ref_seq records, look up the corresponding
# _struct_ref and add an entry to the metadata list for this chain.
for ref_id, pdb_id, chain_id in zip(
records["_struct_ref_seq.ref_id"],
records["_struct_ref_seq.pdbx_PDB_id_code"],
records["_struct_ref_seq.pdbx_strand_id"],
):
struct_ref = struct_refs[ref_id]
# The names here mirror those in PdbIO
metadata[chain_id].append({"pdb_id": pdb_id})
metadata[chain_id][-1].update(struct_ref)
for chn_id, residues in sorted(chains.items()):
record = SeqRecord(Seq("".join(residues)))
record.annotations = {"chain": chn_id}
# TODO: Test PDB files with DNA and RNA too:
record.annotations["molecule_type"] = "protein"
if chn_id in metadata:
m = metadata[chn_id][0]
record.id = record.name = f"{m['pdb_id']}:{chn_id}"
record.description = f"{m['database']}:{m['db_acc']} {m['db_id_code']}"
for melem in metadata[chn_id]:
record.dbxrefs.extend(
[
f"{melem['database']}:{melem['db_acc']}",
f"{melem['database']}:{melem['db_id_code']}",
]
)
else:
record.id = chn_id
yield record |
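# Illustrative sketch (not part of the module): the list normalisation
# above exists because MMCIF2Dict returns a bare string for single-row
# categories but a list for multi-row ones. Standalone demonstration,
# reusing the mmCIF file path from the doctests:
from Bio.PDB.MMCIF2Dict import MMCIF2Dict

mmcif_dict = MMCIF2Dict("PDB/1A8O.cif")
value = mmcif_dict.get("_pdbx_poly_seq_scheme.asym_id", [])
if not isinstance(value, list):
    # A single row comes back as a plain string, so wrap it
    value = [value]
print("%i monomer rows found" % len(value))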
Return SeqRecord objects for each chain in an mmCIF file.
Argument source is a file-like object or a path to a file.
The sequences are derived from the 3D structure (_atom_site.* fields)
in the mmCIF file.
Unrecognised three letter amino acid codes (e.g. "CSD") from HETATM entries
are converted to "X" in the sequence.
In addition to information from the PDB header (which is the same for all
records), the following chain specific information is placed in the
annotation:
record.annotations["residues"] = List of residue ID strings
record.annotations["chain"] = Chain ID (typically A, B ,...)
record.annotations["model"] = Model ID (typically zero)
Where amino acids are missing from the structure, as indicated by residue
numbering, the sequence is filled in with 'X' characters to match the size
of the missing region, and None is included as the corresponding entry in
the list record.annotations["residues"].
This function uses the Bio.PDB module to do most of the hard work. The
annotation information could be improved but this extra parsing should be
done in parse_pdb_header, not this module.
This gets called internally via Bio.SeqIO for the atom based interpretation
of the PDB file format:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("PDB/1A8O.cif", "cif-atom"):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A
Equivalently,
>>> with open("PDB/1A8O.cif") as handle:
... for record in CifAtomIterator(handle):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A | def CifAtomIterator(source):
"""Return SeqRecord objects for each chain in an mmCIF file.
Argument source is a file-like object or a path to a file.
The sequences are derived from the 3D structure (_atom_site.* fields)
in the mmCIF file.
Unrecognised three letter amino acid codes (e.g. "CSD") from HETATM entries
are converted to "X" in the sequence.
In addition to information from the PDB header (which is the same for all
records), the following chain specific information is placed in the
annotation:
record.annotations["residues"] = List of residue ID strings
record.annotations["chain"] = Chain ID (typically A, B ,...)
record.annotations["model"] = Model ID (typically zero)
Where amino acids are missing from the structure, as indicated by residue
numbering, the sequence is filled in with 'X' characters to match the size
of the missing region, and None is included as the corresponding entry in
the list record.annotations["residues"].
This function uses the Bio.PDB module to do most of the hard work. The
annotation information could be improved but this extra parsing should be
done in parse_pdb_header, not this module.
This gets called internally via Bio.SeqIO for the atom based interpretation
of the PDB file format:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("PDB/1A8O.cif", "cif-atom"):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A
Equivalently,
>>> with open("PDB/1A8O.cif") as handle:
... for record in CifAtomIterator(handle):
... print("Record id %s, chain %s" % (record.id, record.annotations["chain"]))
...
Record id 1A8O:A, chain A
"""
# TODO - Add record.annotations to the doctest, esp the residues (not working?)
# Only import parser when needed, to avoid/delay NumPy dependency in SeqIO
from Bio.PDB.MMCIFParser import MMCIFParser
structure = MMCIFParser().get_structure(None, source)
pdb_id = structure.header["idcode"]
if not pdb_id:
warnings.warn("Could not determine the PDB ID.", BiopythonParserWarning)
pdb_id = "????"
yield from AtomIterator(pdb_id, structure) |
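# Usage sketch (annotation keys taken from the docstring above): the
# chain and model annotations promised for each record can be read back
# directly. The file path is reused from the doctests.
from Bio import SeqIO

for record in SeqIO.parse("PDB/1A8O.cif", "cif-atom"):
    print(record.id, record.annotations["chain"], record.annotations["model"])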
Return SeqRecord objects from a PHD file.
Arguments:
- source - input stream opened in text mode, or a path to a file
This uses the Bio.Sequencing.Phd module to do the hard work. | def PhdIterator(source: _TextIOSource) -> Iterator[SeqRecord]:
"""Return SeqRecord objects from a PHD file.
Arguments:
- source - input stream opened in text mode, or a path to a file
This uses the Bio.Sequencing.Phd module to do the hard work.
"""
phd_records = Phd.parse(source)
for phd_record in phd_records:
# Convert the PHD record into a SeqRecord...
# The "filename" can contain spaces, e.g. 'HWI-EAS94_4_1_1_602_99 1'
# from unit test example file phd_solexa.
# This will cause problems if used as the record identifier
# (e.g. output for FASTQ format).
name = phd_record.file_name.split(None, 1)[0]
seq_record = SeqRecord(
phd_record.seq, id=name, name=name, description=phd_record.file_name
)
# Just reuse the comments dictionary as the SeqRecord's annotations
seq_record.annotations = phd_record.comments
seq_record.annotations["molecule_type"] = "DNA"
# And store the qualities and peak locations as per-letter-annotation
seq_record.letter_annotations["phred_quality"] = [
int(site[1]) for site in phd_record.sites
]
try:
seq_record.letter_annotations["peak_location"] = [
int(site[2]) for site in phd_record.sites
]
except IndexError:
# peak locations are not always there according to
# David Gordon (the Consed author)
pass
yield seq_record |
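# Usage sketch: PHD records carry "phred_quality" per letter, so they can
# be re-encoded as FASTQ via Bio.SeqIO. The file names here are
# illustrative assumptions, not files shipped with this module.
from Bio import SeqIO

count = SeqIO.convert("example.phd", "phd", "example.fastq", "fastq")
print("Converted %i record(s)" % count)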
Convert a PHRED quality (range 0 to about 90) to a Solexa quality.
PHRED and Solexa quality scores are both log transformations of a
probability of error (high score = low probability of error). This function
takes a PHRED score, transforms it back to a probability of error, and
then re-expresses it as a Solexa score. This assumes the error estimates
are equivalent.
How does this work exactly? Well the PHRED quality is minus ten times the
base ten logarithm of the probability of error::
phred_quality = -10*log(error,10)
Therefore, turning this round::
error = 10 ** (- phred_quality / 10)
Now, Solexa qualities use a different log transformation::
solexa_quality = -10*log(error/(1-error),10)
After substitution and a little manipulation we get::
solexa_quality = 10*log(10**(phred_quality/10.0) - 1, 10)
However, real Solexa files use a minimum quality of -5. This does have a
good reason - a random base call would be correct 25% of the time,
and thus have a probability of error of 0.75, which gives 1.25 as the PHRED
quality, or -4.77 as the Solexa quality. Thus (after rounding), a random
nucleotide read would have a PHRED quality of 1, or a Solexa quality of -5.
Taken literally, this logarithmic formula would map a PHRED quality of zero
to a Solexa quality of minus infinity. Of course, taken literally, a PHRED
score of zero means a probability of error of one (i.e. the base call is
definitely wrong), which is worse than random! In practice, a PHRED quality
of zero usually means a default value, or perhaps random - and therefore
mapping it to the minimum Solexa score of -5 is reasonable.
In conclusion, we follow EMBOSS, and take this logarithmic formula but also
apply a minimum value of -5.0 for the Solexa quality, and also map a PHRED
quality of zero to -5.0 as well.
Note this function will return a floating point number; it is up to you to
round this to the nearest integer if appropriate. e.g.
>>> print("%0.2f" % round(solexa_quality_from_phred(80), 2))
80.00
>>> print("%0.2f" % round(solexa_quality_from_phred(50), 2))
50.00
>>> print("%0.2f" % round(solexa_quality_from_phred(20), 2))
19.96
>>> print("%0.2f" % round(solexa_quality_from_phred(10), 2))
9.54
>>> print("%0.2f" % round(solexa_quality_from_phred(5), 2))
3.35
>>> print("%0.2f" % round(solexa_quality_from_phred(4), 2))
1.80
>>> print("%0.2f" % round(solexa_quality_from_phred(3), 2))
-0.02
>>> print("%0.2f" % round(solexa_quality_from_phred(2), 2))
-2.33
>>> print("%0.2f" % round(solexa_quality_from_phred(1), 2))
-5.00
>>> print("%0.2f" % round(solexa_quality_from_phred(0), 2))
-5.00
Notice that for high quality reads PHRED and Solexa scores are numerically
equal. The differences are important for poor quality reads, where PHRED
has a minimum of zero but Solexa scores can be negative.
Finally, as a special case where None is used for a "missing value", None
is returned:
>>> print(solexa_quality_from_phred(None))
None | def solexa_quality_from_phred(phred_quality: float) -> float:
"""Convert a PHRED quality (range 0 to about 90) to a Solexa quality.
PHRED and Solexa quality scores are both log transformations of a
probability of error (high score = low probability of error). This function
takes a PHRED score, transforms it back to a probability of error, and
then re-expresses it as a Solexa score. This assumes the error estimates
are equivalent.
How does this work exactly? Well the PHRED quality is minus ten times the
base ten logarithm of the probability of error::
phred_quality = -10*log(error,10)
Therefore, turning this round::
error = 10 ** (- phred_quality / 10)
Now, Solexa qualities use a different log transformation::
solexa_quality = -10*log(error/(1-error),10)
After substitution and a little manipulation we get::
solexa_quality = 10*log(10**(phred_quality/10.0) - 1, 10)
However, real Solexa files use a minimum quality of -5. This does have a
good reason - a random base call would be correct 25% of the time,
and thus have a probability of error of 0.75, which gives 1.25 as the PHRED
quality, or -4.77 as the Solexa quality. Thus (after rounding), a random
nucleotide read would have a PHRED quality of 1, or a Solexa quality of -5.
Taken literally, this logarithmic formula would map a PHRED quality of zero
to a Solexa quality of minus infinity. Of course, taken literally, a PHRED
score of zero means a probability of error of one (i.e. the base call is
definitely wrong), which is worse than random! In practice, a PHRED quality
of zero usually means a default value, or perhaps random - and therefore
mapping it to the minimum Solexa score of -5 is reasonable.
In conclusion, we follow EMBOSS, and take this logarithmic formula but also
apply a minimum value of -5.0 for the Solexa quality, and also map a PHRED
quality of zero to -5.0 as well.
Note this function will return a floating point number; it is up to you to
round this to the nearest integer if appropriate. e.g.
>>> print("%0.2f" % round(solexa_quality_from_phred(80), 2))
80.00
>>> print("%0.2f" % round(solexa_quality_from_phred(50), 2))
50.00
>>> print("%0.2f" % round(solexa_quality_from_phred(20), 2))
19.96
>>> print("%0.2f" % round(solexa_quality_from_phred(10), 2))
9.54
>>> print("%0.2f" % round(solexa_quality_from_phred(5), 2))
3.35
>>> print("%0.2f" % round(solexa_quality_from_phred(4), 2))
1.80
>>> print("%0.2f" % round(solexa_quality_from_phred(3), 2))
-0.02
>>> print("%0.2f" % round(solexa_quality_from_phred(2), 2))
-2.33
>>> print("%0.2f" % round(solexa_quality_from_phred(1), 2))
-5.00
>>> print("%0.2f" % round(solexa_quality_from_phred(0), 2))
-5.00
Notice that for high quality reads PHRED and Solexa scores are numerically
equal. The differences are important for poor quality reads, where PHRED
has a minimum of zero but Solexa scores can be negative.
Finally, as a special case where None is used for a "missing value", None
is returned:
>>> print(solexa_quality_from_phred(None))
None
"""
if phred_quality is None:
# Assume None is used as some kind of NULL or NA value; return None
# e.g. Bio.SeqIO gives Ace contig gaps a quality of None.
return None
elif phred_quality > 0:
# Solexa uses a minimum value of -5, which after rounding matches a
# random nucleotide base call.
return max(-5.0, 10 * log(10 ** (phred_quality / 10.0) - 1, 10))
elif phred_quality == 0:
# Special case, map to -5 as discussed in the docstring
return -5.0
else:
raise ValueError(
f"PHRED qualities must be positive (or zero), not {phred_quality!r}"
) |
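# Worked check (standalone sketch; the helper name is chosen here for
# illustration): derive the same mapping by going through the underlying
# error probability, exactly as the docstring formulas describe.
from math import log10

def _solexa_from_phred_reference(phred_quality):
    error = 10 ** (-phred_quality / 10.0)  # PHRED score -> error probability
    return -10 * log10(error / (1 - error))  # error odds -> Solexa score

for q in (40, 10, 2):
    print(q, "%0.2f" % _solexa_from_phred_reference(q))
# Prints 40.00, 9.54 and -2.33, matching the doctest values above.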
Convert a Solexa quality (which can be negative) to a PHRED quality.
PHRED and Solexa quality scores are both log transformations of a
probability of error (high score = low probability of error). This function
takes a Solexa score, transforms it back to a probability of error, and
then re-expresses it as a PHRED score. This assumes the error estimates
are equivalent.
The underlying formulas are given in the documentation for the sister
function solexa_quality_from_phred, in this case the operation is::
phred_quality = 10*log(10**(solexa_quality/10.0) + 1, 10)
This will return a floating point number; it is up to you to round this to
the nearest integer if appropriate. e.g.
>>> print("%0.2f" % round(phred_quality_from_solexa(80), 2))
80.00
>>> print("%0.2f" % round(phred_quality_from_solexa(20), 2))
20.04
>>> print("%0.2f" % round(phred_quality_from_solexa(10), 2))
10.41
>>> print("%0.2f" % round(phred_quality_from_solexa(0), 2))
3.01
>>> print("%0.2f" % round(phred_quality_from_solexa(-5), 2))
1.19
Note that a solexa_quality less than -5 is not expected and will trigger a
warning, but will still be converted as per the logarithmic mapping
(giving a number between 0 and 1.19 back).
As a special case where None is used for a "missing value", None is
returned:
>>> print(phred_quality_from_solexa(None))
None | def phred_quality_from_solexa(solexa_quality: float) -> float:
"""Convert a Solexa quality (which can be negative) to a PHRED quality.
PHRED and Solexa quality scores are both log transformations of a
probability of error (high score = low probability of error). This function
takes a Solexa score, transforms it back to a probability of error, and
then re-expresses it as a PHRED score. This assumes the error estimates
are equivalent.
The underlying formulas are given in the documentation for the sister
function solexa_quality_from_phred, in this case the operation is::
phred_quality = 10*log(10**(solexa_quality/10.0) + 1, 10)
This will return a floating point number; it is up to you to round this to
the nearest integer if appropriate. e.g.
>>> print("%0.2f" % round(phred_quality_from_solexa(80), 2))
80.00
>>> print("%0.2f" % round(phred_quality_from_solexa(20), 2))
20.04
>>> print("%0.2f" % round(phred_quality_from_solexa(10), 2))
10.41
>>> print("%0.2f" % round(phred_quality_from_solexa(0), 2))
3.01
>>> print("%0.2f" % round(phred_quality_from_solexa(-5), 2))
1.19
Note that a solexa_quality less than -5 is not expected and will trigger a
warning, but will still be converted as per the logarithmic mapping
(giving a number between 0 and 1.19 back).
As a special case where None is used for a "missing value", None is
returned:
>>> print(phred_quality_from_solexa(None))
None
"""
if solexa_quality is None:
# Assume None is used as some kind of NULL or NA value; return None
return None
if solexa_quality < -5:
warnings.warn(
f"Solexa quality less than -5 passed, {solexa_quality!r}", BiopythonWarning
)
return 10 * log(10 ** (solexa_quality / 10.0) + 1, 10) |
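# Sanity-check sketch: away from the clipping region the two conversions
# are exact inverses, since both go through the same error probability.
# The imports assume this module is available as Bio.SeqIO.QualityIO.
from Bio.SeqIO.QualityIO import (
    phred_quality_from_solexa,
    solexa_quality_from_phred,
)

for q in (40, 20, 10, 5):
    round_trip = phred_quality_from_solexa(solexa_quality_from_phred(q))
    assert abs(round_trip - q) < 1e-6, (q, round_trip)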
Extract PHRED qualities from a SeqRecord's letter_annotations (PRIVATE).
If there are no PHRED qualities, but there are Solexa qualities, those are
used instead after conversion. | def _get_phred_quality(record: SeqRecord) -> Union[List[float], List[int]]:
"""Extract PHRED qualities from a SeqRecord's letter_annotations (PRIVATE).
If there are no PHRED qualities, but there are Solexa qualities, those are
used instead after conversion.
"""
try:
return record.letter_annotations["phred_quality"]
except KeyError:
pass
try:
return [
phred_quality_from_solexa(q)
for q in record.letter_annotations["solexa_quality"]
]
except KeyError:
raise ValueError(
"No suitable quality scores found in "
"letter_annotations of SeqRecord (id=%s)." % record.id
) from None |
Return a Sanger FASTQ encoded quality string (PRIVATE).
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> r = SeqRecord(Seq("ACGTAN"), id="Test",
... letter_annotations = {"phred_quality":[50, 40, 30, 20, 10, 0]})
>>> _get_sanger_quality_str(r)
'SI?5+!'
If, as in the above example (or indeed with a SeqRecord parser in Bio.SeqIO),
the PHRED qualities are integers, this function is able to use a very fast
pre-cached mapping. However, if they are floats which differ slightly, then
it has to do the appropriate rounding - which is slower:
>>> r2 = SeqRecord(Seq("ACGTAN"), id="Test2",
... letter_annotations = {"phred_quality":[50.0, 40.05, 29.99, 20, 9.55, 0.01]})
>>> _get_sanger_quality_str(r2)
'SI?5+!'
If your scores include a None value, this raises an exception:
>>> r3 = SeqRecord(Seq("ACGTAN"), id="Test3",
... letter_annotations = {"phred_quality":[50, 40, 30, 20, 10, None]})
>>> _get_sanger_quality_str(r3)
Traceback (most recent call last):
...
TypeError: A quality value of None was found
If (strangely) your record has both PHRED and Solexa scores, then the PHRED
scores are used in preference:
>>> r4 = SeqRecord(Seq("ACGTAN"), id="Test4",
... letter_annotations = {"phred_quality":[50, 40, 30, 20, 10, 0],
... "solexa_quality":[-5, -4, 0, None, 0, 40]})
>>> _get_sanger_quality_str(r4)
'SI?5+!'
If there are no PHRED scores, but there are Solexa scores, these are used
instead (after the appropriate conversion):
>>> r5 = SeqRecord(Seq("ACGTAN"), id="Test5",
... letter_annotations = {"solexa_quality":[40, 30, 20, 10, 0, -5]})
>>> _get_sanger_quality_str(r5)
'I?5+$"'
Again, integer Solexa scores can be looked up in a pre-cached mapping making
this very fast. You can still use approximate floating point scores:
>>> r6 = SeqRecord(Seq("ACGTAN"), id="Test6",
... letter_annotations = {"solexa_quality":[40.1, 29.7, 20.01, 10, 0.0, -4.9]})
>>> _get_sanger_quality_str(r6)
'I?5+$"'
Notice that due to the limited range of printable ASCII characters, a
PHRED quality of 93 is the maximum that can be held in a Sanger FASTQ
file (using ASCII 126, the tilde). This function will issue a warning
in this situation. | def _get_sanger_quality_str(record: SeqRecord) -> str:
"""Return a Sanger FASTQ encoded quality string (PRIVATE).
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> r = SeqRecord(Seq("ACGTAN"), id="Test",
... letter_annotations = {"phred_quality":[50, 40, 30, 20, 10, 0]})
>>> _get_sanger_quality_str(r)
'SI?5+!'
If, as in the above example (or indeed with a SeqRecord parser in Bio.SeqIO),
the PHRED qualities are integers, this function is able to use a very fast
pre-cached mapping. However, if they are floats which differ slightly, then
it has to do the appropriate rounding - which is slower:
>>> r2 = SeqRecord(Seq("ACGTAN"), id="Test2",
... letter_annotations = {"phred_quality":[50.0, 40.05, 29.99, 20, 9.55, 0.01]})
>>> _get_sanger_quality_str(r2)
'SI?5+!'
If your scores include a None value, this raises an exception:
>>> r3 = SeqRecord(Seq("ACGTAN"), id="Test3",
... letter_annotations = {"phred_quality":[50, 40, 30, 20, 10, None]})
>>> _get_sanger_quality_str(r3)
Traceback (most recent call last):
...
TypeError: A quality value of None was found
If (strangely) your record has both PHRED and Solexa scores, then the PHRED
scores are used in preference:
>>> r4 = SeqRecord(Seq("ACGTAN"), id="Test4",
... letter_annotations = {"phred_quality":[50, 40, 30, 20, 10, 0],
... "solexa_quality":[-5, -4, 0, None, 0, 40]})
>>> _get_sanger_quality_str(r4)
'SI?5+!'
If there are no PHRED scores, but there are Solexa scores, these are used
instead (after the appropriate conversion):
>>> r5 = SeqRecord(Seq("ACGTAN"), id="Test5",
... letter_annotations = {"solexa_quality":[40, 30, 20, 10, 0, -5]})
>>> _get_sanger_quality_str(r5)
'I?5+$"'
Again, integer Solexa scores can be looked up in a pre-cached mapping making
this very fast. You can still use approximate floating point scores:
>>> r6 = SeqRecord(Seq("ACGTAN"), id="Test6",
... letter_annotations = {"solexa_quality":[40.1, 29.7, 20.01, 10, 0.0, -4.9]})
>>> _get_sanger_quality_str(r6)
'I?5+$"'
Notice that due to the limited range of printable ASCII characters, a
PHRED quality of 93 is the maximum that can be held in a Sanger FASTQ
file (using ASCII 126, the tilde). This function will issue a warning
in this situation.
"""
# TODO - This function works and is fast, but it is also ugly
# and there is considerable repetition of code for the other
# two FASTQ variants.
try:
# These take priority (in case both Solexa and PHRED scores found)
qualities = record.letter_annotations["phred_quality"]
except KeyError:
# Fall back on solexa scores...
pass
else:
# Try and use the precomputed mapping:
try:
return "".join(_phred_to_sanger_quality_str[qp] for qp in qualities)
except KeyError:
# Could be a float, or a None in the list, or a high value.
pass
if None in qualities:
raise TypeError("A quality value of None was found")
if max(qualities) >= 93.5:
warnings.warn(
"Data loss - max PHRED quality 93 in Sanger FASTQ", BiopythonWarning
)
# This will apply the truncation at 93, giving max ASCII 126
return "".join(
chr(min(126, int(round(qp)) + SANGER_SCORE_OFFSET)) for qp in qualities
)
# Fall back on the Solexa scores...
try:
qualities = record.letter_annotations["solexa_quality"]
except KeyError:
raise ValueError(
"No suitable quality scores found in "
"letter_annotations of SeqRecord (id=%s)." % record.id
) from None
# Try and use the precomputed mapping:
try:
return "".join(_solexa_to_sanger_quality_str[qs] for qs in qualities)
except KeyError:
# Could be a float, or a None in the list, or a high value.
pass
if None in qualities:
raise TypeError("A quality value of None was found")
# Must do this the slow way, first converting the Solexa scores into
# PHRED scores:
if max(qualities) >= 93.5:
warnings.warn(
"Data loss - max PHRED quality 93 in Sanger FASTQ", BiopythonWarning
)
# This will apply the truncation at 93, giving max ASCII 126
return "".join(
chr(min(126, int(round(phred_quality_from_solexa(qs))) + SANGER_SCORE_OFFSET))
for qs in qualities
) |
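# Sketch of the fast path above: integer PHRED scores 0..93 map to single
# characters via the Sanger offset of 33, so a precomputed dict (like the
# module-level _phred_to_sanger_quality_str) avoids per-letter arithmetic.
# The offset is restated so this snippet is self-contained.
SANGER_SCORE_OFFSET = 33

phred_to_sanger = {q: chr(q + SANGER_SCORE_OFFSET) for q in range(94)}
print("".join(phred_to_sanger[q] for q in [50, 40, 30, 20, 10, 0]))
# Prints 'SI?5+!', matching the doctest at the top of this function.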
Return an Illumina 1.3 to 1.7 FASTQ encoded quality string (PRIVATE).
Notice that due to the limited range of printable ASCII characters, a
PHRED quality of 62 is the maximum that can be held in an Illumina FASTQ
file (using ASCII 126, the tilde). This function will issue a warning
in this situation. | def _get_illumina_quality_str(record: SeqRecord) -> str:
"""Return an Illumina 1.3 to 1.7 FASTQ encoded quality string (PRIVATE).
Notice that due to the limited range of printable ASCII characters, a
PHRED quality of 62 is the maximum that can be held in an Illumina FASTQ
file (using ASCII 126, the tilde). This function will issue a warning
in this situation.
"""
# TODO - This function works and is fast, but it is also ugly
# and there is considerable repetition of code for the other
# two FASTQ variants.
try:
# These take priority (in case both Solexa and PHRED scores found)
qualities = record.letter_annotations["phred_quality"]
except KeyError:
# Fall back on solexa scores...
pass
else:
# Try and use the precomputed mapping:
try:
return "".join(_phred_to_illumina_quality_str[qp] for qp in qualities)
except KeyError:
# Could be a float, or a None in the list, or a high value.
pass
if None in qualities:
raise TypeError("A quality value of None was found")
if max(qualities) >= 62.5:
warnings.warn(
"Data loss - max PHRED quality 62 in Illumina FASTQ", BiopythonWarning
)
# This will apply the truncation at 62, giving max ASCII 126
return "".join(
chr(min(126, int(round(qp)) + SOLEXA_SCORE_OFFSET)) for qp in qualities
)
# Fall back on the Solexa scores...
try:
qualities = record.letter_annotations["solexa_quality"]
except KeyError:
raise ValueError(
"No suitable quality scores found in "
"letter_annotations of SeqRecord (id=%s)." % record.id
) from None
# Try and use the precomputed mapping:
try:
return "".join(_solexa_to_illumina_quality_str[qs] for qs in qualities)
except KeyError:
# Could be a float, or a None in the list, or a high value.
pass
if None in qualities:
raise TypeError("A quality value of None was found")
# Must do this the slow way, first converting the Solexa scores into
# PHRED scores:
if max(qualities) >= 62.5:
warnings.warn(
"Data loss - max PHRED quality 62 in Illumina FASTQ", BiopythonWarning
)
# This will apply the truncation at 62, giving max ASCII 126
return "".join(
chr(min(126, int(round(phred_quality_from_solexa(qs))) + SOLEXA_SCORE_OFFSET))
for qs in qualities
) |
Return a Solexa FASTQ encoded quality string (PRIVATE).
Notice that due to the limited range of printable ASCII characters, a
Solexa quality of 62 is the maximum that can be held in a Solexa FASTQ
file (using ASCII 126, the tilde). This function will issue a warning
in this situation. | def _get_solexa_quality_str(record: SeqRecord) -> str:
"""Return a Solexa FASTQ encoded quality string (PRIVATE).
Notice that due to the limited range of printable ASCII characters, a
Solexa quality of 62 is the maximum that can be held in a Solexa FASTQ
file (using ASCII 126, the tilde). This function will issue a warning
in this situation.
"""
# TODO - This function works and is fast, but it is also ugly
# and there is considerable repetition of code for the other
# two FASTQ variants.
try:
# These take priority (in case both Solexa and PHRED scores found)
qualities = record.letter_annotations["solexa_quality"]
except KeyError:
# Fall back on PHRED scores...
pass
else:
# Try and use the precomputed mapping:
try:
return "".join(_solexa_to_solexa_quality_str[qs] for qs in qualities)
except KeyError:
# Could be a float, or a None in the list, or a high value.
pass
if None in qualities:
raise TypeError("A quality value of None was found")
if max(qualities) >= 62.5:
warnings.warn(
"Data loss - max Solexa quality 62 in Solexa FASTQ", BiopythonWarning
)
# This will apply the truncation at 62, giving max ASCII 126
return "".join(
chr(min(126, int(round(qs)) + SOLEXA_SCORE_OFFSET)) for qs in qualities
)
# Fall back on the PHRED scores...
try:
qualities = record.letter_annotations["phred_quality"]
except KeyError:
raise ValueError(
"No suitable quality scores found in "
"letter_annotations of SeqRecord (id=%s)." % record.id
) from None
# Try and use the precomputed mapping:
try:
return "".join(_phred_to_solexa_quality_str[qp] for qp in qualities)
except KeyError:
# Could be a float, or a None in the list, or a PHRED score
# too big to be in the precomputed cache
pass
if None in qualities:
raise TypeError("A quality value of None was found")
# Must do this the slow way, first converting the PHRED scores into
# Solexa scores:
if max(qualities) >= 62.5:
warnings.warn(
"Data loss - max Solexa quality 62 in Solexa FASTQ", BiopythonWarning
)
return "".join(
chr(min(126, int(round(solexa_quality_from_phred(qp))) + SOLEXA_SCORE_OFFSET))
for qp in qualities
) |
Iterate over Fastq records as string tuples (not as SeqRecord objects).
Arguments:
- source - input stream opened in text mode, or a path to a file
This code does not try to interpret the quality string numerically. It
just returns tuples of the title, sequence and quality as strings. For
the sequence and quality, any whitespace (such as new lines) is removed.
Our SeqRecord based FASTQ iterators call this function internally, and then
turn the strings into a SeqRecord objects, mapping the quality string into
a list of numerical scores. If you want to do a custom quality mapping,
then you might consider calling this function directly.
For parsing FASTQ files, the title string from the "@" line at the start
of each record can optionally be omitted on the "+" lines. If it is
repeated, it must be identical.
The sequence string and the quality string can optionally be split over
multiple lines, although several sources discourage this. In comparison,
for the FASTA file format line breaks between 60 and 80 characters are
the norm.
**WARNING** - Because the "@" character can appear in the quality string,
this can cause problems as this is also the marker for the start of
a new sequence. In fact, the "+" sign can also appear as well. Some
sources recommended having no line breaks in the quality to avoid this,
but even that is not enough; consider this example::
@071113_EAS56_0053:1:1:998:236
TTTCTTGCCCCCATAGACTGAGACCTTCCCTAAATA
+071113_EAS56_0053:1:1:998:236
IIIIIIIIIIIIIIIIIIIIIIIIIIIIICII+III
@071113_EAS56_0053:1:1:182:712
ACCCAGCTAATTTTTGTATTTTTGTTAGAGACAGTG
+
@IIIIIIIIIIIIIIICDIIIII<%<6&-*).(*%+
@071113_EAS56_0053:1:1:153:10
TGTTCTGAAGGAAGGTGTGCGTGCGTGTGTGTGTGT
+
IIIIIIIIIIIICIIGIIIII>IAIIIE65I=II:6
@071113_EAS56_0053:1:3:990:501
TGGGAGGTTTTATGTGGA
AAGCAGCAATGTACAAGA
+
IIIIIII.IIIIII1@44
@-7.%<&+/$/%4(++(%
This is four PHRED encoded FASTQ entries originally from an NCBI source
(given the read length of 36, these are probably Solexa Illumina reads where
the quality has been mapped onto the PHRED values).
This example has been edited to illustrate some of the nasty things allowed
in the FASTQ format. Firstly, on the "+" lines most but not all of the
(redundant) identifiers are omitted. In real files it is likely that all or
none of these extra identifiers will be present.
Secondly, while the first three sequences have been shown without line
breaks, the last has been split over multiple lines. In real files any line
breaks are likely to be consistent.
Thirdly, some of the quality string lines start with an "@" character. For
the second record this is unavoidable. However for the fourth sequence this
only happens because its quality string is split over two lines. A naive
parser could wrongly treat any line starting with an "@" as the beginning of
a new sequence! This code copes with this possible ambiguity by keeping
track of the length of the sequence which gives the expected length of the
quality string.
Using this tricky example file as input, this short bit of code demonstrates
what this parsing function would return:
>>> with open("Quality/tricky.fastq") as handle:
... for (title, sequence, quality) in FastqGeneralIterator(handle):
... print(title)
... print("%s %s" % (sequence, quality))
...
071113_EAS56_0053:1:1:998:236
TTTCTTGCCCCCATAGACTGAGACCTTCCCTAAATA IIIIIIIIIIIIIIIIIIIIIIIIIIIIICII+III
071113_EAS56_0053:1:1:182:712
ACCCAGCTAATTTTTGTATTTTTGTTAGAGACAGTG @IIIIIIIIIIIIIIICDIIIII<%<6&-*).(*%+
071113_EAS56_0053:1:1:153:10
TGTTCTGAAGGAAGGTGTGCGTGCGTGTGTGTGTGT IIIIIIIIIIIICIIGIIIII>IAIIIE65I=II:6
071113_EAS56_0053:1:3:990:501
TGGGAGGTTTTATGTGGAAAGCAGCAATGTACAAGA IIIIIII.IIIIII1@44@-7.%<&+/$/%4(++(%
Finally we note that some sources state that the quality string should
start with "!" (which using the PHRED mapping means the first letter always
has a quality score of zero). This rather restrictive rule is not widely
observed, so is therefore ignored here. One plus point about this "!" rule
is that (provided there are no line breaks in the quality sequence) it
would prevent the above problem with the "@" character. | def FastqGeneralIterator(source: _TextIOSource) -> Iterator[Tuple[str, str, str]]:
"""Iterate over Fastq records as string tuples (not as SeqRecord objects).
Arguments:
- source - input stream opened in text mode, or a path to a file
This code does not try to interpret the quality string numerically. It
just returns tuples of the title, sequence and quality as strings. For
the sequence and quality, any whitespace (such as new lines) is removed.
Our SeqRecord based FASTQ iterators call this function internally, and then
turn the strings into a SeqRecord objects, mapping the quality string into
a list of numerical scores. If you want to do a custom quality mapping,
then you might consider calling this function directly.
For parsing FASTQ files, the title string from the "@" line at the start
of each record can optionally be omitted on the "+" lines. If it is
repeated, it must be identical.
The sequence string and the quality string can optionally be split over
multiple lines, although several sources discourage this. In comparison,
for the FASTA file format line breaks between 60 and 80 characters are
the norm.
**WARNING** - Because the "@" character can appear in the quality string,
this can cause problems as this is also the marker for the start of
a new sequence. In fact, the "+" sign can also appear as well. Some
sources recommended having no line breaks in the quality to avoid this,
but even that is not enough; consider this example::
@071113_EAS56_0053:1:1:998:236
TTTCTTGCCCCCATAGACTGAGACCTTCCCTAAATA
+071113_EAS56_0053:1:1:998:236
IIIIIIIIIIIIIIIIIIIIIIIIIIIIICII+III
@071113_EAS56_0053:1:1:182:712
ACCCAGCTAATTTTTGTATTTTTGTTAGAGACAGTG
+
@IIIIIIIIIIIIIIICDIIIII<%<6&-*).(*%+
@071113_EAS56_0053:1:1:153:10
TGTTCTGAAGGAAGGTGTGCGTGCGTGTGTGTGTGT
+
IIIIIIIIIIIICIIGIIIII>IAIIIE65I=II:6
@071113_EAS56_0053:1:3:990:501
TGGGAGGTTTTATGTGGA
AAGCAGCAATGTACAAGA
+
IIIIIII.IIIIII1@44
@-7.%<&+/$/%4(++(%
This is four PHRED encoded FASTQ entries originally from an NCBI source
(given the read length of 36, these are probably Solexa Illumina reads where
the quality has been mapped onto the PHRED values).
This example has been edited to illustrate some of the nasty things allowed
in the FASTQ format. Firstly, on the "+" lines most but not all of the
(redundant) identifiers are omitted. In real files it is likely that all or
none of these extra identifiers will be present.
Secondly, while the first three sequences have been shown without line
breaks, the last has been split over multiple lines. In real files any line
breaks are likely to be consistent.
Thirdly, some of the quality string lines start with an "@" character. For
the second record this is unavoidable. However for the fourth sequence this
only happens because its quality string is split over two lines. A naive
parser could wrongly treat any line starting with an "@" as the beginning of
a new sequence! This code copes with this possible ambiguity by keeping
track of the length of the sequence which gives the expected length of the
quality string.
Using this tricky example file as input, this short bit of code demonstrates
what this parsing function would return:
>>> with open("Quality/tricky.fastq") as handle:
... for (title, sequence, quality) in FastqGeneralIterator(handle):
... print(title)
... print("%s %s" % (sequence, quality))
...
071113_EAS56_0053:1:1:998:236
TTTCTTGCCCCCATAGACTGAGACCTTCCCTAAATA IIIIIIIIIIIIIIIIIIIIIIIIIIIIICII+III
071113_EAS56_0053:1:1:182:712
ACCCAGCTAATTTTTGTATTTTTGTTAGAGACAGTG @IIIIIIIIIIIIIIICDIIIII<%<6&-*).(*%+
071113_EAS56_0053:1:1:153:10
TGTTCTGAAGGAAGGTGTGCGTGCGTGTGTGTGTGT IIIIIIIIIIIICIIGIIIII>IAIIIE65I=II:6
071113_EAS56_0053:1:3:990:501
TGGGAGGTTTTATGTGGAAAGCAGCAATGTACAAGA IIIIIII.IIIIII1@44@-7.%<&+/$/%4(++(%
Finally we note that some sources state that the quality string should
start with "!" (which using the PHRED mapping means the first letter always
has a quality score of zero). This rather restrictive rule is not widely
observed, so is therefore ignored here. One plus point about this "!" rule
is that (provided there are no line breaks in the quality sequence) it
would prevent the above problem with the "@" character.
"""
with as_handle(source) as handle:
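# read(0) is a cheap mode probe: a text handle returns "" here, while a
# binary handle returns b"", which fails the comparison below.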
if handle.read(0) != "":
raise StreamModeError("Fastq files must be opened in text mode") from None
try:
line = next(handle)
except StopIteration:
return # Premature end of file, or just empty?
while True:
if line[0] != "@":
raise ValueError(
"Records in Fastq files should start with '@' character"
)
title_line = line[1:].rstrip()
seq_string = ""
# There will now be one or more sequence lines; keep going until we
# find the "+" marking the quality line:
for line in handle:
if line[0] == "+":
break
seq_string += line.rstrip()
else:
if seq_string:
raise ValueError("End of file without quality information.")
else:
raise ValueError("Unexpected end of file")
# The title here is optional, but if present must match!
second_title = line[1:].rstrip()
if second_title and second_title != title_line:
raise ValueError("Sequence and quality captions differ.")
# This is going to slow things down a little, but assuming
# this isn't allowed we should try and catch it here:
if " " in seq_string or "\t" in seq_string:
raise ValueError("Whitespace is not allowed in the sequence.")
seq_len = len(seq_string)
# There will now be at least one line of quality data, followed by
# another sequence, or EOF
line = None
quality_string = ""
for line in handle:
if line[0] == "@":
# This COULD be the start of a new sequence. However, it MAY just
# be a line of quality data which starts with a "@" character. We
# should be able to check this by looking at the sequence length
# and the amount of quality data found so far.
if len(quality_string) >= seq_len:
# We expect it to be equal if this is the start of a new record.
# If the quality data is longer, we'll raise an error below.
break
# Continue - it's just some (more) quality data.
quality_string += line.rstrip()
else:
if line is None:
raise ValueError("Unexpected end of file")
line = None
if seq_len != len(quality_string):
raise ValueError(
"Lengths of sequence and quality values differs for %s (%i and %i)."
% (title_line, seq_len, len(quality_string))
)
# Return the record and then continue...
yield (title_line, seq_string, quality_string)
if line is None:
break |
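# Usage sketch for the custom quality mapping suggested in the docstring
# above; the input path reuses the tricky example file from the doctest.
from Bio.SeqIO.QualityIO import FastqGeneralIterator

with open("Quality/tricky.fastq") as handle:
    for title, seq, qual in FastqGeneralIterator(handle):
        # Sanger FASTQ encodes each PHRED score as ASCII code minus 33
        scores = [ord(letter) - 33 for letter in qual]
        print(title.split(None, 1)[0], min(scores), max(scores))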
Parse old Solexa/Illumina FASTQ like files (which differ in the quality mapping).
The optional arguments are the same as those for the FastqPhredIterator.
For each sequence in Solexa/Illumina FASTQ files there is a matching string
encoding the Solexa integer qualities using ASCII values with an offset
of 64. Solexa scores are scaled differently to PHRED scores, and Biopython
will NOT perform any automatic conversion when loading.
NOTE - This file format is used by the OLD versions of the Solexa/Illumina
pipeline. See also the FastqIlluminaIterator function for the NEW version.
For example, consider a file containing these five records::
@SLXA-B3_649_FC8437_R1_1_1_610_79
GATGTGCAATACCTTTGTAGAGGAA
+SLXA-B3_649_FC8437_R1_1_1_610_79
YYYYYYYYYYYYYYYYYYWYWYYSU
@SLXA-B3_649_FC8437_R1_1_1_397_389
GGTTTGAGAAAGAGAAATGAGATAA
+SLXA-B3_649_FC8437_R1_1_1_397_389
YYYYYYYYYWYYYYWWYYYWYWYWW
@SLXA-B3_649_FC8437_R1_1_1_850_123
GAGGGTGTTGATCATGATGATGGCG
+SLXA-B3_649_FC8437_R1_1_1_850_123
YYYYYYYYYYYYYWYYWYYSYYYSY
@SLXA-B3_649_FC8437_R1_1_1_362_549
GGAAACAAAGTTTTTCTCAACATAG
+SLXA-B3_649_FC8437_R1_1_1_362_549
YYYYYYYYYYYYYYYYYYWWWWYWY
@SLXA-B3_649_FC8437_R1_1_1_183_714
GTATTATTTAATGGCATACACTCAA
+SLXA-B3_649_FC8437_R1_1_1_183_714
YYYYYYYYYYWYYYYWYWWUWWWQQ
Using this module directly you might run:
>>> with open("Quality/solexa_example.fastq") as handle:
... for record in FastqSolexaIterator(handle):
... print("%s %s" % (record.id, record.seq))
SLXA-B3_649_FC8437_R1_1_1_610_79 GATGTGCAATACCTTTGTAGAGGAA
SLXA-B3_649_FC8437_R1_1_1_397_389 GGTTTGAGAAAGAGAAATGAGATAA
SLXA-B3_649_FC8437_R1_1_1_850_123 GAGGGTGTTGATCATGATGATGGCG
SLXA-B3_649_FC8437_R1_1_1_362_549 GGAAACAAAGTTTTTCTCAACATAG
SLXA-B3_649_FC8437_R1_1_1_183_714 GTATTATTTAATGGCATACACTCAA
Typically however, you would call this via Bio.SeqIO instead with
"fastq-solexa" as the format:
>>> from Bio import SeqIO
>>> with open("Quality/solexa_example.fastq") as handle:
... for record in SeqIO.parse(handle, "fastq-solexa"):
... print("%s %s" % (record.id, record.seq))
SLXA-B3_649_FC8437_R1_1_1_610_79 GATGTGCAATACCTTTGTAGAGGAA
SLXA-B3_649_FC8437_R1_1_1_397_389 GGTTTGAGAAAGAGAAATGAGATAA
SLXA-B3_649_FC8437_R1_1_1_850_123 GAGGGTGTTGATCATGATGATGGCG
SLXA-B3_649_FC8437_R1_1_1_362_549 GGAAACAAAGTTTTTCTCAACATAG
SLXA-B3_649_FC8437_R1_1_1_183_714 GTATTATTTAATGGCATACACTCAA
If you want to look at the qualities, they are recorded in each record's
per-letter-annotation dictionary as a simple list of integers:
>>> print(record.letter_annotations["solexa_quality"])
[25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 23, 25, 25, 25, 25, 23, 25, 23, 23, 21, 23, 23, 23, 17, 17]
These scores aren't very good, but they are high enough that they map
almost exactly onto PHRED scores:
>>> print("%0.2f" % phred_quality_from_solexa(25))
25.01
Let's look at a faked example read which is even worse, where there are
more noticeable differences between the Solexa and PHRED scores::
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+slxa_0001_1_0001_01
hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
Again, you would typically use Bio.SeqIO to read this file in (rather than
calling the Bio.SeqIO.QualityIO module directly). Most FASTQ files will
contain thousands of reads, so you would normally use Bio.SeqIO.parse()
as shown above. This example has only one entry, so instead we can
use the Bio.SeqIO.read() function:
>>> from Bio import SeqIO
>>> with open("Quality/solexa_faked.fastq") as handle:
... record = SeqIO.read(handle, "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(record.letter_annotations["solexa_quality"])
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
These quality scores are so low that when converted from the Solexa scheme
into PHRED scores they look quite different:
>>> print("%0.2f" % phred_quality_from_solexa(-1))
2.54
>>> print("%0.2f" % phred_quality_from_solexa(-5))
1.19
Note you can use the Bio.SeqIO.write() function or the SeqRecord's format
method to output the record(s):
>>> print(record.format("fastq-solexa"))
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+
hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
<BLANKLINE>
Note this output is slightly different from the input file as Biopython
has left out the optional repetition of the sequence identifier on the "+"
line. If you want to use PHRED scores, use "fastq" or "qual" as the
output format instead, and Biopython will do the conversion for you:
>>> print(record.format("fastq"))
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+
IHGFEDCBA@?>=<;:9876543210/.-,++*)('&&%%$$##""
<BLANKLINE>
>>> print(record.format("qual"))
>slxa_0001_1_0001_01
40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21
20 19 18 17 16 15 14 13 12 11 10 10 9 8 7 6 5 5 4 4 3 3 2 2
1 1
<BLANKLINE>
As shown above, the poor quality Solexa reads have been mapped to the
equivalent PHRED score (e.g. -5 to 1 as shown earlier). | def FastqSolexaIterator(
source: _TextIOSource,
alphabet: None = None,
) -> Iterator[SeqRecord]:
r"""Parse old Solexa/Illumina FASTQ like files (which differ in the quality mapping).
The optional arguments are the same as those for the FastqPhredIterator.
For each sequence in Solexa/Illumina FASTQ files there is a matching string
encoding the Solexa integer qualities using ASCII values with an offset
of 64. Solexa scores are scaled differently to PHRED scores, and Biopython
will NOT perform any automatic conversion when loading.
NOTE - This file format is used by the OLD versions of the Solexa/Illumina
pipeline. See also the FastqIlluminaIterator function for the NEW version.
For example, consider a file containing these five records::
@SLXA-B3_649_FC8437_R1_1_1_610_79
GATGTGCAATACCTTTGTAGAGGAA
+SLXA-B3_649_FC8437_R1_1_1_610_79
YYYYYYYYYYYYYYYYYYWYWYYSU
@SLXA-B3_649_FC8437_R1_1_1_397_389
GGTTTGAGAAAGAGAAATGAGATAA
+SLXA-B3_649_FC8437_R1_1_1_397_389
YYYYYYYYYWYYYYWWYYYWYWYWW
@SLXA-B3_649_FC8437_R1_1_1_850_123
GAGGGTGTTGATCATGATGATGGCG
+SLXA-B3_649_FC8437_R1_1_1_850_123
YYYYYYYYYYYYYWYYWYYSYYYSY
@SLXA-B3_649_FC8437_R1_1_1_362_549
GGAAACAAAGTTTTTCTCAACATAG
+SLXA-B3_649_FC8437_R1_1_1_362_549
YYYYYYYYYYYYYYYYYYWWWWYWY
@SLXA-B3_649_FC8437_R1_1_1_183_714
GTATTATTTAATGGCATACACTCAA
+SLXA-B3_649_FC8437_R1_1_1_183_714
YYYYYYYYYYWYYYYWYWWUWWWQQ
Using this module directly you might run:
>>> with open("Quality/solexa_example.fastq") as handle:
... for record in FastqSolexaIterator(handle):
... print("%s %s" % (record.id, record.seq))
SLXA-B3_649_FC8437_R1_1_1_610_79 GATGTGCAATACCTTTGTAGAGGAA
SLXA-B3_649_FC8437_R1_1_1_397_389 GGTTTGAGAAAGAGAAATGAGATAA
SLXA-B3_649_FC8437_R1_1_1_850_123 GAGGGTGTTGATCATGATGATGGCG
SLXA-B3_649_FC8437_R1_1_1_362_549 GGAAACAAAGTTTTTCTCAACATAG
SLXA-B3_649_FC8437_R1_1_1_183_714 GTATTATTTAATGGCATACACTCAA
Typically however, you would call this via Bio.SeqIO instead with
"fastq-solexa" as the format:
>>> from Bio import SeqIO
>>> with open("Quality/solexa_example.fastq") as handle:
... for record in SeqIO.parse(handle, "fastq-solexa"):
... print("%s %s" % (record.id, record.seq))
SLXA-B3_649_FC8437_R1_1_1_610_79 GATGTGCAATACCTTTGTAGAGGAA
SLXA-B3_649_FC8437_R1_1_1_397_389 GGTTTGAGAAAGAGAAATGAGATAA
SLXA-B3_649_FC8437_R1_1_1_850_123 GAGGGTGTTGATCATGATGATGGCG
SLXA-B3_649_FC8437_R1_1_1_362_549 GGAAACAAAGTTTTTCTCAACATAG
SLXA-B3_649_FC8437_R1_1_1_183_714 GTATTATTTAATGGCATACACTCAA
If you want to look at the qualities, they are recorded in each record's
per-letter-annotation dictionary as a simple list of integers:
>>> print(record.letter_annotations["solexa_quality"])
[25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 23, 25, 25, 25, 25, 23, 25, 23, 23, 21, 23, 23, 23, 17, 17]
These scores aren't very good, but they are high enough that they map
almost exactly onto PHRED scores:
>>> print("%0.2f" % phred_quality_from_solexa(25))
25.01
Let's look at a faked example read which is even worse, where there are
more noticeable differences between the Solexa and PHRED scores::
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+slxa_0001_1_0001_01
hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
Again, you would typically use Bio.SeqIO to read this file in (rather than
calling the Bio.SeqIO.QualityIO module directly). Most FASTQ files will
contain thousands of reads, so you would normally use Bio.SeqIO.parse()
as shown above. This example has only one entry, so instead we can
use the Bio.SeqIO.read() function:
>>> from Bio import SeqIO
>>> with open("Quality/solexa_faked.fastq") as handle:
... record = SeqIO.read(handle, "fastq-solexa")
>>> print("%s %s" % (record.id, record.seq))
slxa_0001_1_0001_01 ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
>>> print(record.letter_annotations["solexa_quality"])
[40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]
These quality scores are so low that when converted from the Solexa scheme
into PHRED scores they look quite different:
>>> print("%0.2f" % phred_quality_from_solexa(-1))
2.54
>>> print("%0.2f" % phred_quality_from_solexa(-5))
1.19
Note you can use the Bio.SeqIO.write() function or the SeqRecord's format
method to output the record(s):
>>> print(record.format("fastq-solexa"))
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+
hgfedcba`_^]\[ZYXWVUTSRQPONMLKJIHGFEDCBA@?>=<;
<BLANKLINE>
Note this output is slightly different from the input file as Biopython
has left out the optional repetition of the sequence identifier on the "+"
line. If you want to use PHRED scores, use "fastq" or "qual" as the
output format instead, and Biopython will do the conversion for you:
>>> print(record.format("fastq"))
@slxa_0001_1_0001_01
ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTNNNNNN
+
IHGFEDCBA@?>=<;:9876543210/.-,++*)('&&%%$$##""
<BLANKLINE>
>>> print(record.format("qual"))
>slxa_0001_1_0001_01
40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21
20 19 18 17 16 15 14 13 12 11 10 10 9 8 7 6 5 5 4 4 3 3 2 2
1 1
<BLANKLINE>
As shown above, the poor quality Solexa reads have been mapped to the
equivalent PHRED score (e.g. -5 to 1 as shown earlier).
"""
if alphabet is not None:
raise ValueError("The alphabet argument is no longer supported")
q_mapping = {
chr(letter): letter - SOLEXA_SCORE_OFFSET
for letter in range(SOLEXA_SCORE_OFFSET - 5, 63 + SOLEXA_SCORE_OFFSET)
}
for title_line, seq_string, quality_string in FastqGeneralIterator(source):
descr = title_line
id = descr.split()[0]
name = id
record = SeqRecord(Seq(seq_string), id=id, name=name, description=descr)
try:
qualities = [q_mapping[letter2] for letter2 in quality_string]
# DO NOT convert these into PHRED qualities automatically!
except KeyError:
raise ValueError("Invalid character in quality string") from None
# Dirty trick to speed up this line:
# record.letter_annotations["solexa_quality"] = qualities
dict.__setitem__(record._per_letter_annotations, "solexa_quality", qualities)
yield record |
Parse Illumina 1.3 to 1.7 FASTQ like files (which differ in the quality mapping).
The optional arguments are the same as those for the FastqPhredIterator.
For each sequence in Illumina 1.3+ FASTQ files there is a matching string
encoding PHRED integer qualities using ASCII values with an offset of 64.
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/illumina_faked.fastq", "fastq-illumina")
>>> print("%s %s" % (record.id, record.seq))
Test ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTN
>>> max(record.letter_annotations["phred_quality"])
40
>>> min(record.letter_annotations["phred_quality"])
0
NOTE - Older versions of the Solexa/Illumina pipeline encoded Solexa scores
with an ASCII offset of 64. They are approximately equal but only for high
quality reads. If you have an old Solexa/Illumina file with negative
Solexa scores, and try and read this as an Illumina 1.3+ file it will fail:
>>> record2 = SeqIO.read("Quality/solexa_faked.fastq", "fastq-illumina")
Traceback (most recent call last):
...
ValueError: Invalid character in quality string
NOTE - True Sanger style FASTQ files use PHRED scores with an offset of 33. | def FastqIlluminaIterator(
source: _TextIOSource,
alphabet: None = None,
) -> Iterator[SeqRecord]:
"""Parse Illumina 1.3 to 1.7 FASTQ like files (which differ in the quality mapping).
The optional arguments are the same as those for the FastqPhredIterator.
For each sequence in Illumina 1.3+ FASTQ files there is a matching string
encoding PHRED integer qualities using ASCII values with an offset of 64.
>>> from Bio import SeqIO
>>> record = SeqIO.read("Quality/illumina_faked.fastq", "fastq-illumina")
>>> print("%s %s" % (record.id, record.seq))
Test ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTN
>>> max(record.letter_annotations["phred_quality"])
40
>>> min(record.letter_annotations["phred_quality"])
0
NOTE - Older versions of the Solexa/Illumina pipeline encoded Solexa scores
with an ASCII offset of 64. They are approximately equal but only for high
quality reads. If you have an old Solexa/Illumina file with negative
Solexa scores, and try and read this as an Illumina 1.3+ file it will fail:
>>> record2 = SeqIO.read("Quality/solexa_faked.fastq", "fastq-illumina")
Traceback (most recent call last):
...
ValueError: Invalid character in quality string
NOTE - True Sanger style FASTQ files use PHRED scores with an offset of 33.
"""
if alphabet is not None:
raise ValueError("The alphabet argument is no longer supported")
q_mapping = {
chr(letter): letter - SOLEXA_SCORE_OFFSET
for letter in range(SOLEXA_SCORE_OFFSET, 63 + SOLEXA_SCORE_OFFSET)
}
for title_line, seq_string, quality_string in FastqGeneralIterator(source):
descr = title_line
id = descr.split()[0]
name = id
record = SeqRecord(Seq(seq_string), id=id, name=name, description=descr)
try:
qualities = [q_mapping[letter2] for letter2 in quality_string]
except KeyError:
raise ValueError("Invalid character in quality string") from None
# Dirty trick to speed up this line:
# record.letter_annotations["phred_quality"] = qualities
dict.__setitem__(record._per_letter_annotations, "phred_quality", qualities)
yield record |
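# Conversion sketch: Bio.SeqIO can re-encode an Illumina 1.3-1.7 file
# (ASCII offset 64) as a standard Sanger FASTQ file (ASCII offset 33).
# The file names here are illustrative assumptions.
from Bio import SeqIO

SeqIO.convert("old_illumina.fastq", "fastq-illumina", "sanger.fastq", "fastq")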
Turn a SeqRecord into a Sanger FASTQ formatted string.
This is used internally by the SeqRecord's .format("fastq")
method and by the SeqIO.write(..., ..., "fastq") function,
and under the format alias "fastq-sanger" as well. | def as_fastq(record: SeqRecord) -> str:
"""Turn a SeqRecord into a Sanger FASTQ formatted string.
This is used internally by the SeqRecord's .format("fastq")
method and by the SeqIO.write(..., ..., "fastq") function,
and under the format alias "fastq-sanger" as well.
"""
seq_str = _get_seq_string(record)
qualities_str = _get_sanger_quality_str(record)
if len(qualities_str) != len(seq_str):
raise ValueError(
"Record %s has sequence length %i but %i quality scores"
% (record.id, len(seq_str), len(qualities_str))
)
id_ = _clean(record.id) if record.id else ""
description = _clean(record.description)
if description and description.split(None, 1)[0] == id_:
title = description
elif description:
title = f"{id_} {description}"
else:
title = id_
return f"@{title}\n{seq_str}\n+\n{qualities_str}\n" |
Turn a SeqRecord into a QUAL formatted string.
This is used internally by the SeqRecord's .format("qual")
method and by the SeqIO.write(..., ..., "qual") function. | def as_qual(record: SeqRecord) -> str:
"""Turn a SeqRecord into a QUAL formatted string.
This is used internally by the SeqRecord's .format("qual")
method and by the SeqIO.write(..., ..., "qual") function.
"""
id_ = _clean(record.id) if record.id else ""
description = _clean(record.description)
if description and description.split(None, 1)[0] == id_:
title = description
elif description:
title = f"{id_} {description}"
else:
title = id_
lines = [f">{title}\n"]
qualities = _get_phred_quality(record)
try:
# This rounds to the nearest integer.
# TODO - can we record a float in a qual file?
qualities_strs = [("%i" % round(q, 0)) for q in qualities]
except TypeError:
if None in qualities:
raise TypeError("A quality value of None was found") from None
else:
raise
# Safe wrapping
while qualities_strs:
line = qualities_strs.pop(0)
while qualities_strs and len(line) + 1 + len(qualities_strs[0]) < 60:
line += " " + qualities_strs.pop(0)
lines.append(line + "\n")
return "".join(lines) |
Turn a SeqRecord into a Solexa FASTQ formatted string.
This is used internally by the SeqRecord's .format("fastq-solexa")
method and by the SeqIO.write(..., ..., "fastq-solexa") function. | def as_fastq_solexa(record: SeqRecord) -> str:
"""Turn a SeqRecord into a Solexa FASTQ formatted string.
This is used internally by the SeqRecord's .format("fastq-solexa")
method and by the SeqIO.write(..., ..., "fastq-solexa") function.
"""
seq_str = _get_seq_string(record)
qualities_str = _get_solexa_quality_str(record)
if len(qualities_str) != len(seq_str):
raise ValueError(
"Record %s has sequence length %i but %i quality scores"
% (record.id, len(seq_str), len(qualities_str))
)
id_ = _clean(record.id) if record.id else ""
description = _clean(record.description)
if description and description.split(None, 1)[0] == id_:
# The description includes the id at the start
title = description
elif description:
title = f"{id_} {description}"
else:
title = id_
return f"@{title}\n{seq_str}\n+\n{qualities_str}\n" |
Turn a SeqRecord into an Illumina FASTQ formatted string.
This is used internally by the SeqRecord's .format("fastq-illumina")
method and by the SeqIO.write(..., ..., "fastq-illumina") function. | def as_fastq_illumina(record: SeqRecord) -> str:
"""Turn a SeqRecord into an Illumina FASTQ formatted string.
This is used internally by the SeqRecord's .format("fastq-illumina")
method and by the SeqIO.write(..., ..., "fastq-illumina") function.
"""
seq_str = _get_seq_string(record)
qualities_str = _get_illumina_quality_str(record)
if len(qualities_str) != len(seq_str):
raise ValueError(
"Record %s has sequence length %i but %i quality scores"
% (record.id, len(seq_str), len(qualities_str))
)
id_ = _clean(record.id) if record.id else ""
description = _clean(record.description)
if description and description.split(None, 1)[0] == id_:
title = description
elif description:
title = f"{id_} {description}"
else:
title = id_
return f"@{title}\n{seq_str}\n+\n{qualities_str}\n" |
Iterate over matched FASTA and QUAL files as SeqRecord objects.
For example, consider this short QUAL file with PHRED quality scores::
>EAS54_6_R1_2_1_413_324
26 26 18 26 26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26
26 26 26 23 23
>EAS54_6_R1_2_1_540_792
26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26 26 12 26 26
26 18 26 23 18
>EAS54_6_R1_2_1_443_348
26 26 26 26 26 26 26 26 26 26 26 24 26 22 26 26 13 22 26 18
24 18 18 18 18
And a matching FASTA file::
>EAS54_6_R1_2_1_413_324
CCCTTCTTGTCTTCAGCGTTTCTCC
>EAS54_6_R1_2_1_540_792
TTGGCAGGCCAAGGCCGATGGATCA
>EAS54_6_R1_2_1_443_348
GTTGCTTCTGGCGTGGGTGGGGGGG
You can parse these separately using Bio.SeqIO with the "qual" and
"fasta" formats, but then you'll get a group of SeqRecord objects with
no sequence, and a matching group with the sequence but not the
qualities. Because it only deals with one input file handle, Bio.SeqIO
can't be used to read the two files together - but this function can!
For example,
>>> with open("Quality/example.fasta") as f:
... with open("Quality/example.qual") as q:
... for record in PairedFastaQualIterator(f, q):
... print("%s %s" % (record.id, record.seq))
...
EAS54_6_R1_2_1_413_324 CCCTTCTTGTCTTCAGCGTTTCTCC
EAS54_6_R1_2_1_540_792 TTGGCAGGCCAAGGCCGATGGATCA
EAS54_6_R1_2_1_443_348 GTTGCTTCTGGCGTGGGTGGGGGGG
As with the FASTQ or QUAL parsers, if you want to look at the qualities,
they are in each record's per-letter-annotation dictionary as a simple
list of integers:
>>> print(record.letter_annotations["phred_quality"])
[26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]
If you have access to data as a FASTQ format file, using that directly
would be simpler and more straightforward. Note that you can easily use
this function to convert paired FASTA and QUAL files into FASTQ files:
>>> from Bio import SeqIO
>>> with open("Quality/example.fasta") as f:
... with open("Quality/example.qual") as q:
... SeqIO.write(PairedFastaQualIterator(f, q), "Quality/temp.fastq", "fastq")
...
3
And don't forget to clean up the temp file if you don't need it anymore:
>>> import os
>>> os.remove("Quality/temp.fastq") | def PairedFastaQualIterator(
fasta_source: _TextIOSource,
qual_source: _TextIOSource,
alphabet: None = None,
) -> Iterator[SeqRecord]:
"""Iterate over matched FASTA and QUAL files as SeqRecord objects.
For example, consider this short QUAL file with PHRED quality scores::
>EAS54_6_R1_2_1_413_324
26 26 18 26 26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26
26 26 26 23 23
>EAS54_6_R1_2_1_540_792
26 26 26 26 26 26 26 26 26 26 26 22 26 26 26 26 26 12 26 26
26 18 26 23 18
>EAS54_6_R1_2_1_443_348
26 26 26 26 26 26 26 26 26 26 26 24 26 22 26 26 13 22 26 18
24 18 18 18 18
And a matching FASTA file::
>EAS54_6_R1_2_1_413_324
CCCTTCTTGTCTTCAGCGTTTCTCC
>EAS54_6_R1_2_1_540_792
TTGGCAGGCCAAGGCCGATGGATCA
>EAS54_6_R1_2_1_443_348
GTTGCTTCTGGCGTGGGTGGGGGGG
You can parse these separately using Bio.SeqIO with the "qual" and
"fasta" formats, but then you'll get a group of SeqRecord objects with
no sequence, and a matching group with the sequence but not the
qualities. Because it only deals with one input file handle, Bio.SeqIO
can't be used to read the two files together - but this function can!
For example,
>>> with open("Quality/example.fasta") as f:
... with open("Quality/example.qual") as q:
... for record in PairedFastaQualIterator(f, q):
... print("%s %s" % (record.id, record.seq))
...
EAS54_6_R1_2_1_413_324 CCCTTCTTGTCTTCAGCGTTTCTCC
EAS54_6_R1_2_1_540_792 TTGGCAGGCCAAGGCCGATGGATCA
EAS54_6_R1_2_1_443_348 GTTGCTTCTGGCGTGGGTGGGGGGG
As with the FASTQ or QUAL parsers, if you want to look at the qualities,
they are in each record's per-letter-annotation dictionary as a simple
list of integers:
>>> print(record.letter_annotations["phred_quality"])
[26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 24, 26, 22, 26, 26, 13, 22, 26, 18, 24, 18, 18, 18, 18]
If you have access to data as a FASTQ format file, using that directly
would be simpler and more straightforward. Note that you can easily use
this function to convert paired FASTA and QUAL files into FASTQ files:
>>> from Bio import SeqIO
>>> with open("Quality/example.fasta") as f:
... with open("Quality/example.qual") as q:
... SeqIO.write(PairedFastaQualIterator(f, q), "Quality/temp.fastq", "fastq")
...
3
And don't forget to clean up the temp file if you don't need it anymore:
>>> import os
>>> os.remove("Quality/temp.fastq")
"""
if alphabet is not None:
raise ValueError("The alphabet argument is no longer supported")
from Bio.SeqIO.FastaIO import FastaIterator
fasta_iter = FastaIterator(fasta_source)
qual_iter = QualPhredIterator(qual_source)
# Using zip would avoid loading everything into memory, but it would
# silently ignore any extra records found in only one of the two files.
while True:
try:
f_rec = next(fasta_iter)
except StopIteration:
f_rec = None
try:
q_rec = next(qual_iter)
except StopIteration:
q_rec = None
if f_rec is None and q_rec is None:
# End of both files
break
if f_rec is None:
raise ValueError("FASTA file has more entries than the QUAL file.")
if q_rec is None:
raise ValueError("QUAL file has more entries than the FASTA file.")
if f_rec.id != q_rec.id:
raise ValueError(
f"FASTA and QUAL entries do not match ({f_rec.id} vs {q_rec.id})."
)
if len(f_rec) != len(q_rec.letter_annotations["phred_quality"]):
raise ValueError(
f"Sequence length and number of quality scores disagree for {f_rec.id}"
)
# Merge the data....
f_rec.letter_annotations["phred_quality"] = q_rec.letter_annotations[
"phred_quality"
]
yield f_rec |
FASTQ helper function where can't have data loss by truncation (PRIVATE). | def _fastq_generic(
in_file: _TextIOSource,
out_file: _TextIOSource,
mapping: Union[Sequence[str], Mapping[int, Optional[Union[str, int]]]],
) -> int:
"""FASTQ helper function where can't have data loss by truncation (PRIVATE)."""
# For real speed, don't even make SeqRecord and Seq objects!
count = 0
null = chr(0)
with as_handle(out_file, "w") as out_handle:
for title, seq, old_qual in FastqGeneralIterator(in_file):
count += 1
# Remap the quality string through the supplied translation table.
qual = old_qual.translate(mapping)
if null in qual:
raise ValueError("Invalid character in quality string")
out_handle.write(f"@{title}\n{seq}\n+\n{qual}\n")
return count |
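
The mapping argument is an str.translate table. As a rough sketch (illustrative, not the module's exact precomputed tables; in_handle and out_handle are assumed to be open text handles), the lossless Illumina 1.3+ to Sanger direction could be expressed like this:

# Illumina 1.3+ encodes PHRED 0..62 as ASCII 64..126; Sanger uses ASCII 33 + q.
# Every other byte maps to NUL, which the `null in qual` check above rejects.
illumina_to_sanger = {code: "\0" for code in range(256)}
for phred in range(62 + 1):
    illumina_to_sanger[64 + phred] = chr(33 + phred)

count = _fastq_generic(in_handle, out_handle, illumina_to_sanger)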
FASTQ helper function where there could be data loss by truncation (PRIVATE). | def _fastq_generic2(
in_file: _TextIOSource,
out_file: _TextIOSource,
mapping: Union[Sequence[str], Mapping[int, Optional[Union[str, int]]]],
truncate_char: str,
truncate_msg: str,
) -> int:
"""FASTQ helper function where there could be data loss by truncation (PRIVATE)."""
# For real speed, don't even make SeqRecord and Seq objects!
count = 0
null = chr(0)
with as_handle(out_file, "w") as out_handle:
for title, seq, old_qual in FastqGeneralIterator(in_file):
count += 1
# Remap the quality string through the supplied translation table.
qual = old_qual.translate(mapping)
if null in qual:
raise ValueError("Invalid character in quality string")
# Out-of-range scores were marked with truncate_char by the mapping; cap
# them at the highest legal output character (ASCII 126) and warn about
# the data loss.
if truncate_char in qual:
qual = qual.replace(truncate_char, chr(126))
warnings.warn(truncate_msg, BiopythonWarning)
out_handle.write(f"@{title}\n{seq}\n+\n{qual}\n")
return count |
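
For the lossy direction the table marks out-of-range scores with a sentinel character instead of NUL. A sketch of Sanger to Illumina 1.3+ (illustrative names and warning text, not the module's exact table): Sanger allows PHRED up to 93, but Illumina 1.3+ tops out at 62, so higher scores get capped.

sentinel = chr(1)
sanger_to_illumina = {code: "\0" for code in range(256)}
for phred in range(62 + 1):
    sanger_to_illumina[33 + phred] = chr(64 + phred)  # converts cleanly
for phred in range(63, 93 + 1):
    sanger_to_illumina[33 + phred] = sentinel  # will be capped at chr(126)

count = _fastq_generic2(
    in_handle,
    out_handle,
    sanger_to_illumina,
    sentinel,
    "Data loss - max PHRED quality 62 in Illumina 1.3+ FASTQ",
)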