def MafIterator(handle, seq_count=None):
"""Iterate over a MAF file handle as MultipleSeqAlignment objects.
Iterates over lines in a MAF file-like object (handle), yielding
MultipleSeqAlignment objects. SeqRecord IDs generally correspond to
species names.
"""
in_a_bundle = False
annotations = []
records = []
while True:
# allows parsing of the last bundle without duplicating code
try:
line = next(handle)
except StopIteration:
line = ""
if in_a_bundle:
if line.startswith("s"):
# add a SeqRecord to the bundle
line_split = line.strip().split()
if len(line_split) != 7:
raise ValueError(
"Error parsing alignment - 's' line must have 7 fields"
)
# convert MAF-style +/- strand to biopython-type 1/-1
if line_split[4] == "+":
strand = 1
elif line_split[4] == "-":
strand = -1
else:
# TODO: issue warning, set to 0?
strand = 1
# s (literal), src (ID), start, size, strand, srcSize, text (sequence)
anno = {
"start": int(line_split[2]),
"size": int(line_split[3]),
"strand": strand,
"srcSize": int(line_split[5]),
}
sequence = line_split[6]
# interpret a dot/period to mean the same as the first sequence
if "." in sequence:
if not records:
raise ValueError(
"Found dot/period in first sequence of alignment"
)
ref = records[0].seq
new = []
for letter, ref_letter in zip(sequence, ref):
new.append(ref_letter if letter == "." else letter)
sequence = "".join(new)
records.append(
SeqRecord(
Seq(sequence),
id=line_split[1],
name=line_split[1],
description="",
annotations=anno,
)
)
elif line.startswith("i"):
# TODO: information about what is in the aligned species DNA before
# and after the immediately preceding "s" line
pass
elif line.startswith("e"):
# TODO: information about the size of the gap between the alignments
# that span the current block
pass
elif line.startswith("q"):
# TODO: quality of each aligned base for the species.
# Need to find documentation on this, looks like ASCII 0-9 or gap?
# Can then store in each SeqRecord's .letter_annotations dictionary,
# perhaps as the raw string or turned into integers / None for gap?
pass
elif line.startswith("#"):
# ignore comments (not sure whether comments are in the MAF specification, though)
pass
elif not line.strip():
# end a bundle of records
if seq_count is not None:
assert len(records) == seq_count
alignment = MultipleSeqAlignment(records)
# TODO - Introduce an annotated alignment class?
# See also Bio/AlignIO/FastaIO.py for same requirement.
# For now, store the annotation in a new private property:
alignment._annotations = annotations
yield alignment
in_a_bundle = False
annotations = []
records = []
else:
raise ValueError(f"Error parsing alignment - unexpected line:\n{line}")
elif line.startswith("a"):
# start a bundle of records
in_a_bundle = True
annot_strings = line.strip().split()[1:]
if len(annot_strings) != line.count("="):
raise ValueError("Error parsing alignment - invalid key in 'a' line")
annotations = dict(a_string.split("=") for a_string in annot_strings)
elif line.startswith("#"):
# ignore comments
pass
elif not line:
break
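
A minimal usage sketch (not from the original source), assuming a Biopython build where this iterator is registered as the "maf" format in Bio.AlignIO; the two-species block below is made-up illustrative data.

from io import StringIO

from Bio import AlignIO

maf_text = """\
##maf version=1
a score=23262.0
s hg16.chr7    27578828 38 + 158545518 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG
s panTro1.chr6 28741140 38 + 161576975 AAA-GGGAATGTTAACCAAATGA---ATTGTCTCTTACGGTG

"""

for alignment in AlignIO.parse(StringIO(maf_text), "maf"):
    for record in alignment:
        # "start", "size", "strand" and "srcSize" were parsed from the "s" line
        print(record.id, record.annotations["start"], record.annotations["strand"])
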

def _identifier_split(identifier):
"""Return (name, start, end) string tuple from an identifier (PRIVATE)."""
id, loc, strand = identifier.split(":")
start, end = map(int, loc.split("-"))
start -= 1
return id, start, end, strand
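
Illustrative call (the identifier format "name:start-end:strand" is assumed):

name, start, end, strand = _identifier_split("hg38.chr1:1000-2000:+")
assert (name, start, end, strand) == ("hg38.chr1", 999, 2000, "+")  # start becomes zero-based
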

def NexusIterator(
handle: IO[str], seq_count: Optional[int] = None
) -> Iterator[MultipleSeqAlignment]:
"""Return SeqRecord objects from a Nexus file.
This uses the Bio.Nexus module to do the hard work.
You are expected to call this function via Bio.SeqIO or Bio.AlignIO
(and not use it directly).
NOTE - We only expect ONE alignment matrix per Nexus file,
meaning this iterator will only yield one MultipleSeqAlignment.
"""
n = Nexus.Nexus(handle)
if not n.matrix:
# No alignment found
return
# Bio.Nexus deals with duplicated names by adding a '.copy' suffix.
# The original names and the modified names are kept in these two lists:
assert len(n.unaltered_taxlabels) == len(n.taxlabels)
if seq_count and seq_count != len(n.unaltered_taxlabels):
raise ValueError(
"Found %i sequences, but seq_count=%i"
% (len(n.unaltered_taxlabels), seq_count)
)
# TODO - Can we extract any annotation too?
annotations: Optional[SeqRecord._AnnotationsDict]
if n.datatype in ("dna", "nucleotide"):
annotations = {"molecule_type": "DNA"}
elif n.datatype == "rna":
annotations = {"molecule_type": "RNA"}
elif n.datatype == "protein":
annotations = {"molecule_type": "protein"}
else:
annotations = None
records = (
SeqRecord(
n.matrix[new_name],
id=new_name,
name=old_name,
description="",
annotations=annotations,
)
for old_name, new_name in zip(n.unaltered_taxlabels, n.taxlabels)
)
# All done
yield MultipleSeqAlignment(records)
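
A minimal sketch of reaching this iterator through Bio.AlignIO with an in-memory NEXUS file; the two-taxon matrix below is made up for illustration.

from io import StringIO

from Bio import AlignIO

nexus_text = """\
#NEXUS
begin data;
dimensions ntax=2 nchar=4;
format datatype=dna missing=? gap=-;
matrix
taxon_1 ACGT
taxon_2 AC-T
;
end;
"""

alignment = AlignIO.read(StringIO(nexus_text), "nexus")
print(alignment[0].id, alignment[0].annotations["molecule_type"])  # taxon_1 DNA
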

def sanitize_name(name, width=None):
"""Sanitise sequence identifier for output.
Removes the banned characters "[]()," and replaces the characters ":;"
with "|". The name is truncated to "width" characters if specified.
"""
name = name.strip()
for char in "[](),":
name = name.replace(char, "")
for char in ":;":
name = name.replace(char, "|")
if width is not None:
name = name[:width]
return name
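
Illustrative calls (the example identifiers are invented):

print(sanitize_name("chr1:100-200; raw (draft)"))  # chr1|100-200| raw draft
print(sanitize_name("some very long identifier", width=9))  # some very
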

def write(alignments, handle, format):
"""Write complete set of alignments to a file.
Arguments:
- alignments - A list (or iterator) of MultipleSeqAlignment objects,
or a single alignment object.
- handle - File handle object to write to, or filename as string
(note older versions of Biopython only took a handle).
- format - lower case string describing the file format to write.
You should close the handle after calling this function.
Returns the number of alignments written (as an integer).
"""
from Bio import SeqIO
# Try and give helpful error messages:
if not isinstance(format, str):
raise TypeError("Need a string for the file format (lower case)")
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError(f"Format string '{format}' should be lower case")
if isinstance(alignments, MultipleSeqAlignment):
# This raised an exception in older versions of Biopython
alignments = [alignments]
with as_handle(handle, "w") as fp:
# Map the file format to a writer class
if format in _FormatToWriter:
writer_class = _FormatToWriter[format]
count = writer_class(fp).write_file(alignments)
elif format in SeqIO._FormatToWriter:
# Exploit the existing SeqIO parser to do the dirty work!
# TODO - Can we make one call to SeqIO.write() and count the alignments?
count = 0
for alignment in alignments:
if not isinstance(alignment, MultipleSeqAlignment):
raise TypeError(
"Expect a list or iterator of MultipleSeqAlignment "
"objects, got: %r" % alignment
)
SeqIO.write(alignment, fp, format)
count += 1
elif format in _FormatToIterator or format in SeqIO._FormatToIterator:
raise ValueError(f"Reading format '{format}' is supported, but not writing")
else:
raise ValueError(f"Unknown format '{format}'")
if not isinstance(count, int):
raise RuntimeError(
"Internal error - the underlying %s "
"writer should have returned the alignment count, not %r" % (format, count)
)
return count
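
A short sketch of the typical call through Bio.AlignIO, writing one in-memory alignment as Clustal to a StringIO handle (record names invented):

from io import StringIO

from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

alignment = MultipleSeqAlignment(
    [
        SeqRecord(Seq("ACTGCT"), id="seq_a"),
        SeqRecord(Seq("ACT-CT"), id="seq_b"),
    ]
)
handle = StringIO()
count = AlignIO.write([alignment], handle, "clustal")
assert count == 1  # one alignment written
print(handle.getvalue().splitlines()[0])  # the CLUSTAL header line
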

def _SeqIO_to_alignment_iterator(handle, format, seq_count=None):
"""Use Bio.SeqIO to create an MultipleSeqAlignment iterator (PRIVATE).
Arguments:
- handle - handle to the file.
- format - string describing the file format.
- seq_count - Optional integer, number of sequences expected in each
alignment. Recommended for fasta format files.
If count is omitted (default) then all the sequences in the file are
combined into a single MultipleSeqAlignment.
"""
from Bio import SeqIO
if format not in SeqIO._FormatToIterator:
raise ValueError(f"Unknown format '{format}'")
if seq_count:
# Use the count to split the records into batches.
seq_record_iterator = SeqIO.parse(handle, format)
records = []
for record in seq_record_iterator:
records.append(record)
if len(records) == seq_count:
yield MultipleSeqAlignment(records)
records = []
if records:
raise ValueError("Check seq_count argument, not enough sequences?")
else:
# Must assume that there is a single alignment using all
# the SeqRecord objects:
records = list(SeqIO.parse(handle, format))
if records:
yield MultipleSeqAlignment(records)
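
The batching behaviour is easiest to see through the public Bio.AlignIO.parse call with a FASTA handle (made-up records): seq_count=2 splits four sequences into two alignments of two.

from io import StringIO

from Bio import AlignIO

fasta_text = ">a\nACGT\n>b\nAC-T\n>c\nACGA\n>d\nAC-A\n"
alignments = list(AlignIO.parse(StringIO(fasta_text), "fasta", seq_count=2))
assert len(alignments) == 2
assert all(len(a) == 2 for a in alignments)
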

def parse(handle, format, seq_count=None):
"""Iterate over an alignment file as MultipleSeqAlignment objects.
Arguments:
- handle - handle to the file, or the filename as a string
(note older versions of Biopython only took a handle).
- format - string describing the file format.
- seq_count - Optional integer, number of sequences expected in each
alignment. Recommended for fasta format files.
If you have the file name in a string 'filename', use:
>>> from Bio import AlignIO
>>> filename = "Emboss/needle.txt"
>>> format = "emboss"
>>> for alignment in AlignIO.parse(filename, format):
... print("Alignment of length %i" % alignment.get_alignment_length())
Alignment of length 124
Alignment of length 119
Alignment of length 120
Alignment of length 118
Alignment of length 125
If you have a string 'data' containing the file contents, use::
from Bio import AlignIO
from io import StringIO
my_iterator = AlignIO.parse(StringIO(data), format)
Use the Bio.AlignIO.read() function when you expect a single record only.
"""
from Bio import SeqIO
# Try and give helpful error messages:
if not isinstance(format, str):
raise TypeError("Need a string for the file format (lower case)")
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError(f"Format string '{format}' should be lower case")
if seq_count is not None and not isinstance(seq_count, int):
raise TypeError("Need integer for seq_count (sequences per alignment)")
with as_handle(handle) as fp:
# Map the file format to a sequence iterator:
if format in _FormatToIterator:
iterator_generator = _FormatToIterator[format]
i = iterator_generator(fp, seq_count)
elif format in SeqIO._FormatToIterator:
# Exploit the existing SeqIO parser to do the dirty work!
i = _SeqIO_to_alignment_iterator(fp, format, seq_count=seq_count)
else:
raise ValueError(f"Unknown format '{format}'")
yield from i

def read(handle, format, seq_count=None):
"""Turn an alignment file into a single MultipleSeqAlignment object.
Arguments:
- handle - handle to the file, or the filename as a string
(note older versions of Biopython only took a handle).
- format - string describing the file format.
- seq_count - Optional integer, number of sequences expected in each
alignment. Recommended for fasta format files.
If the handle contains no alignments, or more than one alignment,
an exception is raised. For example, using a PFAM/Stockholm file
containing one alignment:
>>> from Bio import AlignIO
>>> filename = "Clustalw/protein.aln"
>>> format = "clustal"
>>> alignment = AlignIO.read(filename, format)
>>> print("Alignment of length %i" % alignment.get_alignment_length())
Alignment of length 411
If, however, you want the first alignment from a file containing
multiple alignments, this function would raise an exception.
>>> from Bio import AlignIO
>>> filename = "Emboss/needle.txt"
>>> format = "emboss"
>>> alignment = AlignIO.read(filename, format)
Traceback (most recent call last):
...
ValueError: More than one record found in handle
Instead use:
>>> from Bio import AlignIO
>>> filename = "Emboss/needle.txt"
>>> format = "emboss"
>>> alignment = next(AlignIO.parse(filename, format))
>>> print("First alignment has length %i" % alignment.get_alignment_length())
First alignment has length 124
You must use the Bio.AlignIO.parse() function if you want to read multiple
records from the handle.
"""
iterator = parse(handle, format, seq_count)
try:
alignment = next(iterator)
except StopIteration:
raise ValueError("No records found in handle") from None
try:
next(iterator)
raise ValueError("More than one record found in handle")
except StopIteration:
pass
if seq_count:
if len(alignment) != seq_count:
raise RuntimeError(
"More sequences found in alignment than specified in seq_count: %s."
% seq_count
)
return alignment

def convert(in_file, in_format, out_file, out_format, molecule_type=None):
"""Convert between two alignment files, returns number of alignments.
Arguments:
- in_file - an input handle or filename
- in_format - input file format, lower case string
- out_file - an output handle or filename
- out_format - output file format, lower case string
- molecule_type - optional molecule type to apply, string containing
"DNA", "RNA" or "protein".
**NOTE** - If you provide an output filename, it will be opened which will
overwrite any existing file without warning. This may happen even if the
conversion is aborted (e.g. an invalid out_format name is given).
Some output formats require the molecule type be specified where this
cannot be determined by the parser. For example, converting from FASTA,
Clustal, or PHYLIP format to NEXUS:
>>> from io import StringIO
>>> from Bio import AlignIO
>>> handle = StringIO()
>>> AlignIO.convert("Phylip/horses.phy", "phylip", handle, "nexus", "DNA")
1
>>> print(handle.getvalue())
#NEXUS
begin data;
dimensions ntax=10 nchar=40;
format datatype=dna missing=? gap=-;
matrix
Mesohippus AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
Hypohippus AAACCCCCCCAAAAAAAAACAAAAAAAAAAAAAAAAAAAA
Archaeohip CAAAAAAAAAAAAAAAACACAAAAAAAAAAAAAAAAAAAA
Parahippus CAAACAACAACAAAAAAAACAAAAAAAAAAAAAAAAAAAA
Merychippu CCAACCACCACCCCACACCCAAAAAAAAAAAAAAAAAAAA
'M. secundu' CCAACCACCACCCACACCCCAAAAAAAAAAAAAAAAAAAA
Nannipus CCAACCACAACCCCACACCCAAAAAAAAAAAAAAAAAAAA
Neohippari CCAACCCCCCCCCCACACCCAAAAAAAAAAAAAAAAAAAA
Calippus CCAACCACAACCCACACCCCAAAAAAAAAAAAAAAAAAAA
Pliohippus CCCACCCCCCCCCACACCCCAAAAAAAAAAAAAAAAAAAA
;
end;
<BLANKLINE>
"""
if molecule_type:
if not isinstance(molecule_type, str):
raise TypeError(f"Molecule type should be a string, not {molecule_type!r}")
elif (
"DNA" in molecule_type
or "RNA" in molecule_type
or "protein" in molecule_type
):
pass
else:
raise ValueError(f"Unexpected molecule type, {molecule_type!r}")
# TODO - Add optimised versions of important conversions
# For now just off load the work to SeqIO parse/write
# Don't open the output file until we've checked the input is OK:
alignments = parse(in_file, in_format, None)
if molecule_type:
# Edit the records on the fly to set molecule type
def over_ride(alignment):
"""Over-ride molecule in-place."""
for record in alignment:
record.annotations["molecule_type"] = molecule_type
return alignment
alignments = (over_ride(_) for _ in alignments)
return write(alignments, out_file, out_format)

def _escape_filename(filename):
"""Escape filenames with spaces by adding quotes (PRIVATE).
Note this will not add quotes if they are already included:
>>> print((_escape_filename('example with spaces')))
"example with spaces"
>>> print((_escape_filename('"example with spaces"')))
"example with spaces"
>>> print((_escape_filename(1)))
1
Note the function is more generic than the name suggests, since it
is used to add quotes around any string arguments containing spaces.
"""
# Is adding the following helpful?
# if os.path.isfile(filename):
# # On Windows, if the file exists, we can ask for
# # its alternative short name (DOS style 8.3 format)
# # which has no spaces in it. Note that this name
# # is not portable between machines, or even folders!
# try:
# import win32api
# short = win32api.GetShortPathName(filename)
# assert os.path.isfile(short)
# return short
# except ImportError:
# pass
if not isinstance(filename, str):
# for example the NCBI BLAST+ -outfmt argument can be an integer
return filename
if " " not in filename:
return filename
# We'll just quote it - works on Windows, Mac OS X etc
if filename.startswith('"') and filename.endswith('"'):
# It's already quoted
return filename
else:
return f'"{filename}"' |

def _test():
"""Run the Bio.Application module's doctests (PRIVATE)."""
import doctest
doctest.testmod(verbose=1)

def _test():
"""Run the Bio.Blast.Applications module's doctests (PRIVATE)."""
import doctest
doctest.testmod(verbose=1)

def qblast(
program,
database,
sequence,
url_base=NCBI_BLAST_URL,
auto_format=None,
composition_based_statistics=None,
db_genetic_code=None,
endpoints=None,
entrez_query="(none)",
expect=10.0,
filter=None,
gapcosts=None,
genetic_code=None,
hitlist_size=50,
i_thresh=None,
layout=None,
lcase_mask=None,
matrix_name=None,
nucl_penalty=None,
nucl_reward=None,
other_advanced=None,
perc_ident=None,
phi_pattern=None,
query_file=None,
query_believe_defline=None,
query_from=None,
query_to=None,
searchsp_eff=None,
service=None,
threshold=None,
ungapped_alignment=None,
word_size=None,
short_query=None,
alignments=500,
alignment_view=None,
descriptions=500,
entrez_links_new_window=None,
expect_low=None,
expect_high=None,
format_entrez_query=None,
format_object=None,
format_type="XML",
ncbi_gi=None,
results_file=None,
show_overview=None,
megablast=None,
template_type=None,
template_length=None,
username="blast",
password=None,
):
"""BLAST search using NCBI's QBLAST server or a cloud service provider.
Supports all parameters of the old qblast API for Put and Get.
Please note that NCBI uses the new Common URL API for BLAST searches
on the internet (http://ncbi.github.io/blast-cloud/dev/api.html). Thus,
some of the parameters used by this function are not (or are no longer)
officially supported by NCBI. Although they are still functioning, this
may change in the future.
The Common URL API (http://ncbi.github.io/blast-cloud/dev/api.html) allows
doing BLAST searches on cloud servers. To use this feature, please set
``url_base='http://host.my.cloud.service.provider.com/cgi-bin/blast.cgi'``
and ``format_object='Alignment'``. For more details, please see
https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=CloudBlast
Some useful parameters:
- program blastn, blastp, blastx, tblastn, or tblastx (lower case)
- database Which database to search against (e.g. "nr").
- sequence The sequence to search.
- ncbi_gi TRUE/FALSE whether to give 'gi' identifier.
- descriptions Number of descriptions to show. Def 500.
- alignments Number of alignments to show. Def 500.
- expect An expect value cutoff. Def 10.0.
- matrix_name Specify an alt. matrix (PAM30, PAM70, BLOSUM80, BLOSUM45).
- filter "none" turns off filtering. Default no filtering
- format_type "HTML", "Text", "ASN.1", or "XML". Def. "XML".
- entrez_query Entrez query to limit Blast search
- hitlist_size Number of hits to return. Default 50
- megablast TRUE/FALSE whether to use the MegaBLAST algorithm (blastn only)
- short_query TRUE/FALSE whether to adjust the search parameters for a
short query sequence. Note that this will override
manually set parameters like word size and e value. Turns
off when sequence length is > 30 residues. Default: None.
- service plain, psi, phi, rpsblast, megablast (lower case)
This function does no checking of the validity of the parameters
and passes the values to the server as is. More help is available at:
https://ncbi.github.io/blast-cloud/dev/api.html
"""
programs = ["blastn", "blastp", "blastx", "tblastn", "tblastx"]
if program not in programs:
raise ValueError(
f"Program specified is {program}. Expected one of {', '.join(programs)}"
)
# SHORT_QUERY_ADJUST throws an error when using blastn (wrong parameter
# assignment from NCBIs side).
# Thus we set the (known) parameters directly:
if short_query and program == "blastn":
short_query = None
# We only use the 'short-query' parameters for short sequences:
if len(sequence) < 31:
expect = 1000
word_size = 7
nucl_reward = 1
filter = None
lcase_mask = None
warnings.warn(
'"SHORT_QUERY_ADJUST" is incorrectly implemented (by NCBI) for blastn.'
" We bypass the problem by manually adjusting the search parameters."
" Thus, results may slightly differ from web page searches.",
BiopythonWarning,
)
# Format the "Put" command, which sends search requests to qblast.
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node5.html on 9 July 2007
# Additional parameters are taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node9.html on 8 Oct 2010
# To perform a PSI-BLAST or PHI-BLAST search the service ("Put" and "Get" commands) must be specified
# (e.g. psi_blast = NCBIWWW.qblast("blastp", "refseq_protein", input_sequence, service="psi"))
parameters = {
"AUTO_FORMAT": auto_format,
"COMPOSITION_BASED_STATISTICS": composition_based_statistics,
"DATABASE": database,
"DB_GENETIC_CODE": db_genetic_code,
"ENDPOINTS": endpoints,
"ENTREZ_QUERY": entrez_query,
"EXPECT": expect,
"FILTER": filter,
"GAPCOSTS": gapcosts,
"GENETIC_CODE": genetic_code,
"HITLIST_SIZE": hitlist_size,
"I_THRESH": i_thresh,
"LAYOUT": layout,
"LCASE_MASK": lcase_mask,
"MEGABLAST": megablast,
"MATRIX_NAME": matrix_name,
"NUCL_PENALTY": nucl_penalty,
"NUCL_REWARD": nucl_reward,
"OTHER_ADVANCED": other_advanced,
"PERC_IDENT": perc_ident,
"PHI_PATTERN": phi_pattern,
"PROGRAM": program,
# "PSSM": pssm - is it possible to use PSI-BLAST via this API?
"QUERY": sequence,
"QUERY_FILE": query_file,
"QUERY_BELIEVE_DEFLINE": query_believe_defline,
"QUERY_FROM": query_from,
"QUERY_TO": query_to,
# 'RESULTS_FILE': ...: - Can we use this parameter?
"SEARCHSP_EFF": searchsp_eff,
"SERVICE": service,
"SHORT_QUERY_ADJUST": short_query,
"TEMPLATE_TYPE": template_type,
"TEMPLATE_LENGTH": template_length,
"THRESHOLD": threshold,
"UNGAPPED_ALIGNMENT": ungapped_alignment,
"WORD_SIZE": word_size,
"CMD": "Put",
}
if password is not None:
# handle authentication for BLAST cloud
password_mgr = HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, url_base, username, password)
handler = HTTPBasicAuthHandler(password_mgr)
opener = build_opener(handler)
install_opener(opener)
if url_base == NCBI_BLAST_URL:
parameters.update({"email": email, "tool": tool})
parameters = {key: value for key, value in parameters.items() if value is not None}
message = urlencode(parameters).encode()
request = Request(url_base, message, {"User-Agent": "BiopythonClient"})
# Send off the initial query to qblast.
# Note the NCBI do not currently impose a rate limit here, other
# than the request not to make say 50 queries at once using multiple
# threads.
handle = urlopen(request)
# Format the "Get" command, which gets the formatted results from qblast
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node6.html on 9 July 2007
rid, rtoe = _parse_qblast_ref_page(handle)
parameters = {
"ALIGNMENTS": alignments,
"ALIGNMENT_VIEW": alignment_view,
"DESCRIPTIONS": descriptions,
"ENTREZ_LINKS_NEW_WINDOW": entrez_links_new_window,
"EXPECT_LOW": expect_low,
"EXPECT_HIGH": expect_high,
"FORMAT_ENTREZ_QUERY": format_entrez_query,
"FORMAT_OBJECT": format_object,
"FORMAT_TYPE": format_type,
"NCBI_GI": ncbi_gi,
"RID": rid,
"RESULTS_FILE": results_file,
"SERVICE": service,
"SHOW_OVERVIEW": show_overview,
"CMD": "Get",
}
parameters = {key: value for key, value in parameters.items() if value is not None}
message = urlencode(parameters).encode()
# Poll NCBI until the results are ready.
# https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=DeveloperInfo
# 1. Do not contact the server more often than once every 10 seconds.
# 2. Do not poll for any single RID more often than once a minute.
# 3. Use the URL parameter email and tool, so that the NCBI
# can contact you if there is a problem.
# 4. Run scripts weekends or between 9 pm and 5 am Eastern time
# on weekdays if more than 50 searches will be submitted.
# --
# Could start with a 10s delay, but expect most short queries
# will take longer thus at least 70s with delay. Therefore,
# start with 20s delay, thereafter once a minute.
delay = 20 # seconds
start_time = time.time()
while True:
current = time.time()
wait = qblast.previous + delay - current
if wait > 0:
time.sleep(wait)
qblast.previous = current + wait
else:
qblast.previous = current
# delay by at least 60 seconds only if running the request against the public NCBI API
if delay < 60 and url_base == NCBI_BLAST_URL:
# Wasn't a quick return, must wait at least a minute
delay = 60
elapsed = time.time() - start_time
# Throw a warning if search takes longer than ten minutes
if elapsed >= 600:
warnings.warn(
f"BLAST request {rid} is taking longer than 10 minutes, consider re-issuing it",
BiopythonWarning,
)
request = Request(url_base, message, {"User-Agent": "BiopythonClient"})
handle = urlopen(request)
results = handle.read().decode()
# Can see an "\n\n" page while results are in progress,
# if so just wait a bit longer...
if results == "\n\n":
continue
# XML results don't have the Status tag when finished
if "Status=" not in results:
break
i = results.index("Status=")
j = results.index("\n", i)
status = results[i + len("Status=") : j].strip()
if status.upper() == "READY":
break
return StringIO(results)
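
A hypothetical usage sketch; this performs a live NCBI request, is subject to NCBI usage policies, and assumes the module-level NCBIWWW.email courtesy field is set.

from Bio.Blast import NCBIWWW

NCBIWWW.email = "your.name@example.org"  # so NCBI can contact you if there is a problem
# An accession/GI can be used as the query instead of a raw sequence string:
result_handle = NCBIWWW.qblast("blastn", "nt", "8332116")
blast_xml = result_handle.read()
result_handle.close()
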

def _parse_qblast_ref_page(handle):
"""Extract a tuple of RID, RTOE from the 'please wait' page (PRIVATE).
The NCBI FAQ pages use TOE for 'Time of Execution', so RTOE is probably
'Request Time of Execution' and RID would be 'Request Identifier'.
"""
s = handle.read().decode()
i = s.find("RID =")
if i == -1:
rid = None
else:
j = s.find("\n", i)
rid = s[i + len("RID =") : j].strip()
i = s.find("RTOE =")
if i == -1:
rtoe = None
else:
j = s.find("\n", i)
rtoe = s[i + len("RTOE =") : j].strip()
if not rid and not rtoe:
# Can we reliably extract the error message from the HTML page?
# e.g. "Message ID#24 Error: Failed to read the Blast query:
# Nucleotide FASTA provided for protein sequence"
# or "Message ID#32 Error: Query contains no data: Query
# contains no sequence data"
#
# This used to occur inside a <div class="error msInf"> entry:
i = s.find('<div class="error msInf">')
if i != -1:
msg = s[i + len('<div class="error msInf">') :].strip()
msg = msg.split("</div>", 1)[0].split("\n", 1)[0].strip()
if msg:
raise ValueError(f"Error message from NCBI: {msg}")
# In spring 2010 the markup was like this:
i = s.find('<p class="error">')
if i != -1:
msg = s[i + len('<p class="error">') :].strip()
msg = msg.split("</p>", 1)[0].split("\n", 1)[0].strip()
if msg:
raise ValueError(f"Error message from NCBI: {msg}")
# Generic search based on the way the error messages start:
i = s.find("Message ID#")
if i != -1:
# Break the message at the first HTML tag
msg = s[i:].split("<", 1)[0].split("\n", 1)[0].strip()
raise ValueError(f"Error message from NCBI: {msg}")
# We didn't recognise the error layout :(
# print(s)
raise ValueError(
"No RID and no RTOE found in the 'please wait' page, "
"there was probably an error in your request but we "
"could not extract a helpful error message."
)
elif not rid:
# Can this happen?
raise ValueError(
f"No RID found in the 'please wait' page. (although RTOE = {rtoe!r})"
)
elif not rtoe:
# Can this happen?
raise ValueError(
f"No RTOE found in the 'please wait' page. (although RID = {rid!r})"
)
try:
return rid, int(rtoe)
except ValueError:
raise ValueError(
f"A non-integer RTOE found in the 'please wait' page, {rtoe!r}"
) from None
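
Illustrative only, with a fabricated minimal 'please wait' page; the handle must yield bytes because the function calls handle.read().decode().

from io import BytesIO

rid, rtoe = _parse_qblast_ref_page(BytesIO(b"RID = ABC123XYZ\nRTOE = 25\n"))
assert (rid, rtoe) == ("ABC123XYZ", 25)
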

def fmt_(value, format_spec="%s", default_str="<unknown>"):
"""Ensure the given value formats to a string correctly."""
if value is None:
return default_str
return format_spec % value
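
Illustrative calls:

assert fmt_(None) == "<unknown>"
assert fmt_(3.14159, "%.2f") == "3.14"
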

def read(handle, debug=0):
"""Return a single Blast record (assumes just one query).
Uses the BlastParser internally.
This function is for use when there is one and only one BLAST
result in your XML file.
Use the Bio.Blast.NCBIXML.parse() function if you expect more than
one BLAST record (i.e. if you have more than one query sequence).
"""
iterator = parse(handle, debug)
try:
record = next(iterator)
except StopIteration:
raise ValueError("No records found in handle") from None
try:
next(iterator)
raise ValueError("More than one record found in handle")
except StopIteration:
pass
return record

def parse(handle, debug=0):
"""Return an iterator a Blast record for each query.
Incremental parser, this is an iterator that returns
Blast records. It uses the BlastParser internally.
handle - file handle to an XML file to parse
debug - integer, amount of debug information to print
This is a generator function that returns multiple Blast record
objects - one for each query sequence given to BLAST. The file
is read incrementally, returning complete records as they are read
in.
Should cope with new BLAST 2.2.14+ which gives a single XML file
for multiple query records.
Should also cope with XML output from older versions of BLAST which
gave multiple XML files concatenated together (giving a single file
which strictly speaking wasn't valid XML).
"""
from xml.parsers import expat
BLOCK = 1024
MARGIN = 10 # must be at least length of newline + XML start
XML_START = "<?xml"
NEW_LINE = "\n"
NULL = ""
pending = ""
text = handle.read(BLOCK)
if isinstance(text, bytes):
# Not a text handle, raw bytes mode
XML_START = b"<?xml"
NEW_LINE = b"\n"
NULL = b""
pending = b""
if not text:
# NO DATA FOUND!
raise ValueError("Your XML file was empty")
while text:
# We are now starting a new XML file
if not text.startswith(XML_START):
raise ValueError(
"Your XML file did not start with %r... but instead %r"
% (XML_START, text[:20])
)
expat_parser = expat.ParserCreate()
blast_parser = BlastParser(debug)
expat_parser.StartElementHandler = blast_parser.startElement
expat_parser.EndElementHandler = blast_parser.endElement
expat_parser.CharacterDataHandler = blast_parser.characters
expat_parser.Parse(text, False)
while blast_parser._records:
record = blast_parser._records[0]
blast_parser._records = blast_parser._records[1:]
yield record
while True:
# Read in another block of the file...
text, pending = pending + handle.read(BLOCK), ""
if not text:
# End of the file!
expat_parser.Parse(NULL, True) # End of XML record
break
# Now read a little bit more so we can check for the
# start of another XML file...
pending = handle.read(MARGIN)
if (NEW_LINE + XML_START) not in (text + pending):
# Good - still dealing with the same XML file
expat_parser.Parse(text, False)
while blast_parser._records:
yield blast_parser._records.pop(0)
else:
# This is output from pre 2.2.14 BLAST,
# one XML file for each query!
# Finish the old file:
text, pending = (text + pending).split(NEW_LINE + XML_START, 1)
pending = XML_START + pending
expat_parser.Parse(text, True) # End of XML record
while blast_parser._records:
yield blast_parser._records.pop(0)
# Now we are going to re-loop, reset the
# parsers and start reading the next XML file
text, pending = pending, NULL
break
# At this point we have finished the first XML record.
# If the file is from an old version of blast, it may
# contain more XML records (check if text=="").
assert not pending, pending
assert len(blast_parser._records) == 0, len(blast_parser._records)
# We should have finished the file!
assert not text, text
assert not pending, pending
assert len(blast_parser._records) == 0, len(blast_parser._records)
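
A usage sketch, assuming a legacy-format XML file "my_blast.xml" produced with blastn -outfmt 5 (the filename is hypothetical):

from Bio.Blast import NCBIXML

with open("my_blast.xml") as handle:
    for record in NCBIXML.parse(handle):
        for alignment in record.alignments:
            print(alignment.title, alignment.hsps[0].expect)
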

def parse(source):
"""Parse an XML file containing BLAST output and return a Bio.Blast.Records object.
This returns an iterator object; iterating over it returns Bio.Blast.Record
objects one by one.
The source can be a file stream or the path to an XML file containing the
BLAST output. If a file stream, source must be in binary mode. This allows
the parser to detect the encoding from the XML file, and to use it to convert
any text in the XML to the correct Unicode string. The qblast function in
Bio.Blast returns a file stream in binary mode. For files, please use mode
"rb" when opening the file, as in
>>> from Bio import Blast
>>> stream = open("Blast/wnts.xml", "rb") # opened in binary mode
>>> records = Blast.parse(stream)
>>> for record in records:
... print(record.query.id, record.query.description)
...
Query_1 gi|195230749:301-1383 Homo sapiens wingless-type MMTV integration site family member 2 (WNT2), transcript variant 1, mRNA
Query_2 gi|325053704:108-1166 Homo sapiens wingless-type MMTV integration site family, member 3A (WNT3A), mRNA
Query_3 gi|156630997:105-1160 Homo sapiens wingless-type MMTV integration site family, member 4 (WNT4), mRNA
Query_4 gi|371502086:108-1205 Homo sapiens wingless-type MMTV integration site family, member 5A (WNT5A), transcript variant 2, mRNA
Query_5 gi|53729353:216-1313 Homo sapiens wingless-type MMTV integration site family, member 6 (WNT6), mRNA
>>> stream.close()
"""
return Records(source)

def read(source):
"""Parse an XML file containing BLAST output for a single query and return it.
Internally, this function uses Bio.Blast.parse to obtain an iterator over
BLAST records. The function then reads one record from the iterator,
ensures that there are no further records, and returns the record it found
as a Bio.Blast.Record object. An exception is raised if no records are
found, or more than one record is found.
The source can be a file stream or the path to an XML file containing the
BLAST output. If a file stream, source must be in binary mode. This allows
the parser to detect the encoding from the XML file, and to use it to convert
any text in the XML to the correct Unicode string. The qblast function in
Bio.Blast returns a file stream in binary mode. For files, please use mode
"rb" when opening the file, as in
>>> from Bio import Blast
>>> stream = open("Blast/xml_21500_blastn_001.xml", "rb") # opened in binary mode
>>> record = Blast.read(stream)
>>> record.query.id
'Query_78041'
>>> record.query.description
'G26684.1 human STS STS_D11570, sequence tagged site'
>>> len(record)
11
>>> stream.close()
Use the Bio.Blast.parse function if you want to read a file containing
BLAST output for more than one query.
"""
with parse(source) as records:
try:
record = next(records)
except StopIteration:
raise ValueError("No BLAST output found.") from None
try:
next(records)
raise ValueError("BLAST output for more than one query found.")
except StopIteration:
pass
for key in ("source", "program", "version", "reference", "db", "param", "mbstat"):
try:
value = getattr(records, key)
except AttributeError:
pass
else:
setattr(record, key, value)
return record

def write(records, destination, fmt="XML"):
"""Write BLAST records as an XML file, and return the number of records.
Arguments:
- records - A ``Bio.Blast.Records`` object.
- destination - File or file-like object to write to, or filename as
string.
The File object must have been opened for writing in
binary mode, and must be closed (or flushed) by the caller
after this function returns to ensure that all records are
written.
- fmt - string describing the file format to write
(case-insensitive).
Currently, only "XML" and "XML2" are accepted.
Returns the number of records written (as an integer).
"""
fmt = fmt.upper()
if fmt == "XML":
Writer = _writers.XMLWriter
elif fmt == "XML2":
Writer = _writers.XML2Writer
else:
raise ValueError(f"Unknown format {fmt}; expected 'XML' or 'XML2'")
try:
stream = open(destination, "wb")
except TypeError: # not a path, assume we received a stream
try:
destination.write(b"")
except TypeError:
# destination was opened in text mode
raise StreamModeError(
"File must be opened in binary mode for writing."
) from None
stream = destination
writer = Writer(stream)
try:
count = writer.write(records)
finally:
if stream is not destination:
stream.close()
return count
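
A round-trip sketch with hypothetical filenames, pairing Bio.Blast.parse with this writer; parse accepts a path, and the returned Records object works as a context manager.

from Bio import Blast

with Blast.parse("results.xml") as records:
    count = Blast.write(records, "copy.xml", fmt="XML")
print(count, "record(s) written")
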

def qblast(
program,
database,
sequence,
url_base=NCBI_BLAST_URL,
auto_format=None,
composition_based_statistics=None,
db_genetic_code=None,
endpoints=None,
entrez_query="(none)",
expect=10.0,
filter=None,
gapcosts=None,
genetic_code=None,
hitlist_size=50,
i_thresh=None,
layout=None,
lcase_mask=None,
matrix_name=None,
nucl_penalty=None,
nucl_reward=None,
other_advanced=None,
perc_ident=None,
phi_pattern=None,
query_file=None,
query_believe_defline=None,
query_from=None,
query_to=None,
searchsp_eff=None,
service=None,
threshold=None,
ungapped_alignment=None,
word_size=None,
short_query=None,
alignments=500,
alignment_view=None,
descriptions=500,
entrez_links_new_window=None,
expect_low=None,
expect_high=None,
format_entrez_query=None,
format_object=None,
format_type="XML",
ncbi_gi=None,
results_file=None,
show_overview=None,
megablast=None,
template_type=None,
template_length=None,
username="blast",
password=None,
):
"""BLAST search using NCBI's QBLAST server or a cloud service provider.
Supports all parameters of the old qblast API for Put and Get.
Please note that NCBI uses the new Common URL API for BLAST searches
on the internet (http://ncbi.github.io/blast-cloud/dev/api.html). Thus,
some of the parameters used by this function are not (or are no longer)
officially supported by NCBI. Although they are still functioning, this
may change in the future.
The Common URL API (http://ncbi.github.io/blast-cloud/dev/api.html) allows
doing BLAST searches on cloud servers. To use this feature, please set
``url_base='http://host.my.cloud.service.provider.com/cgi-bin/blast.cgi'``
and ``format_object='Alignment'``. For more details, please see
https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=CloudBlast
Some useful parameters:
- program blastn, blastp, blastx, tblastn, or tblastx (lower case)
- database Which database to search against (e.g. "nr").
- sequence The sequence to search.
- ncbi_gi TRUE/FALSE whether to give 'gi' identifier.
- descriptions Number of descriptions to show. Def 500.
- alignments Number of alignments to show. Def 500.
- expect An expect value cutoff. Def 10.0.
- matrix_name Specify an alt. matrix (PAM30, PAM70, BLOSUM80, BLOSUM45).
- filter "none" turns off filtering. Default no filtering
- format_type "XML" (default), "HTML", "Text", "XML2", "JSON2",
or "Tabular".
- entrez_query Entrez query to limit Blast search
- hitlist_size Number of hits to return. Default 50
- megablast TRUE/FALSE whether to use the MegaBLAST algorithm (blastn only)
- short_query TRUE/FALSE whether to adjust the search parameters for a
short query sequence. Note that this will override
manually set parameters like word size and e value. Turns
off when sequence length is > 30 residues. Default: None.
- service plain, psi, phi, rpsblast, megablast (lower case)
This function does no checking of the validity of the parameters
and passes the values to the server as is. More help is available at:
https://ncbi.github.io/blast-cloud/dev/api.html
"""
programs = ["blastn", "blastp", "blastx", "tblastn", "tblastx"]
if program not in programs:
raise ValueError(
f"Program specified is {program}. Expected one of {', '.join(programs)}"
)
# SHORT_QUERY_ADJUST throws an error when using blastn (wrong parameter
# assignment from NCBIs side).
# Thus we set the (known) parameters directly:
if short_query and program == "blastn":
short_query = None
# We only use the 'short-query' parameters for short sequences:
if len(sequence) < 31:
expect = 1000
word_size = 7
nucl_reward = 1
filter = None
lcase_mask = None
warnings.warn(
'"SHORT_QUERY_ADJUST" is incorrectly implemented (by NCBI) for blastn.'
" We bypass the problem by manually adjusting the search parameters."
" Thus, results may slightly differ from web page searches.",
BiopythonWarning,
)
# Format the "Put" command, which sends search requests to qblast.
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node5.html on 9 July 2007
# Additional parameters are taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node9.html on 8 Oct 2010
# To perform a PSI-BLAST or PHI-BLAST search the service ("Put" and "Get" commands) must be specified
# (e.g. psi_blast = NCBIWWW.qblast("blastp", "refseq_protein", input_sequence, service="psi"))
parameters = {
"AUTO_FORMAT": auto_format,
"COMPOSITION_BASED_STATISTICS": composition_based_statistics,
"DATABASE": database,
"DB_GENETIC_CODE": db_genetic_code,
"ENDPOINTS": endpoints,
"ENTREZ_QUERY": entrez_query,
"EXPECT": expect,
"FILTER": filter,
"GAPCOSTS": gapcosts,
"GENETIC_CODE": genetic_code,
"HITLIST_SIZE": hitlist_size,
"I_THRESH": i_thresh,
"LAYOUT": layout,
"LCASE_MASK": lcase_mask,
"MEGABLAST": megablast,
"MATRIX_NAME": matrix_name,
"NUCL_PENALTY": nucl_penalty,
"NUCL_REWARD": nucl_reward,
"OTHER_ADVANCED": other_advanced,
"PERC_IDENT": perc_ident,
"PHI_PATTERN": phi_pattern,
"PROGRAM": program,
# "PSSM": pssm - is it possible to use PSI-BLAST via this API?
"QUERY": sequence,
"QUERY_FILE": query_file,
"QUERY_BELIEVE_DEFLINE": query_believe_defline,
"QUERY_FROM": query_from,
"QUERY_TO": query_to,
# 'RESULTS_FILE': ...: - Can we use this parameter?
"SEARCHSP_EFF": searchsp_eff,
"SERVICE": service,
"SHORT_QUERY_ADJUST": short_query,
"TEMPLATE_TYPE": template_type,
"TEMPLATE_LENGTH": template_length,
"THRESHOLD": threshold,
"UNGAPPED_ALIGNMENT": ungapped_alignment,
"WORD_SIZE": word_size,
"CMD": "Put",
}
if password is not None:
# handle authentication for BLAST cloud
password_mgr = HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, url_base, username, password)
handler = HTTPBasicAuthHandler(password_mgr)
opener = build_opener(handler)
install_opener(opener)
if url_base == NCBI_BLAST_URL:
parameters.update({"email": email, "tool": tool})
parameters = {key: value for key, value in parameters.items() if value is not None}
message = urlencode(parameters).encode()
request = Request(url_base, message, {"User-Agent": "BiopythonClient"})
# Send off the initial query to qblast.
# Note the NCBI do not currently impose a rate limit here, other
# than the request not to make say 50 queries at once using multiple
# threads.
stream = urlopen(request)
# Format the "Get" command, which gets the formatted results from qblast
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node6.html on 9 July 2007
rid, rtoe = _parse_qblast_ref_page(stream)
parameters = {
"ALIGNMENTS": alignments,
"ALIGNMENT_VIEW": alignment_view,
"DESCRIPTIONS": descriptions,
"ENTREZ_LINKS_NEW_WINDOW": entrez_links_new_window,
"EXPECT_LOW": expect_low,
"EXPECT_HIGH": expect_high,
"FORMAT_ENTREZ_QUERY": format_entrez_query,
"FORMAT_OBJECT": format_object,
"FORMAT_TYPE": format_type,
"NCBI_GI": ncbi_gi,
"RID": rid,
"RESULTS_FILE": results_file,
"SERVICE": service,
"SHOW_OVERVIEW": show_overview,
"CMD": "Get",
}
parameters = {key: value for key, value in parameters.items() if value is not None}
message = urlencode(parameters).encode()
# Poll NCBI until the results are ready.
# https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=DeveloperInfo
# 1. Do not contact the server more often than once every 10 seconds.
# 2. Do not poll for any single RID more often than once a minute.
# 3. Use the URL parameter email and tool, so that the NCBI
# can contact you if there is a problem.
# 4. Run scripts weekends or between 9 pm and 5 am Eastern time
# on weekdays if more than 50 searches will be submitted.
# --
    # Could start with a 10s delay, but expect most short queries
    # will take longer, so the total wait is at least ~70s anyway.
    # Therefore, start with a 20s delay, thereafter poll once a minute.
delay = 20 # seconds
start_time = time.time()
while True:
current = time.time()
wait = qblast.previous + delay - current
if wait > 0:
time.sleep(wait)
qblast.previous = current + wait
else:
qblast.previous = current
# delay by at least 60 seconds only if running the request against the public NCBI API
if delay < 60 and url_base == NCBI_BLAST_URL:
# Wasn't a quick return, must wait at least a minute
delay = 60
elapsed = time.time() - start_time
if elapsed >= 600:
warnings.warn(
f"BLAST request {rid} is taking longer than 10 minutes, consider re-issuing it",
BiopythonWarning,
)
request = Request(url_base, message, {"User-Agent": "BiopythonClient"})
stream = urlopen(request)
data = stream.peek()
if format_type == "HTML" and b"<title>NCBI Blast:</title>" in data:
continue
elif data.startswith(b"<!DOCTYPE html"):
continue
else:
break
if format_type == "XML":
assert data.startswith(b"<?xml ")
elif format_type == "HTML":
assert data.startswith(b"<!DOCTYPE html ")
elif format_type in ("Text", "Tabular"):
assert data.startswith(b"<p><!--\nQBlastInfoBegin")
elif format_type in ("XML2", "JSON2"):
assert data.startswith(b"PK\x03\x04") # zipped file
return stream |
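# Illustrative usage sketch (not part of the original module): the handle
# returned by qblast can be parsed with Bio.Blast.NCBIXML when the default
# XML format_type is used.
#
#     from Bio.Blast import NCBIWWW, NCBIXML
#     stream = NCBIWWW.qblast("blastn", "nt", "8332116")
#     record = NCBIXML.read(stream)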
Extract a tuple of RID, RTOE from the 'please wait' page (PRIVATE).
The NCBI FAQ pages use TOE for 'Time of Execution', so RTOE is probably
'Request Time of Execution' and RID would be 'Request Identifier'. | def _parse_qblast_ref_page(handle):
"""Extract a tuple of RID, RTOE from the 'please wait' page (PRIVATE).
The NCBI FAQ pages use TOE for 'Time of Execution', so RTOE is probably
'Request Time of Execution' and RID would be 'Request Identifier'.
"""
s = handle.read().decode()
i = s.find("RID =")
if i == -1:
rid = None
else:
j = s.find("\n", i)
rid = s[i + len("RID =") : j].strip()
i = s.find("RTOE =")
if i == -1:
rtoe = None
else:
j = s.find("\n", i)
rtoe = s[i + len("RTOE =") : j].strip()
if not rid and not rtoe:
# Can we reliably extract the error message from the HTML page?
# e.g. "Message ID#24 Error: Failed to read the Blast query:
# Nucleotide FASTA provided for protein sequence"
# or "Message ID#32 Error: Query contains no data: Query
# contains no sequence data"
#
# This used to occur inside a <div class="error msInf"> entry:
i = s.find('<div class="error msInf">')
if i != -1:
msg = s[i + len('<div class="error msInf">') :].strip()
msg = msg.split("</div>", 1)[0].split("\n", 1)[0].strip()
if msg:
raise ValueError(f"Error message from NCBI: {msg}")
# In spring 2010 the markup was like this:
i = s.find('<p class="error">')
if i != -1:
msg = s[i + len('<p class="error">') :].strip()
msg = msg.split("</p>", 1)[0].split("\n", 1)[0].strip()
if msg:
raise ValueError(f"Error message from NCBI: {msg}")
# Generic search based on the way the error messages start:
i = s.find("Message ID#")
if i != -1:
# Break the message at the first HTML tag
msg = s[i:].split("<", 1)[0].split("\n", 1)[0].strip()
raise ValueError(f"Error message from NCBI: {msg}")
# We didn't recognise the error layout :(
# print(s)
raise ValueError(
"No RID and no RTOE found in the 'please wait' page, "
"there was probably an error in your request but we "
"could not extract a helpful error message."
)
elif not rid:
# Can this happen?
raise ValueError(
f"No RID found in the 'please wait' page. (although RTOE = {rtoe!r})"
)
elif not rtoe:
# Can this happen?
raise ValueError(
f"No RTOE found in the 'please wait' page. (although RID = {rid!r})"
)
try:
return rid, int(rtoe)
except ValueError:
raise ValueError(
f"A non-integer RTOE found in the 'please wait' page, {rtoe!r}"
) from None |
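# Illustrative check (hypothetical input, not from the original source): the
# parser only needs the "RID = ..." and "RTOE = ..." lines that NCBI embeds
# in the QBlastInfo comment block of the waiting page.
#
#     from io import BytesIO
#     page = BytesIO(b"QBlastInfoBegin\n    RID = ABC123\n    RTOE = 25\nQBlastInfoEnd\n")
#     rid, rtoe = _parse_qblast_ref_page(page)  # -> ("ABC123", 25)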
Perform k-means clustering.
This function performs k-means clustering on the values in data, and
returns the cluster assignments, the within-cluster sum of distances
of the optimal k-means clustering solution, and the number of times
the optimal solution was found.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- nclusters: number of clusters (the 'k' in k-means).
- mask: nrows x ncolumns array of integers, showing which data
are missing. If mask[i,j]==0, then data[i,j] is missing.
- weight: the weights to be used when calculating distances
- transpose:
- if False: rows are clustered;
- if True: columns are clustered.
- npass: number of times the k-means clustering algorithm is
performed, each time with a different (random) initial
condition.
- method: specifies how the center of a cluster is found:
- method == 'a': arithmetic mean;
- method == 'm': median.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance;
- dist == 'b': City Block distance;
- dist == 'c': Pearson correlation;
- dist == 'a': absolute value of the correlation;
- dist == 'u': uncentered correlation;
- dist == 'x': absolute uncentered correlation;
- dist == 's': Spearman's rank correlation;
- dist == 'k': Kendall's tau.
- initialid: the initial clustering from which the algorithm
should start.
If initialid is None, the routine carries out npass
repetitions of the EM algorithm, each time starting from a
different random initial clustering. If initialid is given,
the routine carries out the EM algorithm only once, starting
from the given initial clustering and without randomizing the
order in which items are assigned to clusters (i.e., using
the same order as in the data matrix). In that case, the
k-means algorithm is fully deterministic.
Return values:
- clusterid: array containing the index of the cluster to which each
item was assigned in the best k-means clustering solution that was
found in the npass runs;
- error: the within-cluster sum of distances for the returned k-means
clustering solution;
- nfound: the number of times this solution was found. | def kcluster(
data,
nclusters=2,
mask=None,
weight=None,
transpose=False,
npass=1,
method="a",
dist="e",
initialid=None,
):
"""Perform k-means clustering.
This function performs k-means clustering on the values in data, and
returns the cluster assignments, the within-cluster sum of distances
of the optimal k-means clustering solution, and the number of times
the optimal solution was found.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- nclusters: number of clusters (the 'k' in k-means).
- mask: nrows x ncolumns array of integers, showing which data
are missing. If mask[i,j]==0, then data[i,j] is missing.
- weight: the weights to be used when calculating distances
- transpose:
- if False: rows are clustered;
- if True: columns are clustered.
- npass: number of times the k-means clustering algorithm is
performed, each time with a different (random) initial
condition.
- method: specifies how the center of a cluster is found:
- method == 'a': arithmetic mean;
- method == 'm': median.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance;
- dist == 'b': City Block distance;
- dist == 'c': Pearson correlation;
- dist == 'a': absolute value of the correlation;
- dist == 'u': uncentered correlation;
- dist == 'x': absolute uncentered correlation;
- dist == 's': Spearman's rank correlation;
- dist == 'k': Kendall's tau.
- initialid: the initial clustering from which the algorithm
should start.
If initialid is None, the routine carries out npass
repetitions of the EM algorithm, each time starting from a
different random initial clustering. If initialid is given,
the routine carries out the EM algorithm only once, starting
from the given initial clustering and without randomizing the
order in which items are assigned to clusters (i.e., using
the same order as in the data matrix). In that case, the
k-means algorithm is fully deterministic.
Return values:
- clusterid: array containing the index of the cluster to which each
item was assigned in the best k-means clustering solution that was
found in the npass runs;
- error: the within-cluster sum of distances for the returned k-means
clustering solution;
- nfound: the number of times this solution was found.
"""
data = __check_data(data)
shape = data.shape
if transpose:
ndata, nitems = shape
else:
nitems, ndata = shape
mask = __check_mask(mask, shape)
weight = __check_weight(weight, ndata)
clusterid, npass = __check_initialid(initialid, npass, nitems)
error, nfound = _cluster.kcluster(
data, nclusters, mask, weight, transpose, npass, method, dist, clusterid
)
return clusterid, error, nfound |
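# Minimal usage sketch (illustrative; assumes NumPy is available):
#
#     import numpy as np
#     data = np.array([[0.0, 0.1], [0.2, 0.0], [9.8, 10.0], [10.1, 9.9]])
#     clusterid, error, nfound = kcluster(data, nclusters=2, npass=10)
#     # clusterid is a length-4 array assigning each row to cluster 0 or 1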
Perform k-medoids clustering.
This function performs k-medoids clustering, and returns the cluster
assignments, the within-cluster sum of distances of the optimal
k-medoids clustering solution, and the number of times the optimal
solution was found.
Keyword arguments:
- distance: The distance matrix between the items. There are three
ways in which you can pass a distance matrix:
1. a 2D NumPy array (in which only the left-lower part of the array
will be accessed);
2. a 1D NumPy array containing the distances consecutively;
3. a list of rows containing the lower-triangular part of
the distance matrix.
Examples are:
>>> from numpy import array
>>> # option 1:
>>> distance = array([[0.0, 1.1, 2.3],
... [1.1, 0.0, 4.5],
... [2.3, 4.5, 0.0]])
>>> # option 2:
>>> distance = array([1.1, 2.3, 4.5])
>>> # option 3:
>>> distance = [array([]),
... array([1.1]),
... array([2.3, 4.5])]
These three correspond to the same distance matrix.
- nclusters: number of clusters (the 'k' in k-medoids)
- npass: the number of times the k-medoids clustering algorithm
is performed, each time with a different (random) initial
condition.
- initialid: the initial clustering from which the algorithm should start.
If initialid is not given, the routine carries out npass
repetitions of the EM algorithm, each time starting from a
different random initial clustering. If initialid is given,
the routine carries out the EM algorithm only once, starting
from the initial clustering specified by initialid and
without randomizing the order in which items are assigned to
clusters (i.e., using the same order as in the data matrix).
In that case, the k-medoids algorithm is fully deterministic.
Return values:
- clusterid: array containing the index of the cluster to which each
item was assigned in the best k-medoids clustering solution that was
found in the npass runs; note that the index of a cluster is the index
of the item that is the medoid of the cluster;
- error: the within-cluster sum of distances for the returned k-medoids
clustering solution;
- nfound: the number of times this solution was found. | def kmedoids(distance, nclusters=2, npass=1, initialid=None):
"""Perform k-medoids clustering.
This function performs k-medoids clustering, and returns the cluster
assignments, the within-cluster sum of distances of the optimal
k-medoids clustering solution, and the number of times the optimal
solution was found.
Keyword arguments:
- distance: The distance matrix between the items. There are three
ways in which you can pass a distance matrix:
1. a 2D NumPy array (in which only the left-lower part of the array
will be accessed);
2. a 1D NumPy array containing the distances consecutively;
3. a list of rows containing the lower-triangular part of
the distance matrix.
Examples are:
>>> from numpy import array
>>> # option 1:
>>> distance = array([[0.0, 1.1, 2.3],
... [1.1, 0.0, 4.5],
... [2.3, 4.5, 0.0]])
>>> # option 2:
>>> distance = array([1.1, 2.3, 4.5])
>>> # option 3:
>>> distance = [array([]),
... array([1.1]),
... array([2.3, 4.5])]
These three correspond to the same distance matrix.
- nclusters: number of clusters (the 'k' in k-medoids)
- npass: the number of times the k-medoids clustering algorithm
is performed, each time with a different (random) initial
condition.
- initialid: the initial clustering from which the algorithm should start.
If initialid is not given, the routine carries out npass
repetitions of the EM algorithm, each time starting from a
different random initial clustering. If initialid is given,
the routine carries out the EM algorithm only once, starting
from the initial clustering specified by initialid and
without randomizing the order in which items are assigned to
clusters (i.e., using the same order as in the data matrix).
In that case, the k-medoids algorithm is fully deterministic.
Return values:
- clusterid: array containing the index of the cluster to which each
item was assigned in the best k-medoids clustering solution that was
found in the npass runs; note that the index of a cluster is the index
of the item that is the medoid of the cluster;
- error: the within-cluster sum of distances for the returned k-medoids
clustering solution;
- nfound: the number of times this solution was found.
"""
distance = __check_distancematrix(distance)
nitems = len(distance)
clusterid, npass = __check_initialid(initialid, npass, nitems)
error, nfound = _cluster.kmedoids(distance, nclusters, npass, clusterid)
return clusterid, error, nfound |
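# Minimal usage sketch (illustrative), reusing the distance matrix layout
# shown in the docstring above:
#
#     from numpy import array
#     distance = array([[0.0, 1.1, 2.3],
#                       [1.1, 0.0, 4.5],
#                       [2.3, 4.5, 0.0]])
#     clusterid, error, nfound = kmedoids(distance, nclusters=2, npass=10)
#     # each entry of clusterid is the index of the item that is its
#     # cluster's medoid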
Perform hierarchical clustering, and return a Tree object.
This function implements the pairwise single, complete, centroid, and
average linkage hierarchical clustering methods.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i][j]==0, then data[i][j] is missing.
- weight: the weights to be used when calculating distances.
- transpose:
- if False, rows are clustered;
- if True, columns are clustered.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
- method: specifies which linkage method is used:
- method == 's': Single pairwise linkage
- method == 'm': Complete (maximum) pairwise linkage (default)
- method == 'c': Centroid linkage
- method == 'a': Average pairwise linkage
- distancematrix: The distance matrix between the items. There are
three ways in which you can pass a distance matrix:
1. a 2D NumPy array (in which only the left-lower part of the array
will be accessed);
2. a 1D NumPy array containing the distances consecutively;
3. a list of rows containing the lower-triangular part of
the distance matrix.
Examples are:
>>> from numpy import array
>>> # option 1:
>>> distance = array([[0.0, 1.1, 2.3],
... [1.1, 0.0, 4.5],
... [2.3, 4.5, 0.0]])
>>> # option 2:
>>> distance = array([1.1, 2.3, 4.5])
>>> # option 3:
>>> distance = [array([]),
... array([1.1]),
... array([2.3, 4.5])]
These three correspond to the same distance matrix.
PLEASE NOTE:
As the treecluster routine may shuffle the values in the
distance matrix as part of the clustering algorithm, be sure
to save this array in a different variable before calling
treecluster if you need it later.
Either data or distancematrix should be None. If distancematrix is None,
the hierarchical clustering solution is calculated from the values stored
in the argument data. If data is None, the hierarchical clustering solution
is instead calculated from the distance matrix. Pairwise centroid-linkage
clustering can be performed only from the data values and not from the
distance matrix. Pairwise single-, maximum-, and average-linkage clustering
can be calculated from the data values or from the distance matrix.
Return value:
treecluster returns a Tree object describing the hierarchical clustering
result. See the description of the Tree class for more information. | def treecluster(
data,
mask=None,
weight=None,
transpose=False,
method="m",
dist="e",
distancematrix=None,
):
"""Perform hierarchical clustering, and return a Tree object.
This function implements the pairwise single, complete, centroid, and
average linkage hierarchical clustering methods.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i][j]==0, then data[i][j] is missing.
- weight: the weights to be used when calculating distances.
- transpose:
- if False, rows are clustered;
- if True, columns are clustered.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
- method: specifies which linkage method is used:
- method == 's': Single pairwise linkage
- method == 'm': Complete (maximum) pairwise linkage (default)
- method == 'c': Centroid linkage
- method == 'a': Average pairwise linkage
- distancematrix: The distance matrix between the items. There are
three ways in which you can pass a distance matrix:
1. a 2D NumPy array (in which only the left-lower part of the array
will be accessed);
2. a 1D NumPy array containing the distances consecutively;
3. a list of rows containing the lower-triangular part of
the distance matrix.
Examples are:
>>> from numpy import array
>>> # option 1:
>>> distance = array([[0.0, 1.1, 2.3],
... [1.1, 0.0, 4.5],
... [2.3, 4.5, 0.0]])
>>> # option 2:
>>> distance = array([1.1, 2.3, 4.5])
>>> # option 3:
>>> distance = [array([]),
... array([1.1]),
... array([2.3, 4.5])]
These three correspond to the same distance matrix.
PLEASE NOTE:
As the treecluster routine may shuffle the values in the
distance matrix as part of the clustering algorithm, be sure
to save this array in a different variable before calling
treecluster if you need it later.
Either data or distancematrix should be None. If distancematrix is None,
the hierarchical clustering solution is calculated from the values stored
in the argument data. If data is None, the hierarchical clustering solution
is instead calculated from the distance matrix. Pairwise centroid-linkage
clustering can be performed only from the data values and not from the
distance matrix. Pairwise single-, maximum-, and average-linkage clustering
can be calculated from the data values or from the distance matrix.
Return value:
treecluster returns a Tree object describing the hierarchical clustering
result. See the description of the Tree class for more information.
"""
if data is None and distancematrix is None:
raise ValueError("use either data or distancematrix")
if data is not None and distancematrix is not None:
raise ValueError("use either data or distancematrix; do not use both")
if data is not None:
data = __check_data(data)
shape = data.shape
ndata = shape[0] if transpose else shape[1]
mask = __check_mask(mask, shape)
weight = __check_weight(weight, ndata)
if distancematrix is not None:
distancematrix = __check_distancematrix(distancematrix)
if mask is not None:
raise ValueError("mask is ignored if distancematrix is used")
if weight is not None:
raise ValueError("weight is ignored if distancematrix is used")
tree = Tree()
_cluster.treecluster(
tree, data, mask, weight, transpose, method, dist, distancematrix
)
return tree |
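# Minimal usage sketch (illustrative; assumes NumPy is available):
#
#     import numpy as np
#     data = np.array([[0.0, 0.1], [0.2, 0.0], [9.8, 10.0], [10.1, 9.9]])
#     tree = treecluster(data, method="a", dist="e")  # average linkage
#     clusterid = tree.cut(2)  # cut the dendrogram into two clusters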
Calculate a Self-Organizing Map.
This function implements a Self-Organizing Map on a rectangular grid.
Keyword arguments:
- data: nrows x ncolumns array containing the data values;
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i][j]==0, then data[i][j] is missing.
- weight: the weights to be used when calculating distances
- transpose:
- if False: rows are clustered;
- if True: columns are clustered.
- nxgrid: the horizontal dimension of the rectangular SOM map
- nygrid: the vertical dimension of the rectangular SOM map
- inittau: the initial value of tau (the neighborhood function)
- niter: the number of iterations
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
Return values:
- clusterid: array with two columns, with the number of rows equal to
the items that are being clustered. Each row in the array contains
the x and y coordinates of the cell in the rectangular SOM grid to
which the item was assigned.
- celldata: an array with dimensions [nxgrid, nygrid, number of columns]
if rows are being clustered, or [nxgrid, nygrid, number of rows] if
columns are being clustered.
Each element [ix, iy] of this array is a 1D vector containing the
data values for the centroid of the cluster in the SOM grid cell
with coordinates [ix, iy]. | def somcluster(
data,
mask=None,
weight=None,
transpose=False,
nxgrid=2,
nygrid=1,
inittau=0.02,
niter=1,
dist="e",
):
"""Calculate a Self-Organizing Map.
This function implements a Self-Organizing Map on a rectangular grid.
Keyword arguments:
- data: nrows x ncolumns array containing the data values;
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i][j]==0, then data[i][j] is missing.
- weight: the weights to be used when calculating distances
- transpose:
- if False: rows are clustered;
- if True: columns are clustered.
- nxgrid: the horizontal dimension of the rectangular SOM map
- nygrid: the vertical dimension of the rectangular SOM map
    - inittau: the initial value of tau (the neighborhood function)
- niter: the number of iterations
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
Return values:
- clusterid: array with two columns, with the number of rows equal to
the items that are being clustered. Each row in the array contains
the x and y coordinates of the cell in the rectangular SOM grid to
which the item was assigned.
- celldata: an array with dimensions [nxgrid, nygrid, number of columns]
    if rows are being clustered, or [nxgrid, nygrid, number of rows] if
columns are being clustered.
Each element [ix, iy] of this array is a 1D vector containing the
data values for the centroid of the cluster in the SOM grid cell
with coordinates [ix, iy].
"""
    data = __check_data(data)
    shape = data.shape
    if transpose:
        ndata, nitems = shape
    else:
        nitems, ndata = shape
    mask = __check_mask(mask, shape)
    weight = __check_weight(weight, ndata)
if nxgrid < 1:
raise ValueError("nxgrid should be a positive integer (default is 2)")
if nygrid < 1:
raise ValueError("nygrid should be a positive integer (default is 1)")
clusterids = np.ones((nitems, 2), dtype="intc")
celldata = np.empty((nxgrid, nygrid, ndata), dtype="d")
_cluster.somcluster(
clusterids, celldata, data, mask, weight, transpose, inittau, niter, dist
)
return clusterids, celldata |
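# Minimal usage sketch (illustrative; assumes NumPy is available):
#
#     import numpy as np
#     data = np.random.rand(20, 5)
#     clusterid, celldata = somcluster(data, nxgrid=3, nygrid=2, niter=100)
#     # clusterid[i] gives the (x, y) grid cell assigned to row i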
Calculate and return the distance between two clusters.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i, j]==0, then data[i, j] is missing.
- weight: the weights to be used when calculating distances
- index1: 1D array identifying which items belong to the
first cluster. If the cluster contains only one item, then
index1 can also be written as a single integer.
- index2: 1D array identifying which items belong to the
second cluster. If the cluster contains only one item, then
index2 can also be written as a single integer.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
- method: specifies how the distance between two clusters is defined:
- method == 'a': the distance between the arithmetic means
of the two clusters
- method == 'm': the distance between the medians of the two clusters
- method == 's': the smallest pairwise distance between members
of the two clusters
- method == 'x': the largest pairwise distance between members
of the two clusters
- method == 'v': average of the pairwise distances between members
of the two clusters
- transpose:
- if False: clusters of rows are considered;
- if True: clusters of columns are considered. | def clusterdistance(
data,
mask=None,
weight=None,
index1=None,
index2=None,
method="a",
dist="e",
transpose=False,
):
"""Calculate and return the distance between two clusters.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i, j]==0, then data[i, j] is missing.
- weight: the weights to be used when calculating distances
- index1: 1D array identifying which items belong to the
first cluster. If the cluster contains only one item, then
index1 can also be written as a single integer.
- index2: 1D array identifying which items belong to the
second cluster. If the cluster contains only one item, then
index2 can also be written as a single integer.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
- method: specifies how the distance between two clusters is defined:
- method == 'a': the distance between the arithmetic means
of the two clusters
- method == 'm': the distance between the medians of the two clusters
- method == 's': the smallest pairwise distance between members
of the two clusters
- method == 'x': the largest pairwise distance between members
of the two clusters
- method == 'v': average of the pairwise distances between members
of the two clusters
- transpose:
- if False: clusters of rows are considered;
- if True: clusters of columns are considered.
"""
data = __check_data(data)
shape = data.shape
ndata = shape[0] if transpose else shape[1]
mask = __check_mask(mask, shape)
weight = __check_weight(weight, ndata)
index1 = __check_index(index1)
index2 = __check_index(index2)
return _cluster.clusterdistance(
data, mask, weight, index1, index2, method, dist, transpose
) |
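# Minimal usage sketch (illustrative; assumes NumPy is available):
#
#     import numpy as np
#     data = np.array([[0.0, 0.1], [0.2, 0.0], [9.8, 10.0], [10.1, 9.9]])
#     d = clusterdistance(data, index1=[0, 1], index2=[2, 3], method="a")
#     # Euclidean distance between the mean profiles of rows {0, 1}
#     # and rows {2, 3}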
Calculate and return the centroid of each cluster.
The clustercentroids routine calculates the cluster centroids, given to
which cluster each item belongs. The centroid is defined as either
the mean or the median over all items for each dimension.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i, j]==0, then data[i, j] is missing.
- clusterid: array containing the cluster number for each item.
The cluster number should be non-negative.
- method: specifies whether the centroid is calculated from the
arithmetic mean (method == 'a', default) or the median (method == 'm')
over each dimension.
- transpose: if False, each row contains the data for one item;
if True, each column contains the data for one item.
Return values:
- cdata: 2D array containing the cluster centroids.
If transpose is False, then the dimensions of cdata are
nclusters x ncolumns.
If transpose is True, then the dimensions of cdata are
nrows x nclusters.
- cmask: 2D array of integers describing which items in cdata,
if any, are missing. | def clustercentroids(data, mask=None, clusterid=None, method="a", transpose=False):
"""Calculate and return the centroid of each cluster.
The clustercentroids routine calculates the cluster centroids, given to
which cluster each item belongs. The centroid is defined as either
the mean or the median over all items for each dimension.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i, j]==0, then data[i, j] is missing.
- clusterid: array containing the cluster number for each item.
The cluster number should be non-negative.
- method: specifies whether the centroid is calculated from the
arithmetic mean (method == 'a', default) or the median (method == 'm')
over each dimension.
- transpose: if False, each row contains the data for one item;
if True, each column contains the data for one item.
Return values:
- cdata: 2D array containing the cluster centroids.
If transpose is False, then the dimensions of cdata are
nclusters x ncolumns.
If transpose is True, then the dimensions of cdata are
nrows x nclusters.
- cmask: 2D array of integers describing which items in cdata,
if any, are missing.
"""
data = __check_data(data)
mask = __check_mask(mask, data.shape)
nrows, ncolumns = data.shape
if clusterid is None:
n = ncolumns if transpose else nrows
clusterid = np.zeros(n, dtype="intc")
nclusters = 1
else:
clusterid = np.require(clusterid, dtype="intc", requirements="C")
nclusters = max(clusterid + 1)
if transpose:
shape = (nrows, nclusters)
else:
shape = (nclusters, ncolumns)
cdata = np.zeros(shape, dtype="d")
cmask = np.zeros(shape, dtype="intc")
_cluster.clustercentroids(data, mask, clusterid, method, transpose, cdata, cmask)
return cdata, cmask |
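# Minimal usage sketch (illustrative; assumes NumPy is available):
#
#     import numpy as np
#     data = np.array([[0.0, 0.1], [0.2, 0.0], [9.8, 10.0], [10.1, 9.9]])
#     clusterid = np.array([0, 0, 1, 1], dtype="intc")
#     cdata, cmask = clustercentroids(data, clusterid=clusterid)
#     # cdata has shape (2, 2): one mean profile per cluster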
Calculate and return a distance matrix from the data.
This function returns the distance matrix calculated from the data.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i, j]==0, then data[i, j] is missing.
- weight: the weights to be used when calculating distances.
- transpose: if False: the distances between rows are calculated;
if True: the distances between columns are calculated.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
Return value:
The distance matrix is returned as a list of 1D arrays containing the
distance matrix calculated from the data. The number of columns in each
row is equal to the row number. Hence, the first row has zero length.
For example:
>>> from numpy import array
>>> from Bio.Cluster import distancematrix
>>> data = array([[0, 1, 2, 3],
... [4, 5, 6, 7],
... [8, 9, 10, 11],
... [1, 2, 3, 4]])
>>> distances = distancematrix(data, dist='e')
>>> distances
[array([], dtype=float64), array([16.]), array([64., 16.]), array([ 1., 9., 49.])]
which can be rewritten as::
distances = [array([], dtype=float64),
array([ 16.]),
array([ 64., 16.]),
array([ 1., 9., 49.])]
This corresponds to the distance matrix::
[ 0., 16., 64., 1.]
[16., 0., 16., 9.]
[64., 16., 0., 49.]
[ 1., 9., 49., 0.] | def distancematrix(data, mask=None, weight=None, transpose=False, dist="e"):
"""Calculate and return a distance matrix from the data.
This function returns the distance matrix calculated from the data.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
- mask: nrows x ncolumns array of integers, showing which data are
missing. If mask[i, j]==0, then data[i, j] is missing.
- weight: the weights to be used when calculating distances.
- transpose: if False: the distances between rows are calculated;
if True: the distances between columns are calculated.
- dist: specifies the distance function to be used:
- dist == 'e': Euclidean distance
- dist == 'b': City Block distance
- dist == 'c': Pearson correlation
- dist == 'a': absolute value of the correlation
- dist == 'u': uncentered correlation
- dist == 'x': absolute uncentered correlation
- dist == 's': Spearman's rank correlation
- dist == 'k': Kendall's tau
Return value:
The distance matrix is returned as a list of 1D arrays containing the
    distance matrix calculated from the data. The number of columns in each
row is equal to the row number. Hence, the first row has zero length.
For example:
>>> from numpy import array
>>> from Bio.Cluster import distancematrix
>>> data = array([[0, 1, 2, 3],
... [4, 5, 6, 7],
... [8, 9, 10, 11],
... [1, 2, 3, 4]])
>>> distances = distancematrix(data, dist='e')
>>> distances
[array([], dtype=float64), array([16.]), array([64., 16.]), array([ 1., 9., 49.])]
which can be rewritten as::
distances = [array([], dtype=float64),
array([ 16.]),
array([ 64., 16.]),
array([ 1., 9., 49.])]
This corresponds to the distance matrix::
[ 0., 16., 64., 1.]
[16., 0., 16., 9.]
[64., 16., 0., 49.]
[ 1., 9., 49., 0.]
"""
data = __check_data(data)
shape = data.shape
mask = __check_mask(mask, shape)
if transpose:
ndata, nitems = shape
else:
nitems, ndata = shape
weight = __check_weight(weight, ndata)
matrix = [np.empty(i, dtype="d") for i in range(nitems)]
_cluster.distancematrix(data, mask, weight, transpose, dist, matrix)
return matrix |
Perform principal component analysis.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
Return value:
This function returns an array containing the mean of each column, the
principal components as an nmin x ncolumns array, as well as the
coordinates (an nrows x nmin array) of the data along the principal
components, and the associated eigenvalues. The principal components, the
coordinates, and the eigenvalues are sorted by the magnitude of the
eigenvalue, with the largest eigenvalues appearing first. Here, nmin is
the smaller of nrows and ncolumns.
Adding the column means to the dot product of the coordinates and the
principal components recreates the data matrix:
>>> from numpy import array, dot
>>> from Bio.Cluster import pca
>>> matrix = array([[ 0., 0., 0.],
... [ 1., 0., 0.],
... [ 7., 3., 0.],
... [ 4., 2., 6.]])
>>> columnmean, coordinates, pc, _ = pca(matrix)
>>> m = matrix - (columnmean + dot(coordinates, pc))
>>> abs(m) < 1e-12
array([[ True, True, True],
[ True, True, True],
[ True, True, True],
[ True, True, True]]) | def pca(data):
"""Perform principal component analysis.
Keyword arguments:
- data: nrows x ncolumns array containing the data values.
Return value:
This function returns an array containing the mean of each column, the
principal components as an nmin x ncolumns array, as well as the
coordinates (an nrows x nmin array) of the data along the principal
components, and the associated eigenvalues. The principal components, the
coordinates, and the eigenvalues are sorted by the magnitude of the
eigenvalue, with the largest eigenvalues appearing first. Here, nmin is
the smaller of nrows and ncolumns.
Adding the column means to the dot product of the coordinates and the
principal components recreates the data matrix:
>>> from numpy import array, dot
>>> from Bio.Cluster import pca
>>> matrix = array([[ 0., 0., 0.],
... [ 1., 0., 0.],
... [ 7., 3., 0.],
... [ 4., 2., 6.]])
>>> columnmean, coordinates, pc, _ = pca(matrix)
>>> m = matrix - (columnmean + dot(coordinates, pc))
>>> abs(m) < 1e-12
array([[ True, True, True],
[ True, True, True],
[ True, True, True],
[ True, True, True]])
"""
data = __check_data(data)
nrows, ncols = data.shape
nmin = min(nrows, ncols)
columnmean = np.empty(ncols, dtype="d")
pc = np.empty((nmin, ncols), dtype="d")
coordinates = np.empty((nrows, nmin), dtype="d")
eigenvalues = np.empty(nmin, dtype="d")
_cluster.pca(data, columnmean, coordinates, pc, eigenvalues)
return columnmean, coordinates, pc, eigenvalues |
Read gene expression data from the file handle and return a Record.
The file should be in the file format defined for Michael Eisen's
Cluster/TreeView program. | def read(handle):
"""Read gene expression data from the file handle and return a Record.
The file should be in the file format defined for Michael Eisen's
Cluster/TreeView program.
"""
return Record(handle) |
McDonald-Kreitman test for neutrality.
Implement the McDonald-Kreitman test for neutrality (PMID: 1904993)
This method counts changes rather than sites
(http://mkt.uab.es/mkt/help_mkt.asp).
Arguments:
- codon_alns - list of CodonAlignment objects to compare (each
  CodonAlignment object corresponds to a gene sampled from a species)
Return the p-value of test result. | def mktest(codon_alns, codon_table=None, alpha=0.05):
"""McDonald-Kreitman test for neutrality.
Implement the McDonald-Kreitman test for neutrality (PMID: 1904993)
This method counts changes rather than sites
(http://mkt.uab.es/mkt/help_mkt.asp).
Arguments:
    - codon_alns - list of CodonAlignment objects to compare (each
      CodonAlignment object corresponds to a gene sampled from a species)
Return the p-value of test result.
"""
import copy
if codon_table is None:
codon_table = CodonTable.generic_by_id[1]
if not all(isinstance(i, CodonAlignment) for i in codon_alns):
raise TypeError("mktest accepts CodonAlignment list.")
codon_aln_len = [i.get_alignment_length() for i in codon_alns]
if len(set(codon_aln_len)) != 1:
raise RuntimeError(
"CodonAlignment object for mktest should be of equal length."
)
codon_num = codon_aln_len[0] // 3
# prepare codon_dict (taking stop codon as an extra amino acid)
codon_dict = copy.deepcopy(codon_table.forward_table)
for stop in codon_table.stop_codons:
codon_dict[stop] = "stop"
# prepare codon_lst
codon_lst = []
for codon_aln in codon_alns:
codon_lst.append([])
for i in codon_aln:
codon_lst[-1].append(_get_codon_list(i.seq))
codon_set = []
for i in range(codon_num):
uniq_codons = []
for j in codon_lst:
uniq_codon = {k[i] for k in j}
uniq_codons.append(uniq_codon)
codon_set.append(uniq_codons)
syn_fix, nonsyn_fix, syn_poly, nonsyn_poly = 0, 0, 0, 0
G, nonsyn_G = _get_codon2codon_matrix(codon_table=codon_table)
for i in codon_set:
all_codon = i[0].union(*i[1:])
if "-" in all_codon or len(all_codon) == 1:
continue
fix_or_not = all(len(k) == 1 for k in i)
if fix_or_not:
# fixed
nonsyn_subgraph = _get_subgraph(all_codon, nonsyn_G)
subgraph = _get_subgraph(all_codon, G)
this_non = _count_replacement(all_codon, nonsyn_subgraph)
this_syn = _count_replacement(all_codon, subgraph) - this_non
nonsyn_fix += this_non
syn_fix += this_syn
else:
# not fixed
nonsyn_subgraph = _get_subgraph(all_codon, nonsyn_G)
subgraph = _get_subgraph(all_codon, G)
this_non = _count_replacement(all_codon, nonsyn_subgraph)
this_syn = _count_replacement(all_codon, subgraph) - this_non
nonsyn_poly += this_non
syn_poly += this_syn
return _G_test([syn_fix, nonsyn_fix, syn_poly, nonsyn_poly]) |
Get codon codon substitution matrix (PRIVATE).
Elements in the matrix are number of synonymous and nonsynonymous
substitutions required for the substitution. | def _get_codon2codon_matrix(codon_table):
"""Get codon codon substitution matrix (PRIVATE).
Elements in the matrix are number of synonymous and nonsynonymous
substitutions required for the substitution.
"""
import copy
base_tuple = ("A", "T", "C", "G")
codons = [
i
for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons
if "U" not in i
]
# set up codon_dict considering stop codons
codon_dict = copy.deepcopy(codon_table.forward_table)
for stop in codon_table.stop_codons:
codon_dict[stop] = "stop"
# count site
num = len(codons)
G = {} # graph for substitution
nonsyn_G = {} # graph for nonsynonymous substitution
graph = {}
graph_nonsyn = {}
for i, codon in enumerate(codons):
graph[codon] = {}
graph_nonsyn[codon] = {}
for p, b in enumerate(codon):
for j in base_tuple:
tmp_codon = codon[0:p] + j + codon[p + 1 :]
if codon_dict[codon] != codon_dict[tmp_codon]:
graph_nonsyn[codon][tmp_codon] = 1
graph[codon][tmp_codon] = 1
else:
if codon != tmp_codon:
graph_nonsyn[codon][tmp_codon] = 0.1
graph[codon][tmp_codon] = 1
for codon1 in codons:
nonsyn_G[codon1] = {}
G[codon1] = {}
for codon2 in codons:
if codon1 == codon2:
nonsyn_G[codon1][codon2] = 0
G[codon1][codon2] = 0
else:
nonsyn_G[codon1][codon2] = _dijkstra(graph_nonsyn, codon1, codon2)
G[codon1][codon2] = _dijkstra(graph, codon1, codon2)
return G, nonsyn_G |
Dijkstra's algorithm Python implementation (PRIVATE).
Algorithm adapted from
http://thomas.pelletier.im/2010/02/dijkstras-algorithm-python-implementation/.
However, an obvious bug in::
if D[child_node] >(<) D[node] + child_value:
is fixed.
This function will return the distance between start and end.
Arguments:
- graph: Dictionary of dictionary (keys are vertices).
- start: Start vertex.
- end: End vertex.
Output:
The distance (sum of edge weights) along the shortest path from start to end.
"""Dijkstra's algorithm Python implementation (PRIVATE).
Algorithm adapted from
http://thomas.pelletier.im/2010/02/dijkstras-algorithm-python-implementation/.
However, an obvious bug in::
if D[child_node] >(<) D[node] + child_value:
is fixed.
This function will return the distance between start and end.
Arguments:
- graph: Dictionary of dictionary (keys are vertices).
- start: Start vertex.
- end: End vertex.
Output:
    The distance (sum of edge weights) along the shortest path from start to end.
"""
D = {} # Final distances dict
P = {} # Predecessor dict
# Fill the dicts with default values
for node in graph.keys():
        D[node] = float("inf")  # Vertices start as unreachable
P[node] = "" # Vertices have no predecessors
D[start] = 0 # The start vertex needs no move
unseen_nodes = list(graph.keys()) # All nodes are unseen
while len(unseen_nodes) > 0:
# Select the node with the lowest value in D (final distance)
shortest = None
node = ""
for temp_node in unseen_nodes:
if shortest is None:
shortest = D[temp_node]
node = temp_node
elif D[temp_node] < shortest:
shortest = D[temp_node]
node = temp_node
# Remove the selected node from unseen_nodes
unseen_nodes.remove(node)
# For each child (ie: connected vertex) of the current node
for child_node, child_value in graph[node].items():
if D[child_node] > D[node] + child_value:
D[child_node] = D[node] + child_value
# To go to child_node, you have to go through node
P[child_node] = node
if node == end:
break
# Set a clean path
path = []
# We begin from the end
node = end
distance = 0
# While we are not arrived at the beginning
while node != start:
if path.count(node) == 0:
path.insert(0, node) # Insert the predecessor of the current node
node = P[node] # The current node becomes its predecessor
else:
break
path.insert(0, start) # Finally, insert the start vertex
for i in range(len(path) - 1):
distance += graph[path[i]][path[i + 1]]
return distance |
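# Illustrative check (hypothetical toy graph, not from the original source):
#
#     g = {"A": {"B": 1, "C": 4},
#          "B": {"A": 1, "C": 1},
#          "C": {"A": 4, "B": 1}}
#     _dijkstra(g, "A", "C")  # -> 2, via the path A -> B -> C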
Count replacement needed for a given codon_set (PRIVATE). | def _count_replacement(codon_set, G):
"""Count replacement needed for a given codon_set (PRIVATE)."""
from math import floor
    if len(codon_set) == 1:
        return 0
    elif len(codon_set) == 2:
        codons = list(codon_set)
        return floor(G[codons[0]][codons[1]])
    else:
        return _prim(G)
Prim's algorithm to find minimum spanning tree (PRIVATE).
Code is adapted from
http://programmingpraxis.com/2010/04/09/minimum-spanning-tree-prims-algorithm/ | def _prim(G):
"""Prim's algorithm to find minimum spanning tree (PRIVATE).
Code is adapted from
http://programmingpraxis.com/2010/04/09/minimum-spanning-tree-prims-algorithm/
"""
from math import floor
from collections import defaultdict
from heapq import heapify, heappop, heappush
nodes = []
edges = []
for i in G.keys():
nodes.append(i)
for j in G[i]:
if (i, j, G[i][j]) not in edges and (j, i, G[i][j]) not in edges:
edges.append((i, j, G[i][j]))
conn = defaultdict(list)
for n1, n2, c in edges:
conn[n1].append((c, n1, n2))
conn[n2].append((c, n2, n1))
mst = [] # minimum spanning tree
    used = {nodes[0]}  # one-element set; set(nodes[0]) would split the string into characters
usable_edges = conn[nodes[0]][:]
heapify(usable_edges)
while usable_edges:
cost, n1, n2 = heappop(usable_edges)
if n2 not in used:
used.add(n2)
mst.append((n1, n2, cost))
for e in conn[n2]:
if e[2] not in used:
heappush(usable_edges, e)
length = 0
for p in mst:
length += floor(p[2])
return length |
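# Illustrative check (hypothetical toy graph, not from the original source):
#
#     g = {"A": {"B": 1, "C": 4},
#          "B": {"A": 1, "C": 1},
#          "C": {"A": 4, "B": 1}}
#     _prim(g)  # -> 2: MST edges A-B and B-C, floor(1) + floor(1)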
Get the subgraph that contains all codons in list (PRIVATE). | def _get_subgraph(codons, G):
"""Get the subgraph that contains all codons in list (PRIVATE)."""
subgraph = {}
for i in codons:
subgraph[i] = {}
for j in codons:
if i != j:
subgraph[i][j] = G[i][j]
return subgraph |
G test for 2x2 contingency table (PRIVATE).
Arguments:
- site_counts - [syn_fix, nonsyn_fix, syn_poly, nonsyn_poly]
>>> print("%0.6f" % _G_test([17, 7, 42, 2]))
0.004924 | def _G_test(site_counts):
"""G test for 2x2 contingency table (PRIVATE).
Arguments:
- site_counts - [syn_fix, nonsyn_fix, syn_poly, nonsyn_poly]
>>> print("%0.6f" % _G_test([17, 7, 42, 2]))
0.004924
"""
# TODO:
# Apply continuity correction for Chi-square test.
    from math import erfc, log, sqrt
G = 0
tot = sum(site_counts)
tot_syn = site_counts[0] + site_counts[2]
tot_non = site_counts[1] + site_counts[3]
tot_fix = sum(site_counts[:2])
tot_poly = sum(site_counts[2:])
exp = [
tot_fix * tot_syn / tot,
tot_fix * tot_non / tot,
tot_poly * tot_syn / tot,
tot_poly * tot_non / tot,
]
for obs, ex in zip(site_counts, exp):
G += obs * log(obs / ex)
# with only 1 degree of freedom for a 2x2 table,
# the cumulative chi-square distribution reduces to a simple form:
return erfc(sqrt(G)) |
List of codons according to full_rf_table for counting (PRIVATE). | def _get_codon_list(codonseq):
"""List of codons according to full_rf_table for counting (PRIVATE)."""
# if not isinstance(codonseq, CodonSeq):
# raise TypeError("_get_codon_list accept a CodonSeq object "
# "({0} detected)".format(type(codonseq)))
full_rf_table = codonseq.get_full_rf_table()
codon_lst = []
for i, k in enumerate(full_rf_table):
if isinstance(k, int):
start = k
try:
end = int(full_rf_table[i + 1])
except IndexError:
end = start + 3
            this_codon = str(codonseq[start:end])
            if len(this_codon) == 3:
                codon_lst.append(this_codon)
            else:
                codon_lst.append(this_codon.replace("-", ""))
elif str(codonseq[int(k) : int(k) + 3]) == "---":
codon_lst.append("---")
else:
# this may be problematic, as normally no codon should
# fall into this condition
            codon_lst.append(str(codonseq[int(k) : int(k) + 3]))
return codon_lst |
Calculate dN and dS of the given two sequences.
Available methods:
- NG86 - `Nei and Gojobori (1986)`_ (PMID 3444411).
- LWL85 - `Li et al. (1985)`_ (PMID 3916709).
- ML - `Goldman and Yang (1994)`_ (PMID 7968486).
- YN00 - `Yang and Nielsen (2000)`_ (PMID 10666704).
.. _`Nei and Gojobori (1986)`: http://www.ncbi.nlm.nih.gov/pubmed/3444411
.. _`Li et al. (1985)`: http://www.ncbi.nlm.nih.gov/pubmed/3916709
.. _`Goldman and Yang (1994)`: http://mbe.oxfordjournals.org/content/11/5/725
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236
Arguments:
- codon_seq1 - CodonSeq or a SeqRecord that contains a CodonSeq
- codon_seq2 - CodonSeq or a SeqRecord that contains a CodonSeq
- k - transition/transversion rate ratio
- cfreq - Current codon frequency vector can only be specified
when you are using ML method. Possible ways of
getting cfreq are: F1x4, F3x4 and F61. | def cal_dn_ds(codon_seq1, codon_seq2, method="NG86", codon_table=None, k=1, cfreq=None):
"""Calculate dN and dS of the given two sequences.
Available methods:
- NG86 - `Nei and Gojobori (1986)`_ (PMID 3444411).
- LWL85 - `Li et al. (1985)`_ (PMID 3916709).
- ML - `Goldman and Yang (1994)`_ (PMID 7968486).
- YN00 - `Yang and Nielsen (2000)`_ (PMID 10666704).
.. _`Nei and Gojobori (1986)`: http://www.ncbi.nlm.nih.gov/pubmed/3444411
.. _`Li et al. (1985)`: http://www.ncbi.nlm.nih.gov/pubmed/3916709
.. _`Goldman and Yang (1994)`: http://mbe.oxfordjournals.org/content/11/5/725
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236
Arguments:
    - codon_seq1 - CodonSeq or a SeqRecord that contains a CodonSeq
    - codon_seq2 - CodonSeq or a SeqRecord that contains a CodonSeq
    - k - transition/transversion rate ratio
- cfreq - Current codon frequency vector can only be specified
when you are using ML method. Possible ways of
getting cfreq are: F1x4, F3x4 and F61.
"""
if isinstance(codon_seq1, CodonSeq) and isinstance(codon_seq2, CodonSeq):
pass
elif isinstance(codon_seq1, SeqRecord) and isinstance(codon_seq2, SeqRecord):
codon_seq1 = codon_seq1.seq
codon_seq2 = codon_seq2.seq
else:
raise TypeError(
"cal_dn_ds accepts two CodonSeq objects or SeqRecord "
"that contains CodonSeq as its seq!"
)
if len(codon_seq1.get_full_rf_table()) != len(codon_seq2.get_full_rf_table()):
raise RuntimeError(
f"full_rf_table length of seq1 ({len(codon_seq1.get_full_rf_table())})"
f" and seq2 ({len(codon_seq2.get_full_rf_table())}) are not the same"
)
if cfreq is None:
cfreq = "F3x4"
elif cfreq is not None and method != "ML":
raise RuntimeError("cfreq can only be specified when you are using ML method")
if cfreq not in ("F1x4", "F3x4", "F61"):
import warnings
warnings.warn(
f"Unknown cfreq ({cfreq}). "
"Only F1x4, F3x4 and F61 are acceptable. Used F3x4 in the following."
)
cfreq = "F3x4"
if codon_table is None:
codon_table = CodonTable.generic_by_id[1]
seq1_codon_lst = _get_codon_list(codon_seq1)
seq2_codon_lst = _get_codon_list(codon_seq2)
# remove gaps in seq_codon_lst
seq1 = []
seq2 = []
for i, j in zip(seq1_codon_lst, seq2_codon_lst):
if ("-" not in i) and ("-" not in j):
seq1.append(i)
seq2.append(j)
dnds_func = {"ML": _ml, "NG86": _ng86, "LWL85": _lwl85, "YN00": _yn00}
if method == "ML":
return dnds_func[method](seq1, seq2, cfreq, codon_table)
else:
return dnds_func[method](seq1, seq2, k, codon_table) |
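# Illustrative sketch (assumes two aligned CodonSeq objects built elsewhere,
# e.g. via Bio.codonalign):
#
#     dN, dS = cal_dn_ds(codon_seq1, codon_seq2, method="NG86")
#     omega = dN / dS if dS > 0 else float("inf")  # dN/dS ratio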
NG86 method main function (PRIVATE). | def _ng86(seq1, seq2, k, codon_table):
"""NG86 method main function (PRIVATE)."""
S_sites1, N_sites1 = _count_site_NG86(seq1, codon_table=codon_table, k=k)
S_sites2, N_sites2 = _count_site_NG86(seq2, codon_table=codon_table, k=k)
S_sites = (S_sites1 + S_sites2) / 2.0
N_sites = (N_sites1 + N_sites2) / 2.0
SN = [0, 0]
for i, j in zip(seq1, seq2):
SN = [
m + n for m, n in zip(SN, _count_diff_NG86(i, j, codon_table=codon_table))
]
ps = SN[0] / S_sites
pn = SN[1] / N_sites
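    # Jukes-Cantor correction of the raw proportions ps and pn; the formula
    # -3/4 * ln(1 - 4/3 * p) is undefined for p >= 3/4, so -1 is returned
    # there as a sentinel for saturation.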
if ps < 3 / 4:
dS = abs(-3.0 / 4 * log(1 - 4.0 / 3 * ps))
else:
dS = -1
if pn < 3 / 4:
dN = abs(-3.0 / 4 * log(1 - 4.0 / 3 * pn))
else:
dN = -1
return dN, dS |
Count synonymous and non-synonymous sites of a list of codons (PRIVATE).
Arguments:
- codon_lst - A three letter codon list from a CodonSeq object.
This can be returned from _get_codon_list method.
- k - transition/transversion rate ratio. | def _count_site_NG86(codon_lst, codon_table, k=1):
"""Count synonymous and non-synonymous sites of a list of codons (PRIVATE).
Arguments:
- codon_lst - A three letter codon list from a CodonSeq object.
This can be returned from _get_codon_list method.
- k - transition/transversion rate ratio.
"""
S_site = 0 # synonymous sites
N_site = 0 # non-synonymous sites
purine = ("A", "G")
pyrimidine = ("T", "C")
base_tuple = ("A", "T", "C", "G")
for codon in codon_lst:
neighbor_codon = {"transition": [], "transversion": []}
# classify neighbor codons
codon = codon.replace("U", "T")
if codon == "---":
continue
for n, i in enumerate(codon):
for j in base_tuple:
if i == j:
pass
elif i in purine and j in purine:
codon_chars = list(codon)
codon_chars[n] = j
this_codon = "".join(codon_chars)
neighbor_codon["transition"].append(this_codon)
elif i in pyrimidine and j in pyrimidine:
codon_chars = list(codon)
codon_chars[n] = j
this_codon = "".join(codon_chars)
neighbor_codon["transition"].append(this_codon)
else:
codon_chars = list(codon)
codon_chars[n] = j
this_codon = "".join(codon_chars)
neighbor_codon["transversion"].append(this_codon)
# count synonymous and non-synonymous sites
aa = codon_table.forward_table[codon]
this_codon_N_site = this_codon_S_site = 0
for neighbor in neighbor_codon["transition"]:
if neighbor in codon_table.stop_codons:
this_codon_N_site += k
elif codon_table.forward_table[neighbor] == aa:
this_codon_S_site += k
else:
this_codon_N_site += k
for neighbor in neighbor_codon["transversion"]:
if neighbor in codon_table.stop_codons:
this_codon_N_site += 1
elif codon_table.forward_table[neighbor] == aa:
this_codon_S_site += 1
else:
this_codon_N_site += 1
norm_const = (this_codon_N_site + this_codon_S_site) / 3
S_site += this_codon_S_site / norm_const
N_site += this_codon_N_site / norm_const
return (S_site, N_site) |
Count differences between two codons, three-letter string (PRIVATE).
The function will take multiple pathways from codon1 to codon2
into account. | def _count_diff_NG86(codon1, codon2, codon_table):
"""Count differences between two codons, three-letter string (PRIVATE).
The function will take multiple pathways from codon1 to codon2
into account.
"""
if not isinstance(codon1, str) or not isinstance(codon2, str):
raise TypeError(
"_count_diff_NG86 accepts string object to represent codon"
f" ({type(codon1)}, {type(codon2)} detected)"
)
if len(codon1) != 3 or len(codon2) != 3:
raise RuntimeError(
"codon should be three letter string"
f" ({len(codon1)}, {len(codon2)} detected)"
)
SN = [0, 0] # synonymous and nonsynonymous counts
if codon1 == "---" or codon2 == "---":
return SN
base_tuple = ("A", "C", "G", "T")
if not all(i in base_tuple for i in codon1):
raise RuntimeError(
f"Unrecognized character detected in codon1 {codon1}"
" (Codons consist of A, T, C or G)"
)
if not all(i in base_tuple for i in codon2):
raise RuntimeError(
f"Unrecognized character detected in codon2 {codon2}"
" (Codons consist of A, T, C or G)"
)
if codon1 == codon2:
return SN
else:
diff_pos = []
for i, k in enumerate(zip(codon1, codon2)):
if k[0] != k[1]:
diff_pos.append(i)
def compare_codon(codon1, codon2, codon_table, weight=1):
"""Compare two codon accounting for different pathways."""
sd = nd = 0
if len(set(map(codon_table.forward_table.get, [codon1, codon2]))) == 1:
sd += weight
else:
nd += weight
return (sd, nd)
if len(diff_pos) == 1:
SN = [
i + j
for i, j in zip(
SN, compare_codon(codon1, codon2, codon_table=codon_table)
)
]
elif len(diff_pos) == 2:
for i in diff_pos:
temp_codon = codon1[:i] + codon2[i] + codon1[i + 1 :]
SN = [
i + j
for i, j in zip(
SN,
compare_codon(
codon1, temp_codon, codon_table=codon_table, weight=0.5
),
)
]
SN = [
i + j
for i, j in zip(
SN,
compare_codon(
temp_codon, codon2, codon_table=codon_table, weight=0.5
),
)
]
elif len(diff_pos) == 3:
            paths = list(permutations([0, 1, 2], 3))
            for p in paths:
                tmp1 = codon1[: p[0]] + codon2[p[0]] + codon1[p[0] + 1 :]
                tmp2 = tmp1[: p[1]] + codon2[p[1]] + tmp1[p[1] + 1 :]
                # each of the six pathways contributes three single-base
                # steps, each weighted 0.5/3, so that all pathways together
                # count the three observed differences exactly once
                SN = [
                    i + j
                    for i, j in zip(
                        SN, compare_codon(codon1, tmp1, codon_table, weight=0.5 / 3)
                    )
                ]
                SN = [
                    i + j
                    for i, j in zip(
                        SN, compare_codon(tmp1, tmp2, codon_table, weight=0.5 / 3)
                    )
                ]
                SN = [
                    i + j
                    for i, j in zip(
                        SN, compare_codon(tmp2, codon2, codon_table, weight=0.5 / 3)
                    )
                ]
return SN |
LWL85 method main function (PRIVATE).
Nomenclature is according to Li et al. (1985), PMID 3916709. | def _lwl85(seq1, seq2, k, codon_table):
"""LWL85 method main function (PRIVATE).
Nomenclature is according to Li et al. (1985), PMID 3916709.
"""
codon_fold_dict = _get_codon_fold(codon_table)
# count number of sites in different degenerate classes
fold0 = [0, 0]
fold2 = [0, 0]
fold4 = [0, 0]
for codon in seq1 + seq2:
fold_num = codon_fold_dict[codon]
for f in fold_num:
if f == "0":
fold0[0] += 1
elif f == "2":
fold2[0] += 1
elif f == "4":
fold4[0] += 1
L = [sum(fold0) / 2.0, sum(fold2) / 2.0, sum(fold4) / 2.0]
# count number of differences in different degenerate classes
PQ = [0] * 6 # with P0, P2, P4, Q0, Q2, Q4 in each position
for codon1, codon2 in zip(seq1, seq2):
if (codon1 == "---" or codon2 == "---") or codon1 == codon2:
continue
else:
PQ = [
i + j
for i, j in zip(
PQ, _diff_codon(codon1, codon2, fold_dict=codon_fold_dict)
)
]
PQ = [i / j for i, j in zip(PQ, L * 2)]
P = PQ[:3]
Q = PQ[3:]
A = [
(1.0 / 2) * log(1.0 / (1 - 2 * i - j)) - (1.0 / 4) * log(1.0 / (1 - 2 * j))
for i, j in zip(P, Q)
]
B = [(1.0 / 2) * log(1.0 / (1 - 2 * i)) for i in Q]
    # LWL85: dS = 3[L2*A2 + L4*(A4+B4)] / (L2 + 3*L4);
    #        dN = 3[L2*B2 + L0*(A0+B0)] / (2*L2 + 3*L0), with L = [L0, L2, L4]
    dS = 3 * (L[1] * A[1] + L[2] * (A[2] + B[2])) / (L[1] + 3 * L[2])
    dN = 3 * (L[1] * B[1] + L[0] * (A[0] + B[0])) / (2 * L[1] + 3 * L[0])
return dN, dS |
Classify different position in a codon into different folds (PRIVATE). | def _get_codon_fold(codon_table):
"""Classify different position in a codon into different folds (PRIVATE)."""
def find_fold_class(codon, forward_table):
base = {"A", "T", "C", "G"}
fold = ""
codon_base_lst = list(codon)
for i, b in enumerate(codon_base_lst):
other_base = base - set(b)
aa = []
for j in other_base:
codon_base_lst[i] = j
try:
aa.append(forward_table["".join(codon_base_lst)])
except KeyError:
aa.append("stop")
if aa.count(forward_table[codon]) == 0:
fold += "0"
elif aa.count(forward_table[codon]) in (1, 2):
fold += "2"
elif aa.count(forward_table[codon]) == 3:
fold += "4"
else:
raise RuntimeError(
"Unknown Error, cannot assign the position to a fold"
)
codon_base_lst[i] = b
return fold
fold_table = {}
for codon in codon_table.forward_table:
if "U" not in codon:
fold_table[codon] = find_fold_class(codon, codon_table.forward_table)
fold_table["---"] = "---"
return fold_table |
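An illustrative check of the fold classification above, assuming the standard genetic code: glycine's GGG is nondegenerate at its first two positions and fourfold degenerate at the third, while tryptophan's TGG tolerates no change at all.

from Bio.Data import CodonTable

fold = _get_codon_fold(CodonTable.unambiguous_dna_by_id[1])
print(fold["GGG"])  # "004"
print(fold["TGG"])  # "000" -- every single-base change alters the amino acid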
Count number of different substitution types between two codons (PRIVATE).
returns tuple (P0, P2, P4, Q0, Q2, Q4)
Nomenclature is according to Li et al. (1985), PMID 3916709. | def _diff_codon(codon1, codon2, fold_dict):
"""Count number of different substitution types between two codons (PRIVATE).
returns tuple (P0, P2, P4, Q0, Q2, Q4)
    Nomenclature is according to Li et al. (1985), PMID 3916709.
"""
P0 = P2 = P4 = Q0 = Q2 = Q4 = 0
fold_num = fold_dict[codon1]
purine = ("A", "G")
pyrimidine = ("T", "C")
for n, (i, j) in enumerate(zip(codon1, codon2)):
if i != j and (i in purine and j in purine):
if fold_num[n] == "0":
P0 += 1
elif fold_num[n] == "2":
P2 += 1
elif fold_num[n] == "4":
P4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
if i != j and (i in pyrimidine and j in pyrimidine):
if fold_num[n] == "0":
P0 += 1
elif fold_num[n] == "2":
P2 += 1
elif fold_num[n] == "4":
P4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
if i != j and (
(i in purine and j in pyrimidine) or (i in pyrimidine and j in purine)
):
if fold_num[n] == "0":
Q0 += 1
elif fold_num[n] == "2":
Q2 += 1
elif fold_num[n] == "4":
Q4 += 1
else:
raise RuntimeError("Unexpected fold_num %d" % fold_num[n])
return (P0, P2, P4, Q0, Q2, Q4) |
YN00 method main function (PRIVATE).
Nomenclature is according to Yang and Nielsen (2000), PMID 10666704. | def _yn00(seq1, seq2, k, codon_table):
"""YN00 method main function (PRIVATE).
Nomenclature is according to Yang and Nielsen (2000), PMID 10666704.
"""
from collections import defaultdict
from scipy.linalg import expm
fcodon = [
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
]
codon_fold_dict = _get_codon_fold(codon_table)
fold0_cnt = defaultdict(int)
fold4_cnt = defaultdict(int)
for codon in seq1 + seq2:
# count sites at different codon position
if codon != "---":
fcodon[0][codon[0]] += 1
fcodon[1][codon[1]] += 1
fcodon[2][codon[2]] += 1
# count sites in different degenerate fold class
fold_num = codon_fold_dict[codon]
for i, f in enumerate(fold_num):
if f == "0":
fold0_cnt[codon[i]] += 1
elif f == "4":
fold4_cnt[codon[i]] += 1
f0_total = sum(fold0_cnt.values())
f4_total = sum(fold4_cnt.values())
    # normalize each degeneracy class over its own keys; zipping the two
    # dicts would pair unrelated keys and skip entries when their sizes differ
    for i in fold0_cnt:
        fold0_cnt[i] = fold0_cnt[i] / f0_total
    for j in fold4_cnt:
        fold4_cnt[j] = fold4_cnt[j] / f4_total
# TODO:
# the initial kappa is different from what yn00 gives,
# try to find the problem.
TV = _get_TV(seq1, seq2, codon_table=codon_table)
k04 = (_get_kappa_t(fold0_cnt, TV), _get_kappa_t(fold4_cnt, TV))
kappa = (f0_total * k04[0] + f4_total * k04[1]) / (f0_total + f4_total)
# kappa = 2.4285
# count synonymous sites and non-synonymous sites
for i in range(3):
tot = sum(fcodon[i].values())
fcodon[i] = {j: k / tot for j, k in fcodon[i].items()}
pi = defaultdict(int)
for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in i:
pi[i] = 0
for i in seq1 + seq2:
pi[i] += 1
S_sites1, N_sites1, bfreqSN1 = _count_site_YN00(
seq1, seq2, pi, k=kappa, codon_table=codon_table
)
S_sites2, N_sites2, bfreqSN2 = _count_site_YN00(
seq2, seq1, pi, k=kappa, codon_table=codon_table
)
N_sites = (N_sites1 + N_sites2) / 2
S_sites = (S_sites1 + S_sites2) / 2
bfreqSN = [{"A": 0, "T": 0, "C": 0, "G": 0}, {"A": 0, "T": 0, "C": 0, "G": 0}]
for i in range(2):
for b in ("A", "T", "C", "G"):
bfreqSN[i][b] = (bfreqSN1[i][b] + bfreqSN2[i][b]) / 2
# use NG86 method to get initial t and w
SN = [0, 0]
for i, j in zip(seq1, seq2):
SN = [
m + n for m, n in zip(SN, _count_diff_NG86(i, j, codon_table=codon_table))
]
ps = SN[0] / S_sites
pn = SN[1] / N_sites
p = sum(SN) / (S_sites + N_sites)
w = log(1 - 4.0 / 3 * pn) / log(1 - 4.0 / 3 * ps)
t = -3 / 4 * log(1 - 4 / 3 * p)
tolerance = 1e-5
dSdN_pre = [0, 0]
for temp in range(20):
# count synonymous and nonsynonymous differences under kappa, w, t
codon_lst = [
i
for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons
if "U" not in i
]
Q = _get_Q(pi, kappa, w, codon_lst, codon_table)
P = expm(Q * t)
TV = [0, 0, 0, 0] # synonymous/nonsynonymous transition/transversion
codon_npath = {}
for i, j in zip(seq1, seq2):
if i != "---" and j != "---":
codon_npath.setdefault((i, j), 0)
codon_npath[(i, j)] += 1
for i in codon_npath:
tv = _count_diff_YN00(i[0], i[1], P, codon_lst, codon_table)
TV = [m + n * codon_npath[i] for m, n in zip(TV, tv)]
TV = (TV[0] / S_sites, TV[1] / S_sites), (TV[2] / N_sites, TV[3] / N_sites)
        # according to the DistanceF84() function of yn00.c in PAML,
        # the t (Eq. 10) that appears in PMID 10666704 corresponds to dS and dN
dSdN = []
for f, tv in zip(bfreqSN, TV):
dSdN.append(_get_kappa_t(f, tv, t=True))
t = dSdN[0] * 3 * S_sites / (S_sites + N_sites) + dSdN[1] * 3 * N_sites / (
S_sites + N_sites
)
w = dSdN[1] / dSdN[0]
if all(abs(i - j) < tolerance for i, j in zip(dSdN, dSdN_pre)):
return dSdN[1], dSdN[0] # dN, dS
dSdN_pre = dSdN |
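A toy sketch of the P = expm(Q * t) step above: for any valid rate matrix (rows summing to zero), the resulting transition matrix has rows summing to one. The 3x3 matrix is an arbitrary example, not a codon model.

import numpy as np
from scipy.linalg import expm

Q = np.array([[-0.3, 0.2, 0.1],
              [0.1, -0.4, 0.3],
              [0.2, 0.2, -0.4]])
P = expm(Q * 0.5)  # transition probabilities over branch length t = 0.5
print(P.sum(axis=1))  # each row is ~1.0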
Get TV (PRIVATE).
Returns a (T, V) tuple:
- T - proportion of transitional differences
- V - proportion of transversional differences | def _get_TV(codon_lst1, codon_lst2, codon_table):
"""Get TV (PRIVATE).
    Returns a (T, V) tuple:
    - T - proportion of transitional differences
    - V - proportion of transversional differences
"""
purine = ("A", "G")
pyrimidine = ("C", "T")
TV = [0, 0]
sites = 0
for codon1, codon2 in zip(codon_lst1, codon_lst2):
if "---" not in (codon1, codon2):
for i, j in zip(codon1, codon2):
if i == j:
pass
elif i in purine and j in purine:
TV[0] += 1
elif i in pyrimidine and j in pyrimidine:
TV[0] += 1
else:
TV[1] += 1
sites += 1
return (TV[0] / sites, TV[1] / sites) |
Calculate kappa (PRIVATE).
The following formula and variable names are according to PMID: 10666704 | def _get_kappa_t(pi, TV, t=False):
"""Calculate kappa (PRIVATE).
The following formula and variable names are according to PMID: 10666704
"""
pi["Y"] = pi["T"] + pi["C"]
pi["R"] = pi["A"] + pi["G"]
A = (
2 * (pi["T"] * pi["C"] + pi["A"] * pi["G"])
+ 2
* (
pi["T"] * pi["C"] * pi["R"] / pi["Y"]
+ pi["A"] * pi["G"] * pi["Y"] / pi["R"]
)
* (1 - TV[1] / (2 * pi["Y"] * pi["R"]))
- TV[0]
) / (2 * (pi["T"] * pi["C"] / pi["Y"] + pi["A"] * pi["G"] / pi["R"]))
B = 1 - TV[1] / (2 * pi["Y"] * pi["R"])
    a = -0.5 * log(A)  # this seems to be an error in Yang's original paper
b = -0.5 * log(B)
kappaF84 = a / b - 1
if t is False:
kappaHKY85 = 1 + (
pi["T"] * pi["C"] / pi["Y"] + pi["A"] * pi["G"] / pi["R"]
) * kappaF84 / (pi["T"] * pi["C"] + pi["A"] * pi["G"])
return kappaHKY85
else:
t = (
4 * pi["T"] * pi["C"] * (1 + kappaF84 / pi["Y"])
+ 4 * pi["A"] * pi["G"] * (1 + kappaF84 / pi["R"])
+ 4 * pi["Y"] * pi["R"]
) * b
return t |
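A sanity sketch for the F84 -> HKY85 conversion above: with equal base frequencies (all 0.25) the formula reduces to kappaHKY85 = 1 + 2 * kappaF84. The kappaF84 value here is an arbitrary assumption.

pi = {"T": 0.25, "C": 0.25, "A": 0.25, "G": 0.25}
pi["Y"] = pi["T"] + pi["C"]
pi["R"] = pi["A"] + pi["G"]
kappaF84 = 1.5
kappaHKY85 = 1 + (pi["T"] * pi["C"] / pi["Y"] + pi["A"] * pi["G"] / pi["R"]) * kappaF84 / (
    pi["T"] * pi["C"] + pi["A"] * pi["G"]
)
print(kappaHKY85)  # 4.0 == 1 + 2 * 1.5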
Site counting method from Ina / Yang and Nielsen (PRIVATE).
Method from `Ina (1995)`_ as modified by `Yang and Nielsen (2000)`_.
This will return the total number of synonymous and nonsynonymous sites
and base frequencies in each category. The function is equivalent to
the ``CountSites()`` function in ``yn00.c`` of PAML.
.. _`Ina (1995)`: https://doi.org/10.1007/BF00167113
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236 | def _count_site_YN00(codon_lst1, codon_lst2, pi, k, codon_table):
"""Site counting method from Ina / Yang and Nielsen (PRIVATE).
Method from `Ina (1995)`_ as modified by `Yang and Nielsen (2000)`_.
This will return the total number of synonymous and nonsynonymous sites
and base frequencies in each category. The function is equivalent to
the ``CountSites()`` function in ``yn00.c`` of PAML.
.. _`Ina (1995)`: https://doi.org/10.1007/BF00167113
.. _`Yang and Nielsen (2000)`: https://doi.org/10.1093/oxfordjournals.molbev.a026236
"""
if len(codon_lst1) != len(codon_lst2):
raise RuntimeError(
"Length of two codon_lst should be the same (%d and %d detected)"
% (len(codon_lst1), len(codon_lst2))
)
else:
length = len(codon_lst1)
purine = ("A", "G")
pyrimidine = ("T", "C")
base_tuple = ("A", "T", "C", "G")
codon_dict = codon_table.forward_table
stop = codon_table.stop_codons
codon_npath = {}
for i, j in zip(codon_lst1, codon_lst2):
if i != "---" and j != "---":
codon_npath.setdefault((i, j), 0)
codon_npath[(i, j)] += 1
S_sites = N_sites = 0
freqSN = [
{"A": 0, "T": 0, "C": 0, "G": 0}, # synonymous
{"A": 0, "T": 0, "C": 0, "G": 0},
] # nonsynonymous
for codon_pair, npath in codon_npath.items():
codon = codon_pair[0]
S = N = 0
for pos in range(3):
for base in base_tuple:
if codon[pos] == base:
continue
neighbor_codon = codon[:pos] + base + codon[pos + 1 :]
if neighbor_codon in stop:
continue
weight = pi[neighbor_codon]
if codon[pos] in pyrimidine and base in pyrimidine:
weight *= k
elif codon[pos] in purine and base in purine:
weight *= k
if codon_dict[codon] == codon_dict[neighbor_codon]:
S += weight
freqSN[0][base] += weight * npath
else:
N += weight
freqSN[1][base] += weight * npath
S_sites += S * npath
N_sites += N * npath
norm_const = 3 * length / (S_sites + N_sites)
S_sites *= norm_const
N_sites *= norm_const
for i in freqSN:
norm_const = sum(i.values())
for b in i:
i[b] /= norm_const
return S_sites, N_sites, freqSN |
Count differences between two codons (three-letter string; PRIVATE).
The function weights multiple pathways from codon1 to codon2
according to the P matrix of codon substitution. The proportions
of transitions and transversions (TV) are also calculated in
the function. | def _count_diff_YN00(codon1, codon2, P, codon_lst, codon_table):
"""Count differences between two codons (three-letter string; PRIVATE).
    The function weights multiple pathways from codon1 to codon2
    according to the P matrix of codon substitution. The proportions
    of transitions and transversions (TV) are also calculated in
    the function.
"""
if not isinstance(codon1, str) or not isinstance(codon2, str):
raise TypeError(
"_count_diff_YN00 accepts string object to represent codon"
f" ({type(codon1)}, {type(codon2)} detected)"
)
if len(codon1) != 3 or len(codon2) != 3:
raise RuntimeError(
"codon should be three letter string"
f" ({len(codon1)}, {len(codon2)} detected)"
)
TV = [
0,
0,
0,
0,
] # transition and transversion counts (synonymous and nonsynonymous)
if codon1 == "---" or codon2 == "---":
return TV
base_tuple = ("A", "C", "G", "T")
if not all(i in base_tuple for i in codon1):
raise RuntimeError(
f"Unrecognized character detected in codon1 {codon1}"
" (Codons consist of A, T, C or G)"
)
if not all(i in base_tuple for i in codon2):
raise RuntimeError(
f"Unrecognized character detected in codon2 {codon2}"
" (Codons consist of A, T, C or G)"
)
if codon1 == codon2:
return TV
else:
diff_pos = []
for i, k in enumerate(zip(codon1, codon2)):
if k[0] != k[1]:
diff_pos.append(i)
def count_TV(codon1, codon2, diff, codon_table, weight=1):
purine = ("A", "G")
pyrimidine = ("T", "C")
dic = codon_table.forward_table
stop = codon_table.stop_codons
if codon1 in stop or codon2 in stop:
# stop codon is always considered as nonsynonymous
if codon1[diff] in purine and codon2[diff] in purine:
return [0, 0, weight, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [0, 0, weight, 0]
else:
return [0, 0, 0, weight]
elif dic[codon1] == dic[codon2]:
if codon1[diff] in purine and codon2[diff] in purine:
return [weight, 0, 0, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [weight, 0, 0, 0]
else:
return [0, weight, 0, 0]
else:
if codon1[diff] in purine and codon2[diff] in purine:
return [0, 0, weight, 0]
elif codon1[diff] in pyrimidine and codon2[diff] in pyrimidine:
return [0, 0, weight, 0]
else:
return [0, 0, 0, weight]
if len(diff_pos) == 1:
TV = [
p + q
for p, q in zip(TV, count_TV(codon1, codon2, diff_pos[0], codon_table))
]
elif len(diff_pos) == 2:
tmp_codon = [codon1[:i] + codon2[i] + codon1[i + 1 :] for i in diff_pos]
path_prob = []
for i in tmp_codon:
codon_idx = list(map(codon_lst.index, [codon1, i, codon2]))
prob = (P[codon_idx[0], codon_idx[1]], P[codon_idx[1], codon_idx[2]])
path_prob.append(prob[0] * prob[1])
path_prob = [2 * i / sum(path_prob) for i in path_prob]
for n, i in enumerate(diff_pos):
temp_codon = codon1[:i] + codon2[i] + codon1[i + 1 :]
TV = [
p + q
for p, q in zip(
TV,
count_TV(
codon1, temp_codon, i, codon_table, weight=path_prob[n] / 2
),
)
]
            TV = [
                p + q
                for p, q in zip(
                    TV,
                    count_TV(
                        # second leg of the pathway: the other differing position
                        temp_codon,
                        codon2,
                        diff_pos[1 - n],
                        codon_table,
                        weight=path_prob[n] / 2,
                    ),
                )
            ]
elif len(diff_pos) == 3:
paths = list(permutations([0, 1, 2], 3))
path_prob = []
tmp_codon = []
for p in paths:
tmp1 = codon1[: p[0]] + codon2[p[0]] + codon1[p[0] + 1 :]
tmp2 = tmp1[: p[1]] + codon2[p[1]] + tmp1[p[1] + 1 :]
tmp_codon.append((tmp1, tmp2))
codon_idx = list(map(codon_lst.index, [codon1, tmp1, tmp2, codon2]))
prob = (
P[codon_idx[0], codon_idx[1]],
P[codon_idx[1], codon_idx[2]],
P[codon_idx[2], codon_idx[3]],
)
path_prob.append(prob[0] * prob[1] * prob[2])
path_prob = [3 * i / sum(path_prob) for i in path_prob]
for i, j, k in zip(tmp_codon, path_prob, paths):
TV = [
p + q
for p, q in zip(
TV, count_TV(codon1, i[0], k[0], codon_table, weight=j / 3)
)
]
TV = [
p + q
for p, q in zip(
TV, count_TV(i[0], i[1], k[1], codon_table, weight=j / 3)
)
]
TV = [
p + q
for p, q in zip(
                    TV, count_TV(i[1], codon2, k[2], codon_table, weight=j / 3)
)
]
return TV |
ML method main function (PRIVATE). | def _ml(seq1, seq2, cmethod, codon_table):
"""ML method main function (PRIVATE)."""
from collections import Counter
from scipy.optimize import minimize
codon_cnt = Counter()
pi = _get_pi(seq1, seq2, cmethod, codon_table=codon_table)
for i, j in zip(seq1, seq2):
# if i != j and ('---' not in (i, j)):
if "---" not in (i, j):
codon_cnt[(i, j)] += 1
codon_lst = [
i
for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons
if "U" not in i
]
# apply optimization
def func(
params, pi=pi, codon_cnt=codon_cnt, codon_lst=codon_lst, codon_table=codon_table
):
"""Temporary function, params = [t, k, w]."""
return -_likelihood_func(
params[0],
params[1],
params[2],
pi,
codon_cnt,
codon_lst=codon_lst,
codon_table=codon_table,
)
# count sites
opt_res = minimize(
func,
[1, 0.1, 2],
method="L-BFGS-B",
bounds=((1e-10, 20), (1e-10, 20), (1e-10, 10)),
tol=1e-5,
)
t, k, w = opt_res.x
Q = _get_Q(pi, k, w, codon_lst, codon_table)
Sd = Nd = 0
for i, c1 in enumerate(codon_lst):
for j, c2 in enumerate(codon_lst):
if i != j:
try:
if codon_table.forward_table[c1] == codon_table.forward_table[c2]:
# synonymous count
Sd += pi[c1] * Q[i, j]
else:
# nonsynonymous count
Nd += pi[c1] * Q[i, j]
except KeyError:
# This is probably due to stop codons
pass
Sd *= t
Nd *= t
# count differences (with w fixed to 1)
def func_w1(
params, pi=pi, codon_cnt=codon_cnt, codon_lst=codon_lst, codon_table=codon_table
):
"""Temporary function, params = [t, k]. w is fixed to 1."""
return -_likelihood_func(
params[0],
params[1],
1.0,
pi,
codon_cnt,
codon_lst=codon_lst,
codon_table=codon_table,
)
opt_res = minimize(
func_w1,
[1, 0.1],
method="L-BFGS-B",
bounds=((1e-10, 20), (1e-10, 20)),
tol=1e-5,
)
t, k = opt_res.x
w = 1.0
Q = _get_Q(pi, k, w, codon_lst, codon_table)
rhoS = rhoN = 0
for i, c1 in enumerate(codon_lst):
for j, c2 in enumerate(codon_lst):
if i != j:
try:
if codon_table.forward_table[c1] == codon_table.forward_table[c2]:
# synonymous count
rhoS += pi[c1] * Q[i, j]
else:
# nonsynonymous count
rhoN += pi[c1] * Q[i, j]
except KeyError:
# This is probably due to stop codons
pass
rhoS *= 3
rhoN *= 3
dN = Nd / rhoN
dS = Sd / rhoS
return dN, dS |
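A minimal sketch of the bounded optimization pattern used above, on a toy objective with a known minimum at (t, k, w) = (1, 2, 0.5); the quadratic stands in for the negative log-likelihood.

from scipy.optimize import minimize

def toy_neg_log_like(params):
    t, k, w = params
    return (t - 1) ** 2 + (k - 2) ** 2 + (w - 0.5) ** 2

res = minimize(
    toy_neg_log_like,
    [0.5, 0.5, 0.5],
    method="L-BFGS-B",
    bounds=((1e-10, 20), (1e-10, 20), (1e-10, 10)),
    tol=1e-5,
)
print(res.x)  # approximately [1.0, 2.0, 0.5]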
Obtain codon frequency dict (pi) from two codon list (PRIVATE).
This function is designed for ML method. Available counting methods
(cfreq) are F1x4, F3x4 and F61. | def _get_pi(seq1, seq2, cmethod, codon_table):
"""Obtain codon frequency dict (pi) from two codon list (PRIVATE).
This function is designed for ML method. Available counting methods
    (cfreq) are F1x4, F3x4 and F61.
"""
# TODO:
# Stop codon should not be allowed according to Yang.
# Try to modify this!
pi = {}
if cmethod == "F1x4":
fcodon = {"A": 0, "G": 0, "C": 0, "T": 0}
for i in seq1 + seq2:
if i != "---":
for c in i:
fcodon[c] += 1
tot = sum(fcodon.values())
fcodon = {j: k / tot for j, k in fcodon.items()}
        for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in i:
pi[i] = fcodon[i[0]] * fcodon[i[1]] * fcodon[i[2]]
elif cmethod == "F3x4":
# three codon position
fcodon = [
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
{"A": 0, "G": 0, "C": 0, "T": 0},
]
for i in seq1 + seq2:
if i != "---":
fcodon[0][i[0]] += 1
fcodon[1][i[1]] += 1
fcodon[2][i[2]] += 1
for i in range(3):
tot = sum(fcodon[i].values())
fcodon[i] = {j: k / tot for j, k in fcodon[i].items()}
for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in i:
pi[i] = fcodon[0][i[0]] * fcodon[1][i[1]] * fcodon[2][i[2]]
elif cmethod == "F61":
        for i in list(codon_table.forward_table.keys()) + codon_table.stop_codons:
if "U" not in i:
pi[i] = 0.1
for i in seq1 + seq2:
if i != "---":
pi[i] += 1
tot = sum(pi.values())
pi = {j: k / tot for j, k in pi.items()}
return pi |
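A sketch of the F3x4 idea above with made-up position-specific frequencies: a codon's expected frequency is the product of its nucleotide frequencies at the three codon positions.

f = [
    {"A": 0.4, "G": 0.2, "C": 0.2, "T": 0.2},  # codon position 1
    {"A": 0.1, "G": 0.3, "C": 0.3, "T": 0.3},  # codon position 2
    {"A": 0.25, "G": 0.25, "C": 0.25, "T": 0.25},  # codon position 3
]
codon = "ATG"
pi_codon = f[0][codon[0]] * f[1][codon[1]] * f[2][codon[2]]
print(pi_codon)  # 0.4 * 0.3 * 0.25 = 0.03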
Rate of substitution between two codons, i.e. one element of the Q matrix (PRIVATE).
Arguments:
- i, j : three letter codon string
- pi : expected codon frequency
- k : transition/transversion ratio
- w : nonsynonymous/synonymous rate ratio
- codon_table: Bio.Data.CodonTable object | def _q(i, j, pi, k, w, codon_table):
"""Q matrix for codon substitution (PRIVATE).
Arguments:
- i, j : three letter codon string
- pi : expected codon frequency
- k : transition/transversion ratio
- w : nonsynonymous/synonymous rate ratio
- codon_table: Bio.Data.CodonTable object
"""
if i == j:
        # the diagonal element is minus the sum of the other elements in its
        # row; it is filled in later by _get_Q, so return 0 here
return 0
if i in codon_table.stop_codons or j in codon_table.stop_codons:
return 0
if (i not in pi) or (j not in pi):
return 0
purine = ("A", "G")
pyrimidine = ("T", "C")
diff = []
for n, (c1, c2) in enumerate(zip(i, j)):
if c1 != c2:
diff.append((n, c1, c2))
if len(diff) >= 2:
return 0
if codon_table.forward_table[i] == codon_table.forward_table[j]:
# synonymous substitution
if diff[0][1] in purine and diff[0][2] in purine:
# transition
return k * pi[j]
elif diff[0][1] in pyrimidine and diff[0][2] in pyrimidine:
# transition
return k * pi[j]
else:
# transversion
return pi[j]
else:
# nonsynonymous substitution
if diff[0][1] in purine and diff[0][2] in purine:
# transition
return w * k * pi[j]
elif diff[0][1] in pyrimidine and diff[0][2] in pyrimidine:
# transition
return w * k * pi[j]
else:
# transversion
return w * pi[j] |
Q matrix for codon substitution (PRIVATE). | def _get_Q(pi, k, w, codon_lst, codon_table):
"""Q matrix for codon substitution (PRIVATE)."""
import numpy as np
codon_num = len(codon_lst)
Q = np.zeros((codon_num, codon_num))
for i in range(codon_num):
for j in range(codon_num):
if i != j:
Q[i, j] = _q(
codon_lst[i], codon_lst[j], pi, k, w, codon_table=codon_table
)
nucl_substitutions = 0
for i in range(codon_num):
Q[i, i] = -sum(Q[i, :])
try:
nucl_substitutions += pi[codon_lst[i]] * (-Q[i, i])
except KeyError:
pass
Q = Q / nucl_substitutions
return Q |
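A toy check of the normalization above: after scaling, the expected substitution rate sum(pi_i * -Q_ii) equals one. The two-state matrix and frequencies are arbitrary assumptions.

import numpy as np

Q = np.array([[-0.6, 0.6], [0.4, -0.4]])
pi = np.array([0.4, 0.6])
rate = -(pi * np.diag(Q)).sum()
Q_norm = Q / rate
print(-(pi * np.diag(Q_norm)).sum())  # 1.0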
Likelihood function for ML method (PRIVATE). | def _likelihood_func(t, k, w, pi, codon_cnt, codon_lst, codon_table):
"""Likelihood function for ML method (PRIVATE)."""
from scipy.linalg import expm
Q = _get_Q(pi, k, w, codon_lst, codon_table)
P = expm(Q * t)
likelihood = 0
for i, c1 in enumerate(codon_lst):
for j, c2 in enumerate(codon_lst):
if (c1, c2) in codon_cnt:
if P[i, j] * pi[c1] <= 0:
likelihood += codon_cnt[(c1, c2)] * 0
else:
likelihood += codon_cnt[(c1, c2)] * log(pi[c1] * P[i, j])
return likelihood |
Build a codon alignment from protein alignment and corresponding nucleotides.
Arguments:
- pro_align - a protein MultipleSeqAlignment object
- nucl_seqs - an object returned by SeqIO.parse or SeqIO.index
or a collection of SeqRecord.
- corr_dict - a dict that maps protein id to nucleotide id
- complete_protein - whether the sequence begins with a start
codon
Return a CodonAlignment object.
The example below answers this Biostars question: https://www.biostars.org/p/89741/
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> from Bio.codonalign import build
>>> seq1 = SeqRecord(Seq('ATGTCTCGT'), id='pro1')
>>> seq2 = SeqRecord(Seq('ATGCGT'), id='pro2')
>>> pro1 = SeqRecord(Seq('MSR'), id='pro1')
>>> pro2 = SeqRecord(Seq('M-R'), id='pro2')
>>> aln = MultipleSeqAlignment([pro1, pro2])
>>> codon_aln = build(aln, [seq1, seq2])
>>> print(codon_aln)
CodonAlignment with 2 rows and 9 columns (3 codons)
ATGTCTCGT pro1
ATG---CGT pro2 | def build(
pro_align,
nucl_seqs,
corr_dict=None,
gap_char="-",
unknown="X",
codon_table=None,
complete_protein=False,
anchor_len=10,
max_score=10,
):
"""Build a codon alignment from protein alignment and corresponding nucleotides.
Arguments:
- pro_align - a protein MultipleSeqAlignment object
- nucl_seqs - an object returned by SeqIO.parse or SeqIO.index
or a collection of SeqRecord.
- corr_dict - a dict that maps protein id to nucleotide id
- complete_protein - whether the sequence begins with a start
codon
Return a CodonAlignment object.
The example below answers this Biostars question: https://www.biostars.org/p/89741/
>>> from Bio.Seq import Seq
>>> from Bio.SeqRecord import SeqRecord
>>> from Bio.Align import MultipleSeqAlignment
>>> from Bio.codonalign import build
>>> seq1 = SeqRecord(Seq('ATGTCTCGT'), id='pro1')
>>> seq2 = SeqRecord(Seq('ATGCGT'), id='pro2')
>>> pro1 = SeqRecord(Seq('MSR'), id='pro1')
>>> pro2 = SeqRecord(Seq('M-R'), id='pro2')
>>> aln = MultipleSeqAlignment([pro1, pro2])
>>> codon_aln = build(aln, [seq1, seq2])
>>> print(codon_aln)
CodonAlignment with 2 rows and 9 columns (3 codons)
ATGTCTCGT pro1
ATG---CGT pro2
"""
# TODO
# add an option to allow the user to specify the returned object?
from Bio.Align import MultipleSeqAlignment
# check the type of object of pro_align
if not isinstance(pro_align, MultipleSeqAlignment):
raise TypeError("the first argument should be a MultipleSeqAlignment object")
# check whether the number of seqs in pro_align and nucl_seqs is
# the same
pro_num = len(pro_align)
if corr_dict is None:
try:
nucl_num = len(nucl_seqs)
except TypeError:
# nucl_seqs will be an iterator if returned by SeqIO.parse()
nucl_seqs = tuple(nucl_seqs)
nucl_num = len(nucl_seqs)
if pro_num > nucl_num:
raise ValueError(
f"Higher Number of SeqRecords in Protein Alignment ({pro_num}) "
f"than the Number of Nucleotide SeqRecords ({nucl_num}) are found!"
)
# Determine the protein sequences and nucl sequences
# correspondence. If nucl_seqs is a list, tuple or read by
# SeqIO.parse(), we assume the order of sequences in pro_align
# and nucl_seqs are the same. If nucl_seqs is a dict or read by
# SeqIO.index(), we match seqs in pro_align and those in
# nucl_seq by their id.
if isinstance(nucl_seqs, Mapping):
corr_method = 1
elif isinstance(nucl_seqs, Iterable):
corr_method = 0
else:
raise TypeError(
"Nucl Sequences Error, Unknown type to assign correspondence method"
)
else:
if not isinstance(corr_dict, dict):
raise TypeError(
"corr_dict should be a dict that corresponds "
"protein id to nucleotide id!"
)
if len(corr_dict) >= pro_num:
if isinstance(nucl_seqs, Mapping):
pass
else:
d = {}
for record in nucl_seqs:
key = record.id
if key in d:
raise ValueError(f"Duplicate key '{key}'")
d[key] = record
nucl_seqs = d
corr_method = 2
else:
raise RuntimeError(
f"Number of items in corr_dict ({len(corr_dict)}) "
f"is less than number of protein records ({pro_num})"
)
# set up pro-nucl correspondence based on corr_method
# corr_method = 0, consecutive pairing
if corr_method == 0:
pro_nucl_pair = zip(pro_align, nucl_seqs)
# corr_method = 1, keyword pairing
elif corr_method == 1:
nucl_id = set(nucl_seqs.keys())
pro_id = {i.id for i in pro_align}
# check if there is pro_id that does not have a nucleotide match
if pro_id - nucl_id:
diff = pro_id - nucl_id
raise ValueError(
f"Protein Record {', '.join(diff)} cannot find a "
"nucleotide sequence match, please check the id"
)
else:
pro_nucl_pair = []
for pro_rec in pro_align:
pro_nucl_pair.append((pro_rec, nucl_seqs[pro_rec.id]))
# corr_method = 2, dict pairing
elif corr_method == 2:
pro_nucl_pair = []
for pro_rec in pro_align:
try:
nucl_id = corr_dict[pro_rec.id]
except KeyError:
print(f"Protein record ({pro_rec.id}) is not in corr_dict!")
exit(1)
pro_nucl_pair.append((pro_rec, nucl_seqs[nucl_id]))
if codon_table is None:
codon_table = CodonTable.generic_by_id[1]
codon_aln = []
shift = False
for pair in pro_nucl_pair:
# Beware that the following span corresponds to an ungapped
# nucleotide sequence.
corr_span = _check_corr(
pair[0],
pair[1],
gap_char=gap_char,
codon_table=codon_table,
complete_protein=complete_protein,
anchor_len=anchor_len,
)
if not corr_span:
raise ValueError(
f"Protein Record {pair[0].id} and "
f"Nucleotide Record {pair[1].id} do not match!"
)
else:
codon_rec = _get_codon_rec(
pair[0],
pair[1],
corr_span,
gap_char=gap_char,
complete_protein=complete_protein,
codon_table=codon_table,
max_score=max_score,
)
codon_aln.append(codon_rec)
if corr_span[1] == 2:
shift = True
if shift:
return CodonAlignment(_align_shift_recs(codon_aln))
else:
return CodonAlignment(codon_aln) |
Generate regular expression based on a given list of codons (PRIVATE). | def _codons2re(codons):
"""Generate regular expression based on a given list of codons (PRIVATE)."""
reg = ""
for i in zip(*codons):
if len(set(i)) == 1:
reg += "".join(set(i))
else:
reg += "[" + "".join(set(i)) + "]"
return reg |
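An illustration of the helper above: positions shared by all codons stay literal, while variable positions become a character class. Because the class members come from an unsorted set, [AG] and [GA] are both possible outputs.

print(_codons2re(["TAA", "TAG"]))  # e.g. "TA[AG]"
print(_codons2re(["CGT", "CGC", "CGA", "CGG"]))  # e.g. "CG[ACGT]"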
Set up the regular expression of a given CodonTable (PRIVATE).
>>> from Bio.Data.CodonTable import generic_by_id
>>> p = generic_by_id[1]
>>> t = _get_aa_regex(p)
>>> print(t['A'][0])
G
>>> print(t['A'][1])
C
>>> print(sorted(list(t['A'][2:])))
['A', 'C', 'G', 'T', 'U', '[', ']']
>>> print(sorted(list(t['L'][:5])))
['C', 'T', 'U', '[', ']']
>>> print(sorted(list(t['L'][5:9])))
['T', 'U', '[', ']']
>>> print(sorted(list(t['L'][9:])))
['A', 'C', 'G', 'T', 'U', '[', ']'] | def _get_aa_regex(codon_table, stop="*", unknown="X"):
"""Set up the regular expression of a given CodonTable (PRIVATE).
>>> from Bio.Data.CodonTable import generic_by_id
>>> p = generic_by_id[1]
>>> t = _get_aa_regex(p)
>>> print(t['A'][0])
G
>>> print(t['A'][1])
C
>>> print(sorted(list(t['A'][2:])))
['A', 'C', 'G', 'T', 'U', '[', ']']
>>> print(sorted(list(t['L'][:5])))
['C', 'T', 'U', '[', ']']
>>> print(sorted(list(t['L'][5:9])))
['T', 'U', '[', ']']
>>> print(sorted(list(t['L'][9:])))
['A', 'C', 'G', 'T', 'U', '[', ']']
"""
from Bio.Data.CodonTable import CodonTable
if not isinstance(codon_table, CodonTable):
raise TypeError("Input table is not a instance of Bio.Data.CodonTable object")
aa2codon = {}
for codon, aa in codon_table.forward_table.items():
aa2codon.setdefault(aa, []).append(codon)
for aa, codons in aa2codon.items():
aa2codon[aa] = _codons2re(codons)
aa2codon[stop] = _codons2re(codon_table.stop_codons)
aa2codon[unknown] = "..."
return aa2codon |
Check if the nucleotide can be translated into the protein (PRIVATE).
Expects two SeqRecord objects. | def _check_corr(
pro, nucl, gap_char, codon_table, complete_protein=False, anchor_len=10
):
"""Check if the nucleotide can be translated into the protein (PRIVATE).
Expects two SeqRecord objects.
"""
import re
if not isinstance(pro, SeqRecord) or not isinstance(nucl, SeqRecord):
raise TypeError(
"_check_corr accepts two SeqRecord object. Please check your input."
)
aa2re = _get_aa_regex(codon_table)
pro_re = ""
for aa in pro.seq:
if aa != gap_char:
pro_re += aa2re[aa]
nucl_seq = str(nucl.seq.upper().replace(gap_char, ""))
match = re.search(pro_re, nucl_seq)
if match:
# mode = 0, direct match
return (match.span(), 0)
else:
        # Might be caused by mismatches or frameshifts; try again
        # using anchors
# anchor_len = 10 # adjust this value to test performance
pro_seq = str(pro.seq).replace(gap_char, "")
anchors = [
pro_seq[i : (i + anchor_len)] for i in range(0, len(pro_seq), anchor_len)
]
# if the last anchor is less than the specified anchor
# size, we combine the penultimate and the last anchor
# together as the last one.
# TODO: modify this to deal with short sequence with only
# one anchor.
if len(anchors[-1]) < anchor_len:
anchors[-1] = anchors[-2] + anchors[-1]
pro_re = []
anchor_distance = 0
anchor_pos = []
for i, anchor in enumerate(anchors):
this_anchor_len = len(anchor)
qcodon = ""
fncodon = ""
            # dirty code to deal with the last anchor: because the last
            # anchor was merged with the penultimate one above, we need
            # to recover the true last anchor for pro_re
if this_anchor_len == anchor_len:
for aa in anchor:
if complete_protein and i == 0:
qcodon += _codons2re(codon_table.start_codons)
fncodon += aa2re["X"]
continue
qcodon += aa2re[aa]
fncodon += aa2re["X"]
match = re.search(qcodon, nucl_seq)
elif this_anchor_len > anchor_len:
last_qcodon = ""
last_fcodon = ""
for j in range(anchor_len, len(anchor)):
last_qcodon += aa2re[anchor[j]]
last_fcodon += aa2re["X"]
match = re.search(last_qcodon, nucl_seq)
# build full_pro_re from anchors
if match:
anchor_pos.append((match.start(), match.end(), i))
if this_anchor_len == anchor_len:
pro_re.append(qcodon)
else:
pro_re.append(last_qcodon)
else:
if this_anchor_len == anchor_len:
pro_re.append(fncodon)
else:
pro_re.append(last_fcodon)
full_pro_re = "".join(pro_re)
match = re.search(full_pro_re, nucl_seq)
if match:
# mode = 1, mismatch
return (match.span(), 1)
else:
# check frames of anchors
# ten frameshift events are allowed in a sequence
first_anchor = True
shift_id_pos = 0
# check the first anchor
if first_anchor and anchor_pos[0][2] != 0:
shift_val_lst = [1, 2, 3 * anchor_len - 2, 3 * anchor_len - 1, 0]
sh_anc = anchors[0]
for shift_val in shift_val_lst:
if shift_val == 0:
qcodon = None
break
if shift_val in (1, 2):
sh_nuc_len = anchor_len * 3 + shift_val
elif shift_val in (3 * anchor_len - 2, 3 * anchor_len - 1):
sh_nuc_len = anchor_len * 3 - (3 * anchor_len - shift_val)
if anchor_pos[0][0] >= sh_nuc_len:
sh_nuc = nucl_seq[
anchor_pos[0][0] - sh_nuc_len : anchor_pos[0][0]
]
else:
# this is unlikely to produce the correct output
sh_nuc = nucl_seq[: anchor_pos[0][0]]
qcodon, shift_id_pos = _get_shift_anchor_re(
sh_anc, sh_nuc, shift_val, aa2re, anchor_len, shift_id_pos
)
if qcodon is not None and qcodon != -1:
# pro_re[0] should be '.'*anchor_len, therefore I
# replace it.
pro_re[0] = qcodon
break
if qcodon == -1:
warnings.warn(
f"first frameshift detection failed for {nucl.id}",
BiopythonWarning,
)
# check anchors in the middle
for i in range(len(anchor_pos) - 1):
shift_val = (anchor_pos[i + 1][0] - anchor_pos[i][0]) % (3 * anchor_len)
sh_anc = "".join(anchors[anchor_pos[i][2] : anchor_pos[i + 1][2]])
sh_nuc = nucl_seq[anchor_pos[i][0] : anchor_pos[i + 1][0]]
qcodon = None
if shift_val != 0:
qcodon, shift_id_pos = _get_shift_anchor_re(
sh_anc, sh_nuc, shift_val, aa2re, anchor_len, shift_id_pos
)
if qcodon is not None and qcodon != -1:
pro_re[anchor_pos[i][2] : anchor_pos[i + 1][2]] = [qcodon]
qcodon = None
elif qcodon == -1:
warnings.warn(
f"middle frameshift detection failed for {nucl.id}",
BiopythonWarning,
)
# check the last anchor
if anchor_pos[-1][2] + 1 == len(anchors) - 1:
sh_anc = anchors[-1]
this_anchor_len = len(sh_anc)
shift_val_lst = [
1,
2,
3 * this_anchor_len - 2,
3 * this_anchor_len - 1,
0,
]
for shift_val in shift_val_lst:
if shift_val == 0:
qcodon = None
break
if shift_val in (1, 2):
sh_nuc_len = this_anchor_len * 3 + shift_val
elif shift_val in (
3 * this_anchor_len - 2,
3 * this_anchor_len - 1,
):
sh_nuc_len = this_anchor_len * 3 - (
3 * this_anchor_len - shift_val
)
if len(nucl_seq) - anchor_pos[-1][0] >= sh_nuc_len:
sh_nuc = nucl_seq[
anchor_pos[-1][0] : anchor_pos[-1][0] + sh_nuc_len
]
else:
# this is unlikely to produce the correct output
sh_nuc = nucl_seq[anchor_pos[-1][0] :]
qcodon, shift_id_pos = _get_shift_anchor_re(
sh_anc, sh_nuc, shift_val, aa2re, this_anchor_len, shift_id_pos
)
if qcodon is not None and qcodon != -1:
pro_re.pop()
pro_re[-1] = qcodon
break
if qcodon == -1:
warnings.warn(
f"last frameshift detection failed for {nucl.id}",
BiopythonWarning,
)
# try global match
full_pro_re = "".join(pro_re)
match = re.search(full_pro_re, nucl_seq)
if match:
return (match.span(), 2, match)
else:
raise RuntimeError(
f"Protein SeqRecord ({pro.id}) and "
f"Nucleotide SeqRecord ({nucl.id}) do not match!"
) |
Find a regular expression matching a potentially shifted anchor (PRIVATE).
Arguments:
- sh_anc - shifted anchor sequence
- sh_nuc - potentially corresponding nucleotide sequence
of sh_anc
- shift_val - 1 or 2 indicates forward frame shift, whereas
3*anchor_len-1 or 3*anchor_len-2 indicates
backward shift
- aa2re - aa to codon re dict
- anchor_len - length of the anchor
- shift_id_pos - specify current shift name we are at | def _get_shift_anchor_re(sh_anc, sh_nuc, shift_val, aa2re, anchor_len, shift_id_pos):
"""Find a regular expression matching a potentially shifted anchor (PRIVATE).
Arguments:
- sh_anc - shifted anchor sequence
- sh_nuc - potentially corresponding nucleotide sequence
of sh_anc
- shift_val - 1 or 2 indicates forward frame shift, whereas
3*anchor_len-1 or 3*anchor_len-2 indicates
backward shift
- aa2re - aa to codon re dict
- anchor_len - length of the anchor
- shift_id_pos - specify current shift name we are at
"""
import re
shift_id = [chr(i) for i in range(97, 107)]
if 0 < shift_val < 3 * anchor_len - 2:
# if shift_val in (1, 2):
for j in range(len(sh_anc)):
qcodon = "^"
for k, aa in enumerate(sh_anc):
if k == j:
qcodon += aa2re[aa] + "(?P<" + shift_id[shift_id_pos] + ">..*)"
else:
qcodon += aa2re[aa]
qcodon += "$"
match = re.search(qcodon, sh_nuc)
if match:
qcodon = qcodon.replace("^", "").replace("$", "")
shift_id_pos += 1
return qcodon, shift_id_pos
if not match:
# failed to find a match (frameshift)
return -1, shift_id_pos
elif shift_val in (3 * anchor_len - 1, 3 * anchor_len - 2):
shift_val = 3 * anchor_len - shift_val
# obtain shifted anchor and corresponding nucl
# first check if the shifted pos is just at the end of the
# previous anchor.
for j in range(1, len(sh_anc)):
qcodon = "^"
for k, aa in enumerate(sh_anc):
if k == j - 1:
# will be considered in the next step
pass
elif k == j:
qcodon += _merge_aa2re(
sh_anc[j - 1],
sh_anc[j],
shift_val,
aa2re,
shift_id[shift_id_pos].upper(),
)
else:
qcodon += aa2re[aa]
qcodon += "$"
match = re.search(qcodon, sh_nuc)
if match:
qcodon = qcodon.replace("^", "").replace("$", "")
shift_id_pos += 1
return qcodon, shift_id_pos
if not match:
# failed to find a match (frameshift)
return -1, shift_id_pos |
Merge two amino acids based on detected frame shift value (PRIVATE). | def _merge_aa2re(aa1, aa2, shift_val, aa2re, reid):
"""Merge two amino acids based on detected frame shift value (PRIVATE)."""
def get_aa_from_codonre(re_aa):
aas = []
m = 0
for i in re_aa:
if i == "[":
m = -1
aas.append("")
elif i == "]":
m = 0
continue
elif m == -1:
aas[-1] = aas[-1] + i
elif m == 0:
aas.append(i)
return aas
scodon = list(map(get_aa_from_codonre, (aa2re[aa1], aa2re[aa2])))
if shift_val == 1:
intersect = "".join(set(scodon[0][2]) & set(scodon[1][0]))
scodonre = "(?P<" + reid + ">"
scodonre += (
"["
+ scodon[0][0]
+ "]"
+ "["
+ scodon[0][1]
+ "]"
+ "["
+ intersect
+ "]"
+ "["
+ scodon[1][1]
+ "]"
+ "["
+ scodon[1][2]
+ "]"
)
elif shift_val == 2:
intersect1 = "".join(set(scodon[0][1]) & set(scodon[1][0]))
intersect2 = "".join(set(scodon[0][2]) & set(scodon[1][1]))
scodonre = "(?P<" + reid + ">"
scodonre += (
"["
+ scodon[0][0]
+ "]"
+ "["
+ intersect1
+ "]"
+ "["
+ intersect2
+ "]"
+ "["
+ scodon[1][2]
+ "]"
)
scodonre += ")"
return scodonre |
Generate codon alignment based on regular re match (PRIVATE).
span_mode is a tuple returned by _check_corr. The first element
is the span of a re search, and the second element is the mode
for the match.
mode
- 0: direct match
- 1: mismatch (no indels)
- 2: frameshift | def _get_codon_rec(
pro, nucl, span_mode, gap_char, codon_table, complete_protein=False, max_score=10
):
"""Generate codon alignment based on regular re match (PRIVATE).
span_mode is a tuple returned by _check_corr. The first element
is the span of a re search, and the second element is the mode
for the match.
mode
- 0: direct match
- 1: mismatch (no indels)
- 2: frameshift
"""
import re
from Bio.Seq import Seq
nucl_seq = nucl.seq.replace(gap_char, "")
span = span_mode[0]
mode = span_mode[1]
aa2re = _get_aa_regex(codon_table)
if mode in (0, 1):
if len(pro.seq.replace(gap_char, "")) * 3 != (span[1] - span[0]):
raise ValueError(
f"Protein Record {pro.id} and "
f"Nucleotide Record {nucl.id} do not match!"
)
aa_num = 0
codon_seq = CodonSeq()
for aa in pro.seq:
if aa == "-":
codon_seq += "---"
elif complete_protein and aa_num == 0:
this_codon = nucl_seq[span[0] : span[0] + 3]
if not re.search(
_codons2re(codon_table.start_codons), str(this_codon.upper())
):
max_score -= 1
warnings.warn(
f"start codon of {pro.id} ({aa} {aa_num}) does not "
f"correspond to {nucl.id} ({this_codon})",
BiopythonWarning,
)
if max_score == 0:
raise RuntimeError(
f"max_score reached for {nucl.id}! Please raise up "
"the tolerance to get an alignment in anyway"
)
codon_seq += this_codon
aa_num += 1
else:
this_codon = nucl_seq[span[0] + 3 * aa_num : span[0] + 3 * (aa_num + 1)]
if this_codon.upper().translate(table=codon_table) != aa:
max_score -= 1
warnings.warn(
"%s(%s %d) does not correspond to %s(%s)"
% (pro.id, aa, aa_num, nucl.id, this_codon),
BiopythonWarning,
)
if max_score == 0:
raise RuntimeError(
f"max_score reached for {nucl.id}! Please raise up "
"the tolerance to get an alignment in anyway"
)
codon_seq += this_codon
aa_num += 1
return SeqRecord(codon_seq, id=nucl.id)
elif mode == 2:
from collections import deque
shift_pos = deque([])
shift_start = []
match = span_mode[2]
m_groupdict = list(match.groupdict().keys())
# backward frameshift
for i in m_groupdict:
shift_pos.append(match.span(i))
shift_start.append(match.start(i))
rf_table = []
i = match.start()
while True:
rf_table.append(i)
i += 3
if i in shift_start and m_groupdict[shift_start.index(i)].isupper():
shift_index = shift_start.index(i)
shift_val = 6 - (shift_pos[shift_index][1] - shift_pos[shift_index][0])
rf_table.append(i)
rf_table.append(i + 3 - shift_val)
i = shift_pos[shift_index][1]
elif i in shift_start and m_groupdict[shift_start.index(i)].islower():
i = shift_pos[shift_start.index(i)][1]
if i >= match.end():
break
codon_seq = CodonSeq()
aa_num = 0
for aa in pro.seq:
if aa == "-":
codon_seq += "---"
elif complete_protein and aa_num == 0:
this_codon = nucl_seq[rf_table[0] : rf_table[0] + 3]
if not re.search(
_codons2re(codon_table.start_codons), str(this_codon.upper())
):
max_score -= 1
warnings.warn(
f"start codon of {pro.id}({aa} {aa_num}) does not "
f"correspond to {nucl.id}({this_codon})",
BiopythonWarning,
)
codon_seq += this_codon
aa_num += 1
else:
if (
aa_num < len(pro.seq.replace("-", "")) - 1
and rf_table[aa_num + 1] - rf_table[aa_num] - 3 < 0
):
max_score -= 1
start = rf_table[aa_num]
end = start + (3 - shift_val)
ngap = shift_val
this_codon = nucl_seq[start:end] + "-" * ngap
elif rf_table[aa_num] - rf_table[aa_num - 1] - 3 > 0:
max_score -= 1
start = rf_table[aa_num - 1] + 3
end = rf_table[aa_num]
ngap = 3 - (rf_table[aa_num] - rf_table[aa_num - 1] - 3)
this_codon = (
nucl_seq[start:end]
+ "-" * ngap
+ nucl_seq[rf_table[aa_num] : rf_table[aa_num] + 3]
)
else:
start = rf_table[aa_num]
end = start + 3
this_codon = nucl_seq[start:end]
if this_codon.upper().translate(table=codon_table) != aa:
max_score -= 1
warnings.warn(
f"Codon of {pro.id}({aa} {aa_num}) does not "
f"correspond to {nucl.id}({this_codon})",
BiopythonWarning,
)
if max_score == 0:
raise RuntimeError(
f"max_score reached for {nucl.id}! Please raise up "
"the tolerance to get an alignment in anyway"
)
codon_seq += this_codon
aa_num += 1
codon_seq.rf_table = rf_table
return SeqRecord(codon_seq, id=nucl.id) |
Build alignment according to the frameshift detected by _check_corr (PRIVATE).
Argument:
- recs - a list of SeqRecords containing a CodonSeq dictated
by a rf_table (with frameshift in some of them). | def _align_shift_recs(recs):
"""Build alignment according to the frameshift detected by _check_corr (PRIVATE).
Argument:
- recs - a list of SeqRecords containing a CodonSeq dictated
by a rf_table (with frameshift in some of them).
"""
def find_next_int(k, lst):
idx = lst.index(k)
p = 0
while True:
if isinstance(lst[idx + p], int):
return lst[idx + p], p
p += 1
full_rf_table_lst = [rec.seq.get_full_rf_table() for rec in recs]
rf_num = [0] * len(recs)
for k, rec in enumerate(recs):
for i in rec.seq.get_full_rf_table():
if isinstance(i, int):
rf_num[k] += 1
# isinstance(i, float) should be True
elif rec.seq[int(i) : int(i) + 3] == "---":
rf_num[k] += 1
if len(set(rf_num)) != 1:
raise RuntimeError("Number of alignable codons unequal in given records")
i = 0
rec_num = len(recs)
while True:
add_lst = []
try:
col_rf_lst = [k[i] for k in full_rf_table_lst]
except IndexError:
# we probably reached the last codon
break
for j, k in enumerate(col_rf_lst):
add_lst.append((j, int(k)))
if isinstance(k, float) and recs[j].seq[int(k) : int(k) + 3] != "---":
m, p = find_next_int(k, full_rf_table_lst[j])
if (m - k) % 3 != 0:
gap_num = 3 - (m - k) % 3
else:
gap_num = 0
if gap_num != 0:
gaps = "-" * int(gap_num)
seq = CodonSeq(rf_table=recs[j].seq.rf_table)
seq += recs[j].seq[: int(k)] + gaps + recs[j].seq[int(k) :]
full_rf_table = full_rf_table_lst[j]
bp = full_rf_table.index(k)
full_rf_table = full_rf_table[:bp] + [
v + int(gap_num) for v in full_rf_table[bp + 1 :]
]
full_rf_table_lst[j] = full_rf_table
recs[j].seq = seq
add_lst.pop()
gap_num += m - k
i += p - 1
if len(add_lst) != rec_num:
for j, k in add_lst:
seq = CodonSeq(rf_table=recs[j].seq.rf_table)
gaps = "-" * int(gap_num)
seq += recs[j].seq[: int(k)] + gaps + recs[j].seq[int(k) :]
full_rf_table = full_rf_table_lst[j]
bp = full_rf_table.index(k)
inter_rf = []
for t in range(0, len(gaps), 3):
inter_rf.append(k + t + 3.0)
full_rf_table = (
full_rf_table[:bp]
+ inter_rf
+ [v + int(gap_num) for v in full_rf_table[bp:]]
)
full_rf_table_lst[j] = full_rf_table
recs[j].seq = seq
i += 1
return recs |
Read a COMPASS file containing one COMPASS record. | def read(handle):
"""Read a COMPASS file containing one COMPASS record."""
record = None
try:
line = next(handle)
record = Record()
__read_names(record, line)
line = next(handle)
__read_threshold(record, line)
line = next(handle)
__read_lengths(record, line)
line = next(handle)
__read_profilewidth(record, line)
line = next(handle)
__read_scores(record, line)
except StopIteration:
if not record:
raise ValueError("No record found in handle") from None
else:
raise ValueError("Unexpected end of stream.") from None
for line in handle:
if not line.strip(): # skip empty lines
continue
__read_query_alignment(record, line)
try:
line = next(handle)
__read_positive_alignment(record, line)
line = next(handle)
__read_hit_alignment(record, line)
except StopIteration:
raise ValueError("Unexpected end of stream.") from None
return record |
Iterate over records in a COMPASS file. | def parse(handle):
"""Iterate over records in a COMPASS file."""
record = None
try:
line = next(handle)
except StopIteration:
return
while True:
try:
record = Record()
__read_names(record, line)
line = next(handle)
__read_threshold(record, line)
line = next(handle)
__read_lengths(record, line)
line = next(handle)
__read_profilewidth(record, line)
line = next(handle)
__read_scores(record, line)
except StopIteration:
raise ValueError("Unexpected end of stream.") from None
for line in handle:
if not line.strip():
continue
if "Ali1:" in line:
yield record
break
__read_query_alignment(record, line)
try:
line = next(handle)
__read_positive_alignment(record, line)
line = next(handle)
__read_hit_alignment(record, line)
except StopIteration:
raise ValueError("Unexpected end of stream.") from None
else:
yield record
break |
Make a back-table (naive single codon mapping).
ONLY RETURNS A SINGLE CODON, chosen from the possible alternatives
based on their sort order. | def make_back_table(table, default_stop_codon):
"""Back a back-table (naive single codon mapping).
ONLY RETURNS A SINGLE CODON, chosen from the possible alternatives
based on their sort order.
"""
# Do the sort so changes in the hash implementation won't affect
# the result when one amino acid is coded by more than one codon.
back_table = {}
for key in sorted(table):
back_table[table[key]] = key
back_table[None] = default_stop_codon
return back_table |
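A usage sketch with the standard DNA table: because the keys are sorted before insertion, each amino acid maps to the alphabetically last codon that encodes it.

from Bio.Data import CodonTable

std = CodonTable.unambiguous_dna_by_id[1]
back = make_back_table(std.forward_table, std.stop_codons[0])
print(back["M"])  # "ATG" -- methionine has a single codon
print(back[None])  # the default stop codon passed in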
Return all possible encoded amino acids for ambiguous codon. | def list_possible_proteins(codon, forward_table, ambiguous_nucleotide_values):
"""Return all possible encoded amino acids for ambiguous codon."""
c1, c2, c3 = codon
x1 = ambiguous_nucleotide_values[c1]
x2 = ambiguous_nucleotide_values[c2]
x3 = ambiguous_nucleotide_values[c3]
possible = {}
stops = []
for y1 in x1:
for y2 in x2:
for y3 in x3:
try:
possible[forward_table[y1 + y2 + y3]] = 1
except KeyError:
# If tripping over a stop codon
stops.append(y1 + y2 + y3)
if stops:
if possible:
raise TranslationError(
f"ambiguous codon {codon!r} codes for both proteins and stop codons"
)
# This is a true stop codon - tell the caller about it
raise KeyError(codon)
return list(possible) |
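A sketch assuming the standard table and IUPAC ambiguity values: an ambiguous codon whose expansions all encode the same amino acid yields a single-entry list, a pure-stop ambiguity raises KeyError, and a mixed one raises TranslationError.

from Bio.Data import CodonTable, IUPACData

fwd = CodonTable.unambiguous_dna_by_id[1].forward_table
amb = IUPACData.ambiguous_dna_values
print(list_possible_proteins("GCN", fwd, amb))  # ['A'] -- alanine is fourfold degenerate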
Extend a codon list to include all possible ambiguous codons.
e.g.::
['TAG', 'TAA'] -> ['TAG', 'TAA', 'TAR']
['UAG', 'UGA'] -> ['UAG', 'UGA', 'URA']
Note that ['TAG', 'TGA'] -> ['TAG', 'TGA'], this does not add 'TRR'
(which could also mean 'TAA' or 'TGG').
Thus only two more codons are added in the following:
e.g.::
['TGA', 'TAA', 'TAG'] -> ['TGA', 'TAA', 'TAG', 'TRA', 'TAR']
Returns a new (longer) list of codon strings. | def list_ambiguous_codons(codons, ambiguous_nucleotide_values):
"""Extend a codon list to include all possible ambiguous codons.
e.g.::
['TAG', 'TAA'] -> ['TAG', 'TAA', 'TAR']
['UAG', 'UGA'] -> ['UAG', 'UGA', 'URA']
Note that ['TAG', 'TGA'] -> ['TAG', 'TGA'], this does not add 'TRR'
(which could also mean 'TAA' or 'TGG').
Thus only two more codons are added in the following:
e.g.::
['TGA', 'TAA', 'TAG'] -> ['TGA', 'TAA', 'TAG', 'TRA', 'TAR']
Returns a new (longer) list of codon strings.
"""
# Note ambiguous_nucleotide_values['R'] = 'AG' (etc)
# This will generate things like 'TRR' from ['TAG', 'TGA'], which
# we don't want to include:
c1_list = sorted(
letter
for letter, meanings in ambiguous_nucleotide_values.items()
if {codon[0] for codon in codons}.issuperset(set(meanings))
)
c2_list = sorted(
letter
for letter, meanings in ambiguous_nucleotide_values.items()
if {codon[1] for codon in codons}.issuperset(set(meanings))
)
c3_list = sorted(
letter
for letter, meanings in ambiguous_nucleotide_values.items()
if {codon[2] for codon in codons}.issuperset(set(meanings))
)
# candidates is a list (not a set) to preserve the iteration order
candidates = []
for c1 in c1_list:
for c2 in c2_list:
for c3 in c3_list:
codon = c1 + c2 + c3
if codon not in candidates and codon not in codons:
candidates.append(codon)
answer = codons[:] # copy
# print("Have %i new candidates" % len(candidates))
for ambig_codon in candidates:
wanted = True
# e.g. 'TRR' -> 'TAA', 'TAG', 'TGA', 'TGG'
for codon in [
c1 + c2 + c3
for c1 in ambiguous_nucleotide_values[ambig_codon[0]]
for c2 in ambiguous_nucleotide_values[ambig_codon[1]]
for c3 in ambiguous_nucleotide_values[ambig_codon[2]]
]:
if codon not in codons:
# This ambiguous codon can code for a non-stop, exclude it!
wanted = False
# print("Rejecting %s" % ambig_codon)
continue
if wanted:
answer.append(ambig_codon)
return answer |
Turn codon table data into objects (PRIVATE).
The data is stored in the dictionaries. | def register_ncbi_table(name, alt_name, id, table, start_codons, stop_codons):
"""Turn codon table data into objects (PRIVATE).
The data is stored in the dictionaries.
"""
# In most cases names are divided by "; ", however there is also
# Table 11 'Bacterial, Archaeal and Plant Plastid Code', previously
# 'Bacterial and Plant Plastid' which used to be just 'Bacterial'
names = [
x.strip() for x in name.replace(" and ", "; ").replace(", ", "; ").split("; ")
]
dna = NCBICodonTableDNA(id, names + [alt_name], table, start_codons, stop_codons)
ambig_dna = AmbiguousCodonTable(
dna,
IUPACData.ambiguous_dna_letters,
IUPACData.ambiguous_dna_values,
IUPACData.extended_protein_letters,
IUPACData.extended_protein_values,
)
# replace all T's with U's for the RNA tables
rna_table = {}
generic_table = {}
for codon, val in table.items():
generic_table[codon] = val
codon = codon.replace("T", "U")
generic_table[codon] = val
rna_table[codon] = val
rna_start_codons = []
generic_start_codons = []
for codon in start_codons:
generic_start_codons.append(codon)
# We need to check if 'T' is in the codon, otherwise
# generic_start_codons may contain duplicates
if "T" in codon:
codon = codon.replace("T", "U")
generic_start_codons.append(codon)
rna_start_codons.append(codon)
rna_stop_codons = []
generic_stop_codons = []
for codon in stop_codons:
generic_stop_codons.append(codon)
if "T" in codon:
codon = codon.replace("T", "U")
generic_stop_codons.append(codon)
rna_stop_codons.append(codon)
generic = NCBICodonTable(
id, names + [alt_name], generic_table, generic_start_codons, generic_stop_codons
)
# The following isn't very elegant, but seems to work nicely.
_merged_values = dict(IUPACData.ambiguous_rna_values.items())
_merged_values["T"] = "U"
ambig_generic = AmbiguousCodonTable(
generic,
None,
_merged_values,
IUPACData.extended_protein_letters,
IUPACData.extended_protein_values,
)
rna = NCBICodonTableRNA(
id, names + [alt_name], rna_table, rna_start_codons, rna_stop_codons
)
ambig_rna = AmbiguousCodonTable(
rna,
IUPACData.ambiguous_rna_letters,
IUPACData.ambiguous_rna_values,
IUPACData.extended_protein_letters,
IUPACData.extended_protein_values,
)
if id == 1:
global standard_dna_table, standard_rna_table
standard_dna_table = dna
standard_rna_table = rna
unambiguous_dna_by_id[id] = dna
unambiguous_rna_by_id[id] = rna
generic_by_id[id] = generic
ambiguous_dna_by_id[id] = ambig_dna
ambiguous_rna_by_id[id] = ambig_rna
ambiguous_generic_by_id[id] = ambig_generic
if alt_name is not None:
names.append(alt_name)
for name in names:
unambiguous_dna_by_name[name] = dna
unambiguous_rna_by_name[name] = rna
generic_by_name[name] = generic
ambiguous_dna_by_name[name] = ambig_dna
ambiguous_rna_by_name[name] = ambig_rna
ambiguous_generic_by_name[name] = ambig_generic |
Iterate over primer3 output as Bio.Emboss.Primer3.Record objects. | def parse(handle):
"""Iterate over primer3 output as Bio.Emboss.Primer3.Record objects."""
# Skip blank lines at head of file
while True:
line = handle.readline()
if line.strip():
break # Starting a record
# Read each record
record = None
primer = None
while True:
if line.startswith(("# EPRIMER3", "# PRIMER3")):
# Record data
if record is not None:
yield record
record = Record()
record.comments += line
primer = None
elif line.startswith("#"):
if (
line.strip()
!= "# Start Len Tm GC% Sequence"
):
record.comments += line
elif not line.strip():
pass
elif line[5:19] == "PRODUCT SIZE: ":
primer = Primers()
primer.size = int(line[19:])
record.primers.append(primer)
elif line[5:19] == "FORWARD PRIMER":
words = line.split()
if not primer or primer.size == 0:
primer = Primers()
record.primers.append(primer)
primer.forward_start = int(words[2])
primer.forward_length = int(words[3])
primer.forward_tm = float(words[4])
primer.forward_gc = float(words[5])
primer.forward_seq = words[6]
elif line[5:19] == "REVERSE PRIMER":
words = line.split()
if not primer or primer.size == 0:
primer = Primers()
record.primers.append(primer)
primer.reverse_start = int(words[2])
primer.reverse_length = int(words[3])
primer.reverse_tm = float(words[4])
primer.reverse_gc = float(words[5])
primer.reverse_seq = words[6]
elif line[5:19] == "INTERNAL OLIGO":
words = line.split()
if not primer or primer.size == 0:
primer = Primers()
record.primers.append(primer)
primer.internal_start = int(words[2])
primer.internal_length = int(words[3])
primer.internal_tm = float(words[4])
primer.internal_gc = float(words[5])
try:
primer.internal_seq = words[6]
except IndexError: # eprimer3 reports oligo without sequence
primer.internal_seq = ""
try:
line = next(handle)
except StopIteration:
break
if record:
yield record |
Parse primer3 output into a Bio.Emboss.Primer3.Record object.
This is for when there is one and only one target sequence. If
designing primers for multiple sequences, use the parse function. | def read(handle):
"""Parse primer3 output into a Bio.Emboss.Primer3.Record object.
This is for when there is one and only one target sequence. If
designing primers for multiple sequences, use the parse function.
"""
iterator = parse(handle)
try:
record = next(iterator)
except StopIteration:
raise ValueError("No records found in handle") from None
try:
next(iterator)
raise ValueError("More than one record found in handle")
except StopIteration:
pass
return record |
Get output from primersearch into a PrimerSearchOutputRecord. | def read(handle):
"""Get output from primersearch into a PrimerSearchOutputRecord."""
record = OutputRecord()
for line in handle:
if not line.strip():
continue
elif line.startswith("Primer name"):
name = line.split()[-1]
record.amplifiers[name] = []
elif line.startswith("Amplimer"):
amplifier = Amplifier()
record.amplifiers[name].append(amplifier)
elif line.startswith("\tSequence: "):
amplifier.hit_info = line.replace("\tSequence: ", "")
elif line.startswith("\tAmplimer length: "):
length = line.split()[-2]
amplifier.length = int(length)
else:
amplifier.hit_info += line
for name in record.amplifiers:
for amplifier in record.amplifiers[name]:
amplifier.hit_info = amplifier.hit_info.rstrip()
return record |
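A hedged usage sketch: read() takes any file-like handle. The input below is constructed to satisfy the parser's line prefixes, not copied from real EMBOSS output.

from io import StringIO

handle = StringIO(
    "Primer name PRIMER_1\n"
    "Amplimer 1\n"
    "\tSequence: AB000001\n"
    "\tAmplimer length: 195 bp\n"
)
record = read(handle)
print(record.amplifiers["PRIMER_1"][0].length)  # 195
print(record.amplifiers["PRIMER_1"][0].hit_info)  # "AB000001"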
Post a file of identifiers for future use.
Posts a file containing a list of UIs for future use in the user's
environment to use with subsequent search strategies.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EPost
:returns: Handle to the results.
:raises urllib.error.URLError: If there's a network error. | def epost(db, **keywds):
"""Post a file of identifiers for future use.
Posts a file containing a list of UIs for future use in the user's
environment to use with subsequent search strategies.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EPost
:returns: Handle to the results.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/epost.fcgi"
variables = {"db": db}
variables.update(keywds)
request = _build_request(cgi, variables, post=True)
return _open(request) |
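A hedged usage sketch (network access and a registered email address are assumed): post two PubMed IDs, then reuse the returned WebEnv and QueryKey with subsequent E-utility calls.

from Bio import Entrez

Entrez.email = "[email protected]"
handle = Entrez.epost("pubmed", id="19304878,14630660")
result = Entrez.read(handle)
handle.close()
print(result["WebEnv"], result["QueryKey"])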
Fetch Entrez results which are returned as a handle.
EFetch retrieves records in the requested format from a list or set of one or
more UIs or from user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.efetch(db="nucleotide", id="AY851612", rettype="gb", retmode="text")
>>> print(handle.readline().strip())
LOCUS AY851612 892 bp DNA linear PLN 10-APR-2007
>>> handle.close()
This will automatically use an HTTP POST rather than HTTP GET if there
are over 200 identifiers as recommended by the NCBI.
**Warning:** The NCBI changed the default retmode in Feb 2012, so many
databases which previously returned text output now give XML.
:returns: Handle to the results.
:raises urllib.error.URLError: If there's a network error. | def efetch(db, **keywords):
"""Fetch Entrez results which are returned as a handle.
EFetch retrieves records in the requested format from a list or set of one or
more UIs or from user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.efetch(db="nucleotide", id="AY851612", rettype="gb", retmode="text")
>>> print(handle.readline().strip())
LOCUS AY851612 892 bp DNA linear PLN 10-APR-2007
>>> handle.close()
This will automatically use an HTTP POST rather than HTTP GET if there
are over 200 identifiers as recommended by the NCBI.
**Warning:** The NCBI changed the default retmode in Feb 2012, so many
databases which previously returned text output now give XML.
:returns: Handle to the results.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi"
variables = {"db": db}
variables.update(keywords)
request = _build_request(cgi, variables)
return _open(request) |
Run an Entrez search and return a handle to the results.
ESearch searches and retrieves primary IDs (for use in EFetch, ELink
and ESummary) and term translations, and optionally retains results
for future use in the user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.esearch(
... db="nucleotide", retmax=10, idtype="acc",
... term="opuntia[ORGN] accD 2007[Publication Date]"
... )
...
>>> record = Entrez.read(handle)
>>> handle.close()
>>> int(record["Count"]) >= 2
True
>>> "EF590893.1" in record["IdList"]
True
>>> "EF590892.1" in record["IdList"]
True
:returns: Handle to the results, which are always in XML format.
:raises urllib.error.URLError: If there's a network error. | def esearch(db, term, **keywds):
"""Run an Entrez search and return a handle to the results.
ESearch searches and retrieves primary IDs (for use in EFetch, ELink
and ESummary) and term translations, and optionally retains results
for future use in the user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.esearch(
... db="nucleotide", retmax=10, idtype="acc",
... term="opuntia[ORGN] accD 2007[Publication Date]"
... )
...
>>> record = Entrez.read(handle)
>>> handle.close()
>>> int(record["Count"]) >= 2
True
>>> "EF590893.1" in record["IdList"]
True
>>> "EF590892.1" in record["IdList"]
True
:returns: Handle to the results, which are always in XML format.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi"
variables = {"db": db, "term": term}
variables.update(keywds)
request = _build_request(cgi, variables)
return _open(request) |
Check for linked external articles and return a handle.
ELink checks for the existence of an external or Related Articles link
from a list of one or more primary IDs; retrieves IDs and relevancy
scores for links to Entrez databases or Related Articles; creates a
hyperlink to the primary LinkOut provider for a specific ID and
database, or lists LinkOut URLs and attributes for multiple IDs.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ELink
Note that ELink treats the "id" parameter differently than the other
tools when multiple values are given. You should generally pass multiple
UIDs as a list of strings or integers. This will provide a "one-to-one"
mapping from source database UIDs to destination database UIDs in the
result. If multiple source UIDs are passed as a single comma-delimited
string all destination UIDs will be mixed together in the result.
This example finds articles related to the Biopython application
note's entry in the PubMed database:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> pmid = "19304878"
>>> handle = Entrez.elink(dbfrom="pubmed", id=pmid, linkname="pubmed_pubmed")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> print(record[0]["LinkSetDb"][0]["LinkName"])
pubmed_pubmed
>>> linked = [link["Id"] for link in record[0]["LinkSetDb"][0]["Link"]]
>>> "14630660" in linked
True
This is explained in much more detail in the Biopython Tutorial.
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error. | def elink(**keywds):
"""Check for linked external articles and return a handle.
ELink checks for the existence of an external or Related Articles link
from a list of one or more primary IDs; retrieves IDs and relevancy
scores for links to Entrez databases or Related Articles; creates a
hyperlink to the primary LinkOut provider for a specific ID and
database, or lists LinkOut URLs and attributes for multiple IDs.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ELink
Note that ELink treats the "id" parameter differently than the other
tools when multiple values are given. You should generally pass multiple
UIDs as a list of strings or integers. This will provide a "one-to-one"
mapping from source database UIDs to destination database UIDs in the
result. If multiple source UIDs are passed as a single comma-delimited
string all destination UIDs will be mixed together in the result.
This example finds articles related to the Biopython application
note's entry in the PubMed database:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> pmid = "19304878"
>>> handle = Entrez.elink(dbfrom="pubmed", id=pmid, linkname="pubmed_pubmed")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> print(record[0]["LinkSetDb"][0]["LinkName"])
pubmed_pubmed
>>> linked = [link["Id"] for link in record[0]["LinkSetDb"][0]["Link"]]
>>> "14630660" in linked
True
This is explained in much more detail in the Biopython Tutorial.
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi"
variables = {}
variables.update(keywds)
request = _build_request(cgi, variables, join_ids=False)
return _open(request) |
Return a summary of the Entrez databases as a results handle.
EInfo provides field names, index term counts, last update, and
available links for each Entrez database.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EInfo
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> record = Entrez.read(Entrez.einfo())
>>> 'pubmed' in record['DbList']
True
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error. | def einfo(**keywds):
"""Return a summary of the Entrez databases as a results handle.
EInfo provides field names, index term counts, last update, and
available links for each Entrez database.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EInfo
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> record = Entrez.read(Entrez.einfo())
>>> 'pubmed' in record['DbList']
True
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/einfo.fcgi"
variables = {}
variables.update(keywds)
request = _build_request(cgi, variables)
return _open(request) |
Retrieve document summaries as a results handle.
ESummary retrieves document summaries from a list of primary IDs or
from the user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESummary
This example discovers more about entry 19923 in the structure
database:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.esummary(db="structure", id="19923")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> print(record[0]["Id"])
19923
>>> print(record[0]["PdbDescr"])
CRYSTAL STRUCTURE OF E. COLI ACONITASE B
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error. | def esummary(**keywds):
"""Retrieve document summaries as a results handle.
ESummary retrieves document summaries from a list of primary IDs or
from the user's environment.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESummary
This example discovers more about entry 19923 in the structure
database:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.esummary(db="structure", id="19923")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> print(record[0]["Id"])
19923
>>> print(record[0]["PdbDescr"])
CRYSTAL STRUCTURE OF E. COLI ACONITASE B
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi"
variables = {}
variables.update(keywds)
request = _build_request(cgi, variables)
return _open(request) |
Provide Entrez database counts for a global search.
EGQuery provides Entrez database counts in XML for a single search
using Global Query.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EGQuery
This quick example based on a longer version from the Biopython
Tutorial just checks there are over 60 matches for 'Biopython'
in PubMedCentral:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.egquery(term="biopython")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> for row in record["eGQueryResult"]:
... if "pmc" in row["DbName"]:
... print(int(row["Count"]) > 60)
True
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error. | def egquery(**keywds):
"""Provide Entrez database counts for a global search.
EGQuery provides Entrez database counts in XML for a single search
using Global Query.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EGQuery
This quick example based on a longer version from the Biopython
Tutorial just checks there are over 60 matches for 'Biopython'
in PubMedCentral:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> handle = Entrez.egquery(term="biopython")
>>> record = Entrez.read(handle)
>>> handle.close()
>>> for row in record["eGQueryResult"]:
... if "pmc" in row["DbName"]:
... print(int(row["Count"]) > 60)
True
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/egquery.fcgi"
variables = {}
variables.update(keywds)
request = _build_request(cgi, variables)
return _open(request) |
Retrieve spelling suggestions as a results handle.
ESpell retrieves spelling suggestions, if available.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESpell
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> record = Entrez.read(Entrez.espell(term="biopythooon"))
>>> print(record["Query"])
biopythooon
>>> print(record["CorrectedQuery"])
biopython
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error. | def espell(**keywds):
"""Retrieve spelling suggestions as a results handle.
ESpell retrieves spelling suggestions, if available.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESpell
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> record = Entrez.read(Entrez.espell(term="biopythooon"))
>>> print(record["Query"])
biopythooon
>>> print(record["CorrectedQuery"])
biopython
:returns: Handle to the results, by default in XML format.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/espell.fcgi"
variables = {}
variables.update(keywds)
request = _build_request(cgi, variables)
return _open(request) |
Retrieve PMIDs for input citation strings, returned as a handle.
ECitMatch retrieves PubMed IDs (PMIDs) that correspond to a set of input
citation strings.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ECitMatch
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> citation_1 = {"journal_title": "proc natl acad sci u s a",
... "year": "1991", "volume": "88", "first_page": "3248",
... "author_name": "mann bj", "key": "citation_1"}
>>> handle = Entrez.ecitmatch(db="pubmed", bdata=[citation_1])
>>> print(handle.read().strip().split("|"))
['proc natl acad sci u s a', '1991', '88', '3248', 'mann bj', 'citation_1', '2014248']
>>> handle.close()
:returns: Handle to the results, by default in plain text.
:raises urllib.error.URLError: If there's a network error. | def ecitmatch(**keywds):
"""Retrieve PMIDs for input citation strings, returned as a handle.
ECitMatch retrieves PubMed IDs (PMIDs) that correspond to a set of input
citation strings.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ECitMatch
Short example:
>>> from Bio import Entrez
>>> Entrez.email = "[email protected]"
>>> citation_1 = {"journal_title": "proc natl acad sci u s a",
... "year": "1991", "volume": "88", "first_page": "3248",
... "author_name": "mann bj", "key": "citation_1"}
>>> handle = Entrez.ecitmatch(db="pubmed", bdata=[citation_1])
>>> print(handle.read().strip().split("|"))
['proc natl acad sci u s a', '1991', '88', '3248', 'mann bj', 'citation_1', '2014248']
>>> handle.close()
:returns: Handle to the results, by default in plain text.
:raises urllib.error.URLError: If there's a network error.
"""
cgi = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/ecitmatch.cgi"
variables = _update_ecitmatch_variables(keywds)
request = _build_request(cgi, variables, ecitmatch=True)
return _open(request) |
Parse an XML file from the NCBI Entrez Utilities into python objects.
This function parses an XML file created by NCBI's Entrez Utilities,
returning a multilevel data structure of Python lists and dictionaries.
Most XML files returned by NCBI's Entrez Utilities can be parsed by
this function, provided its DTD is available. Biopython includes the
DTDs for most commonly used Entrez Utilities.
The argument ``source`` must be a file or file-like object opened in binary
mode, or a filename. The parser detects the encoding from the XML file, and
uses it to convert all text in the XML to the correct Unicode string. The
functions in Bio.Entrez to access NCBI Entrez will automatically return XML
data in binary mode. For files, use mode "rb" when opening the file, as in
>>> from Bio import Entrez
>>> path = "Entrez/esearch1.xml"
>>> stream = open(path, "rb") # opened in binary mode
>>> record = Entrez.read(stream)
>>> print(record['QueryTranslation'])
biopython[All Fields]
>>> stream.close()
Alternatively, you can use the filename directly, as in
>>> record = Entrez.read(path)
>>> print(record['QueryTranslation'])
biopython[All Fields]
which is safer, as the file stream will automatically be closed after the
record has been read, or if an error occurs.
If validate is True (default), the parser will validate the XML file
against the DTD, and raise an error if the XML file contains tags that
are not represented in the DTD. If validate is False, the parser will
simply skip such tags.
If escape is True, all characters that are not valid HTML are replaced
by HTML escape characters to guarantee that the returned strings are
valid HTML fragments. For example, a less-than sign (<) is replaced by
&lt;. If escape is False (default), the string is returned as is.
If ignore_errors is False (default), any error messages in the XML file
will raise a RuntimeError. If ignore_errors is True, error messages will
be stored as ErrorElement items, without raising an exception.
Whereas the data structure seems to consist of generic Python lists,
dictionaries, strings, and so on, each of these is actually a class
derived from the base type. This allows us to store the attributes
(if any) of each element in a dictionary my_element.attributes, and
the tag name in my_element.tag. | def read(source, validate=True, escape=False, ignore_errors=False):
"""Parse an XML file from the NCBI Entrez Utilities into python objects.
This function parses an XML file created by NCBI's Entrez Utilities,
returning a multilevel data structure of Python lists and dictionaries.
Most XML files returned by NCBI's Entrez Utilities can be parsed by
this function, provided its DTD is available. Biopython includes the
DTDs for most commonly used Entrez Utilities.
The argument ``source`` must be a file or file-like object opened in binary
mode, or a filename. The parser detects the encoding from the XML file, and
uses it to convert all text in the XML to the correct Unicode string. The
functions in Bio.Entrez to access NCBI Entrez will automatically return XML
data in binary mode. For files, use mode "rb" when opening the file, as in
>>> from Bio import Entrez
>>> path = "Entrez/esearch1.xml"
>>> stream = open(path, "rb") # opened in binary mode
>>> record = Entrez.read(stream)
>>> print(record['QueryTranslation'])
biopython[All Fields]
>>> stream.close()
Alternatively, you can use the filename directly, as in
>>> record = Entrez.read(path)
>>> print(record['QueryTranslation'])
biopython[All Fields]
which is safer, as the file stream will automatically be closed after the
record has been read, or if an error occurs.
If validate is True (default), the parser will validate the XML file
against the DTD, and raise an error if the XML file contains tags that
are not represented in the DTD. If validate is False, the parser will
simply skip such tags.
If escape is True, all characters that are not valid HTML are replaced
by HTML escape characters to guarantee that the returned strings are
valid HTML fragments. For example, a less-than sign (<) is replaced by
&lt;. If escape is False (default), the string is returned as is.
If ignore_errors is False (default), any error messages in the XML file
will raise a RuntimeError. If ignore_errors is True, error messages will
be stored as ErrorElement items, without raising an exception.
Whereas the data structure seems to consist of generic Python lists,
dictionaries, strings, and so on, each of these is actually a class
derived from the base type. This allows us to store the attributes
(if any) of each element in a dictionary my_element.attributes, and
the tag name in my_element.tag.
"""
from .Parser import DataHandler
handler = DataHandler(validate, escape, ignore_errors)
record = handler.read(source)
return record |
Parse an XML file from the NCBI Entrez Utilities into python objects.
This function parses an XML file created by NCBI's Entrez Utilities,
returning a multilevel data structure of Python lists and dictionaries.
This function is suitable for XML files that (in Python) can be represented
as a list of individual records. Whereas 'read' reads the complete file
and returns a single Python list, 'parse' is a generator function that
returns the records one by one. This function is therefore particularly
useful for parsing large files.
Most XML files returned by NCBI's Entrez Utilities can be parsed by
this function, provided its DTD is available. Biopython includes the
DTDs for most commonly used Entrez Utilities.
The argument ``source`` must be a file or file-like object opened in binary
mode, or a filename. The parser detects the encoding from the XML file, and
uses it to convert all text in the XML to the correct Unicode string. The
functions in Bio.Entrez to access NCBI Entrez will automatically return XML
data in binary mode. For files, use mode "rb" when opening the file, as in
>>> from Bio import Entrez
>>> path = "Entrez/pubmed1.xml"
>>> stream = open(path, "rb") # opened in binary mode
>>> records = Entrez.parse(stream)
>>> for record in records:
... print(record['MedlineCitation']['Article']['Journal']['Title'])
...
Social justice (San Francisco, Calif.)
Biochimica et biophysica acta
>>> stream.close()
Alternatively, you can use the filename directly, as in
>>> records = Entrez.parse(path)
>>> for record in records:
... print(record['MedlineCitation']['Article']['Journal']['Title'])
...
Social justice (San Francisco, Calif.)
Biochimica et biophysica acta
which is safer, as the file stream will automatically be closed after all
the records have been read, or if an error occurs.
If validate is True (default), the parser will validate the XML file
against the DTD, and raise an error if the XML file contains tags that
are not represented in the DTD. If validate is False, the parser will
simply skip such tags.
If escape is True, all characters that are not valid HTML are replaced
by HTML escape characters to guarantee that the returned strings are
valid HTML fragments. For example, a less-than sign (<) is replaced by
&lt;. If escape is False (default), the string is returned as is.
If ignore_errors is False (default), any error messages in the XML file
will raise a RuntimeError. If ignore_errors is True, error messages will
be stored as ErrorElement items, without raising an exception.
Whereas the data structure seems to consist of generic Python lists,
dictionaries, strings, and so on, each of these is actually a class
derived from the base type. This allows us to store the attributes
(if any) of each element in a dictionary my_element.attributes, and
the tag name in my_element.tag. | def parse(source, validate=True, escape=False, ignore_errors=False):
"""Parse an XML file from the NCBI Entrez Utilities into python objects.
This function parses an XML file created by NCBI's Entrez Utilities,
returning a multilevel data structure of Python lists and dictionaries.
This function is suitable for XML files that (in Python) can be represented
as a list of individual records. Whereas 'read' reads the complete file
and returns a single Python list, 'parse' is a generator function that
returns the records one by one. This function is therefore particularly
useful for parsing large files.
Most XML files returned by NCBI's Entrez Utilities can be parsed by
this function, provided its DTD is available. Biopython includes the
DTDs for most commonly used Entrez Utilities.
The argument ``source`` must be a file or file-like object opened in binary
mode, or a filename. The parser detects the encoding from the XML file, and
uses it to convert all text in the XML to the correct Unicode string. The
functions in Bio.Entrez to access NCBI Entrez will automatically return XML
data in binary mode. For files, use mode "rb" when opening the file, as in
>>> from Bio import Entrez
>>> path = "Entrez/pubmed1.xml"
>>> stream = open(path, "rb") # opened in binary mode
>>> records = Entrez.parse(stream)
>>> for record in records:
... print(record['MedlineCitation']['Article']['Journal']['Title'])
...
Social justice (San Francisco, Calif.)
Biochimica et biophysica acta
>>> stream.close()
Alternatively, you can use the filename directly, as in
>>> records = Entrez.parse(path)
>>> for record in records:
... print(record['MedlineCitation']['Article']['Journal']['Title'])
...
Social justice (San Francisco, Calif.)
Biochimica et biophysica acta
which is safer, as the file stream will automatically be closed after all
the records have been read, or if an error occurs.
If validate is True (default), the parser will validate the XML file
against the DTD, and raise an error if the XML file contains tags that
are not represented in the DTD. If validate is False, the parser will
simply skip such tags.
If escape is True, all characters that are not valid HTML are replaced
by HTML escape characters to guarantee that the returned strings are
valid HTML fragments. For example, a less-than sign (<) is replaced by
<. If escape is False (default), the string is returned as is.
If ignore_errors is False (default), any error messages in the XML file
will raise a RuntimeError. If ignore_errors is True, error messages will
be stored as ErrorElement items, without raising an exception.
Whereas the data structure seems to consist of generic Python lists,
dictionaries, strings, and so on, each of these is actually a class
derived from the base type. This allows us to store the attributes
(if any) of each element in a dictionary my_element.attributes, and
the tag name in my_element.tag.
"""
from .Parser import DataHandler
handler = DataHandler(validate, escape, ignore_errors)
records = handler.parse(source)
return records |
Make an HTTP request to Entrez, handling errors and enforcing rate limiting (PRIVATE).
Does some simple error checking and will try again after certain types of errors, up to
``max_tries`` times. This function also enforces the "up to three queries per second
rule" to avoid abusing the NCBI servers (this limit is increased to 10 if using an API key).
:param request: A Request object returned by ``_build_request``.
:type request: urllib.request.Request
:returns: Handle to HTTP response as returned by ``urllib.request.urlopen``. Will be wrapped in
an ``io.TextIOWrapper`` if its content type is plain text.
:rtype: http.client.HTTPResponse or io.TextIOWrapper
:raises urllib.error.URLError: Errors raised by ``urlopen`` past the maximum number of retries. | def _open(request):
"""Make an HTTP request to Entrez, handling errors and enforcing rate limiting (PRIVATE).
Does some simple error checking and will try again after certain types of errors, up to
``max_tries`` times. This function also enforces the "up to three queries per second
rule" to avoid abusing the NCBI servers (this limit is increased to 10 if using an API key).
:param request: A Request object returned by ``_build_request``.
:type request: urllib.request.Request
:returns: Handle to HTTP response as returned by ``urllib.request.urlopen``. Will be wrapped in
an ``io.TextIOWrapper`` if its content type is plain text.
:rtype: http.client.HTTPResponse or io.TextIOWrapper
:raises urllib.error.URLError: Errors raised by ``urlopen`` past the maximum number of retries.
"""
# NCBI requirement: at most three queries per second if no API key is provided,
# or equivalently, at least a third of a second between queries.
# Using just 0.333333334 seconds sometimes hit the NCBI rate limit;
# the slightly longer pause of 0.37 seconds has proved more reliable.
delay = 0.1 if _has_api_key(request) else 0.37
current = time.time()
wait = _open.previous + delay - current
if wait > 0:
time.sleep(wait)
_open.previous = current + wait
else:
_open.previous = current
for i in range(max_tries):
try:
handle = urlopen(request)
except HTTPError as exception:
# Reraise if the final try fails
if i >= max_tries - 1:
raise
# Reraise if the exception is triggered by a HTTP 4XX error
# indicating some kind of bad request, UNLESS it's specifically a
# 429 "Too Many Requests" response. NCBI seems to sometimes
# erroneously return 429s even when their rate limit is
# honored (and indeed even with the rate-limit-related fudging
# higher up in this function in place), so the best we can do is
# treat them as a serverside error and try again after sleeping
# for a bit.
if exception.code // 100 == 4 and exception.code != 429:
raise
except URLError:
# Reraise if the final try fails
if i >= max_tries - 1:
raise
# Treat as a transient error and try again after a brief delay:
time.sleep(sleep_between_tries)
else:
break
subtype = handle.headers.get_content_subtype()
if subtype == "plain":
url = handle.url
handle = io.TextIOWrapper(handle, encoding="UTF-8")
handle.url = url
return handle |
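The limiter reads ``_open.previous`` on the very first call, so the attribute must already exist; in the Biopython source it is seeded immediately after the function definition, along these lines:

# Seed the timestamp used by the rate limiter above; without it the
# first call to _open would raise AttributeError.
_open.previous = 0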
Build a Request object for an E-utility.
:param str cgi: base URL for the CGI script to access.
:param params: Mapping containing options to pass to the CGI script. Keys must be strings.
:type params: dict or None
:param bool post: Whether to use the HTTP POST method rather than GET. By default (``post=None``),
POST is used if the URL encoded parameters would be over 1000 characters long, as is
suggested in the E-Utilities documentation.
:param bool ecitmatch: Don't URL-encode pipe ("|") characters; the ecitmatch
tool expects them unencoded.
:param bool join_ids: Passed to ``_construct_params``.
:returns: A request object ready to be passed to ``_open``.
:rtype: urllib.request.Request | def _build_request(cgi, params=None, post=None, ecitmatch=False, join_ids=True):
"""Build a Request object for an E-utility.
:param str cgi: base URL for the CGI script to access.
:param params: Mapping containing options to pass to the CGI script. Keys must be strings.
:type params: dict or None
:param bool post: Whether to use the HTTP POST method rather than GET. By default (``post=None``),
POST is used if the URL encoded parameters would be over 1000 characters long, as is
suggested in the E-Utilities documentation.
:param bool ecitmatch: Don't URL-encode pipe ("|") characters; the ecitmatch
tool expects them unencoded.
:param bool join_ids: Passed to ``_construct_params``.
:returns: A request object ready to be passed to ``_open``.
:rtype: urllib.request.Request
"""
params = _construct_params(params, join_ids=join_ids)
params_str = urlencode(params, doseq=True)
if ecitmatch:
params_str = params_str.replace("%7C", "|")
# By default, post is None. Set it to a boolean to override the length-based choice:
if post is None and len(params_str) > 1000:
post = True
# NCBI prefers an HTTP POST instead of an HTTP GET if there are more than about 200 IDs
if post is None and "id" in params:
idcount = params["id"].count(",") + 1
if idcount >= 200:
post = True
if post:
return Request(cgi, data=params_str.encode("utf8"), method="POST")
else:
return Request(cgi + "?" + params_str, method="GET") |
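An illustration of the GET/POST decision (the helper is private, so this is shown only to clarify the two thresholds; the IDs are placeholders):

# Illustration: short queries go out as GET; once the encoded string
# exceeds ~1000 characters or "id" holds 200+ UIDs, POST is used.
from Bio import Entrez

Entrez.email = "[email protected]"  # avoids the missing-email warning
short = Entrez._build_request(
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi",
    {"db": "pubmed", "term": "biopython"},
)
print(short.method)  # GET
bulk = Entrez._build_request(
    "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi",
    {"db": "pubmed", "id": [str(n) for n in range(300)]},
)
print(bulk.method)  # POST - 300 IDs exceed both thresholds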
Construct/format parameter dict for an Entrez request.
:param params: User-supplied parameters.
:type params: dict or None
:param bool join_ids: If True and the "id" key of ``params`` is a list
containing multiple UIDs, join them into a single comma-delimited string.
:returns: Parameters with defaults added and keys with None values removed.
:rtype: dict | def _construct_params(params, join_ids=True):
"""Construct/format parameter dict for an Entrez request.
:param params: User-supplied parameters.
:type params: dict or None
:param bool join_ids: If True and the "id" key of ``params`` is a list
containing multiple UIDs, join them into a single comma-delimited string.
:returns: Parameters with defaults added and keys with None values removed.
:rtype: dict
"""
if params is None:
params = {}
# Tell Entrez that we are using Biopython (or whatever the user has
# specified explicitly in the parameters or by changing the default)
params.setdefault("tool", tool)
# Tell Entrez who we are
params.setdefault("email", email)
params.setdefault("api_key", api_key)
# Remove None values from the parameters
for key, value in list(params.items()):
if value is None:
del params[key]
# Warn if email not set
if "email" not in params:
warnings.warn(
"""
Email address is not specified.
To make use of NCBI's E-utilities, NCBI requires you to specify your
email address with each request. As an example, if your email address
is [email protected], you can specify it as follows:
from Bio import Entrez
Entrez.email = '[email protected]'
In case of excessive usage of the E-utilities, NCBI will attempt to contact
a user at the email address provided before blocking access to the
E-utilities.""",
UserWarning,
)
# Format "id" parameter properly
if join_ids and "id" in params:
params["id"] = _format_ids(params["id"])
return params |
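A quick illustration of the default injection and None-filtering (private helper; the values are placeholders):

# Illustration: defaults injected, None values dropped, list "id" joined.
from Bio import Entrez

Entrez.email = "[email protected]"
params = Entrez._construct_params({"db": "pubmed", "retmax": None, "id": [11, 22]})
print(params["email"])     # [email protected]
print(params["id"])        # 11,22
print("retmax" in params)  # False - None values are removed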
Convert one or more UIDs to a single comma-delimited string.
Input may be a single ID as an integer or string, an iterable of strings/ints,
or a string of IDs already separated by commas. | def _format_ids(ids):
"""Convert one or more UIDs to a single comma-delimited string.
Input may be a single ID as an integer or string, an iterable of strings/ints,
or a string of IDs already separated by commas.
"""
if isinstance(ids, int):
# Single integer, just convert to str
return str(ids)
if isinstance(ids, str):
# String which represents one or more IDs joined by commas
# Remove any whitespace around commas if they are present
return ",".join(id.strip() for id in ids.split(","))
# Not a string or integer, assume iterable
return ",".join(map(str, ids)) |
Check whether a Request has the api_key parameter set, which determines the rate limit.
Works with GET or POST requests. | def _has_api_key(request):
"""Check if a Request has the api_key parameter set, to set the rate limit.
Works with GET or POST requests.
"""
if request.method == "POST":
return b"api_key=" in request.data
return "api_key=" in request.full_url |
Parse cell line records.
This function is for parsing cell line files containing multiple
records.
Arguments:
- handle - handle to the file. | def parse(handle):
"""Parse cell line records.
This function is for parsing cell line files containing multiple
records.
Arguments:
- handle - handle to the file.
"""
while True:
record = __read(handle)
if not record:
break
yield record |
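A usage sketch, assuming this parser is the one in Bio.ExPASy.cellosaurus and that a local copy of the Cellosaurus flat file is available (the filename is a placeholder):

# Sketch: iterate over records in a local Cellosaurus flat file.
from Bio.ExPASy import cellosaurus

with open("cellosaurus.txt") as handle:  # placeholder path
    for record in cellosaurus.parse(handle):
        print(record["ID"], record["AC"])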
Read one cell line record.
This function is for parsing cell line files containing
exactly one record.
Arguments:
- handle - handle to the file. | def read(handle):
"""Read one cell line record.
This function is for parsing cell line files containing
exactly one record.
Arguments:
- handle - handle to the file.
"""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one cell line record found")
return record |
Parse ENZYME records.
This function is for parsing ENZYME files containing multiple
records.
Arguments:
- handle - handle to the file. | def parse(handle):
"""Parse ENZYME records.
This function is for parsing ENZYME files containing multiple
records.
Arguments:
- handle - handle to the file.
"""
while True:
record = __read(handle)
if not record:
break
yield record |
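A matching sketch for the ENZYME parser, assuming it lives in Bio.ExPASy.Enzyme and that a local copy of the flat file is available (placeholder path):

# Sketch: list EC numbers and descriptions from a local ENZYME file.
from Bio.ExPASy import Enzyme

with open("enzyme.dat") as handle:  # placeholder path
    for record in Enzyme.parse(handle):
        print(record["ID"], record["DE"])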
Read one ENZYME record.
This function is for parsing ENZYME files containing
exactly one record.
Arguments:
- handle - handle to the file. | def read(handle):
"""Read one ENZYME record.
This function is for parsing ENZYME files containing
exactly one record.
Arguments:
- handle - handle to the file.
"""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one ENZYME record found")
return record |
Read in a record from a file with exactly one Prodoc record. | def read(handle):
"""Read in a record from a file with exactly one Prodoc record."""
record = __read(handle)
# We should have reached the end of the record by now
line = handle.readline()
if line:
raise ValueError("More than one Prodoc record found")
return record |
Iterate over the records in a Prodoc file. | def parse(handle):
"""Iterate over the records in a Prodoc file."""
while True:
record = __read(handle)
if not record:
return
yield record |
Parse Prosite records.
This function is for parsing Prosite files containing multiple
records.
Arguments:
- handle - handle to the file. | def parse(handle):
"""Parse Prosite records.
This function is for parsing Prosite files containing multiple
records.
Arguments:
- handle - handle to the file.
"""
while True:
record = __read(handle)
if not record:
break
yield record |
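A sketch for the Prosite parser, assuming Bio.ExPASy.Prosite and a local prosite.dat file (placeholder path); unlike the dict-style records above, Prosite records expose attributes:

# Sketch: print accession and name for each PROSITE record.
from Bio.ExPASy import Prosite

with open("prosite.dat") as handle:  # placeholder path
    for record in Prosite.parse(handle):
        print(record.accession, record.name)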
Read one Prosite record.
This function is for parsing Prosite files containing
exactly one record.
Arguments:
- handle - handle to the file. | def read(handle):
"""Read one Prosite record.
This function is for parsing Prosite files containing
exactly one record.
Arguments:
- handle - handle to the file.
"""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one Prosite record found")
return record |
Execute a ScanProsite search.
Arguments:
- mirror: The ScanProsite mirror to be used
(default: https://prosite.expasy.org).
- seq: The query sequence, or UniProtKB (Swiss-Prot,
TrEMBL) accession
- output: Format of the search results
(default: xml)
Further search parameters can be passed as keywords; see the
documentation for programmatic access to ScanProsite at
https://prosite.expasy.org/scanprosite/scanprosite_doc.html
for a description of such parameters.
This function returns a handle to the search results returned by
ScanProsite. Search results in the XML format can be parsed into a
Python object, by using the Bio.ExPASy.ScanProsite.read function. | def scan(seq="", mirror="https://prosite.expasy.org", output="xml", **keywords):
"""Execute a ScanProsite search.
Arguments:
- mirror: The ScanProsite mirror to be used
(default: https://prosite.expasy.org).
- seq: The query sequence, or UniProtKB (Swiss-Prot,
TrEMBL) accession
- output: Format of the search results
(default: xml)
Further search parameters can be passed as keywords; see the
documentation for programmatic access to ScanProsite at
https://prosite.expasy.org/scanprosite/scanprosite_doc.html
for a description of such parameters.
This function returns a handle to the search results returned by
ScanProsite. Search results in the XML format can be parsed into a
Python object, by using the Bio.ExPASy.ScanProsite.read function.
"""
parameters = {"seq": seq, "output": output}
for key, value in keywords.items():
if value is not None:
parameters[key] = value
command = urlencode(parameters)
url = f"{mirror}/cgi-bin/prosite/scanprosite/PSScan.cgi?{command}"
handle = urlopen(url)
return handle |
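A hedged end-to-end sketch combining scan with the read function defined next (network access required; the accession is illustrative):

# Sketch: submit a UniProtKB accession and walk the parsed XML hits.
from Bio.ExPASy import ScanProsite

handle = ScanProsite.scan(seq="P98119")  # illustrative accession
record = ScanProsite.read(handle)
for hit in record:
    print(hit["sequence_ac"], hit["signature_ac"], hit["start"], hit["stop"])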
Parse search results returned by ScanProsite into a Python object. | def read(handle):
"""Parse search results returned by ScanProsite into a Python object."""
content_handler = ContentHandler()
saxparser = Parser()
saxparser.setContentHandler(content_handler)
saxparser.parse(handle)
record = content_handler.record
return record |
Get a text handle to a PRODOC entry at ExPASy in HTML format.
>>> from Bio import ExPASy
>>> import os
>>> with ExPASy.get_prodoc_entry('PDOC00001') as in_handle:
... html = in_handle.read()
...
>>> with open("myprodocrecord.html", "w") as out_handle:
... length = out_handle.write(html)
...
>>> os.remove("myprodocrecord.html") # tidy up
For a non-existent key XXX, ExPASy returns an HTML-formatted page
containing this text: 'There is currently no PROSITE entry for' | def get_prodoc_entry(
id, cgi="https://prosite.expasy.org/cgi-bin/prosite/get-prodoc-entry"
):
"""Get a text handle to a PRODOC entry at ExPASy in HTML format.
>>> from Bio import ExPASy
>>> import os
>>> with ExPASy.get_prodoc_entry('PDOC00001') as in_handle:
... html = in_handle.read()
...
>>> with open("myprodocrecord.html", "w") as out_handle:
... length = out_handle.write(html)
...
>>> os.remove("myprodocrecord.html") # tidy up
For a non-existent key XXX, ExPASy returns an HTML-formatted page
containing this text: 'There is currently no PROSITE entry for'
"""
return _open(f"{cgi}?{id}") |