def get_prosite_entry(
id, cgi="https://prosite.expasy.org/cgi-bin/prosite/get-prosite-entry"
):
"""Get a text handle to a PROSITE entry at ExPASy in HTML format.
>>> from Bio import ExPASy
>>> import os
>>> with ExPASy.get_prosite_entry('PS00001') as in_handle:
... html = in_handle.read()
...
>>> with open("myprositerecord.html", "w") as out_handle:
... length = out_handle.write(html)
...
>>> os.remove("myprositerecord.html") # tidy up
For a non-existing key XXX, ExPASy returns an HTML-formatted page
containing this text: 'There is currently no PROSITE entry for'
"""
    return _open(f"{cgi}?{id}")

def get_prosite_raw(id, cgi=None):
"""Get a text handle to a raw PROSITE or PRODOC record at ExPASy.
The cgi argument is deprecated due to changes in the ExPASy
website.
>>> from Bio import ExPASy
>>> from Bio.ExPASy import Prosite
>>> with ExPASy.get_prosite_raw('PS00001') as handle:
... record = Prosite.read(handle)
...
>>> print(record.accession)
PS00001
This function raises a ValueError if the identifier does not exist:
>>> handle = ExPASy.get_prosite_raw("DOES_NOT_EXIST")
Traceback (most recent call last):
...
ValueError: Failed to find entry 'DOES_NOT_EXIST' on ExPASy
"""
handle = _open(f"https://prosite.expasy.org/{id}.txt")
if handle.url == "https://www.expasy.org/":
raise ValueError(f"Failed to find entry '{id}' on ExPASy") from None
    return handle

def get_sprot_raw(id):
"""Get a text handle to a raw SwissProt entry at ExPASy.
For an ID of XXX, fetches http://www.uniprot.org/uniprot/XXX.txt
(as per the https://www.expasy.org/expasy_urls.html documentation).
>>> from Bio import ExPASy
>>> from Bio import SwissProt
>>> with ExPASy.get_sprot_raw("O23729") as handle:
... record = SwissProt.read(handle)
...
>>> print(record.entry_name)
CHS3_BROFI
This function raises a ValueError if the identifier does not exist:
>>> ExPASy.get_sprot_raw("DOES_NOT_EXIST")
Traceback (most recent call last):
...
ValueError: Failed to find SwissProt entry 'DOES_NOT_EXIST'
"""
try:
handle = _open(f"http://www.uniprot.org/uniprot/{id}.txt")
except HTTPError as exception:
if exception.code in (400, 404):
raise ValueError(f"Failed to find SwissProt entry '{id}'") from None
else:
raise
    return handle

def _open(url):
"""Open URL and convert to text assuming UTF-8 encoding (PRIVATE)."""
handle = urlopen(url)
text_handle = io.TextIOWrapper(handle, encoding="UTF-8")
text_handle.url = handle.url
    return text_handle

def _wrapped_genbank(information, indent, wrap_space=1, split_char=" "):
"""Write a line of GenBank info that can wrap over multiple lines (PRIVATE).
This takes a line of information which can potentially wrap over
multiple lines, and breaks it up with carriage returns and
indentation so it fits properly into a GenBank record.
Arguments:
- information - The string holding the information we want
wrapped in GenBank method.
- indent - The indentation on the lines we are writing.
- wrap_space - Whether or not to wrap only on spaces in the
information.
- split_char - A specific character to split the lines on. By default
spaces are used.
"""
info_length = Record.GB_LINE_LENGTH - indent
if not information:
# GenBank files use "." for missing data
return ".\n"
if wrap_space:
info_parts = information.split(split_char)
else:
cur_pos = 0
info_parts = []
while cur_pos < len(information):
info_parts.append(information[cur_pos : cur_pos + info_length])
cur_pos += info_length
# first get the information string split up by line
output_parts = []
cur_part = ""
for info_part in info_parts:
if len(cur_part) + 1 + len(info_part) > info_length:
if cur_part:
if split_char != " ":
cur_part += split_char
output_parts.append(cur_part)
cur_part = info_part
else:
if cur_part == "":
cur_part = info_part
else:
cur_part += split_char + info_part
# add the last bit of information to the output
if cur_part:
output_parts.append(cur_part)
# now format the information string for return
output_info = output_parts[0] + "\n"
for output_part in output_parts[1:]:
output_info += " " * indent + output_part + "\n"
    return output_info
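
A rough sketch of how this helper is used (hypothetical input; the usable width is Record.GB_LINE_LENGTH minus the indent, so continuation lines line up under a GenBank keyword field):

definition = "Arabidopsis thaliana chloroplast, complete genome, including all annotated features"
wrapped = _wrapped_genbank(definition, indent=12)
# The first chunk is returned unindented (the caller writes the keyword);
# every following line is prefixed with 12 spaces.
print("DEFINITION  " + wrapped, end="")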

def _indent_genbank(information, indent):
"""Write out information with the specified indent (PRIVATE).
Unlike _wrapped_genbank, this function makes no attempt to wrap
lines -- it assumes that the information already has newlines in the
appropriate places, and will add the specified indent to the start of
each line.
"""
# split the info into lines based on line breaks
info_parts = information.split("\n")
# the first line will have no indent
output_info = info_parts[0] + "\n"
for info_part in info_parts[1:]:
output_info += " " * indent + info_part + "\n"
    return output_info

def parse(handle):
"""Iterate over GenBank formatted entries as Record objects.
>>> from Bio import GenBank
>>> with open("GenBank/NC_000932.gb") as handle:
... for record in GenBank.parse(handle):
... print(record.accession)
['NC_000932']
To get SeqRecord objects use Bio.SeqIO.parse(..., format="gb")
instead.
"""
    return iter(Iterator(handle, RecordParser()))

def read(handle):
"""Read a handle containing a single GenBank entry as a Record object.
>>> from Bio import GenBank
>>> with open("GenBank/NC_000932.gb") as handle:
... record = GenBank.read(handle)
... print(record.accession)
['NC_000932']
To get a SeqRecord object use Bio.SeqIO.read(..., format="gb")
instead.
"""
iterator = parse(handle)
try:
record = next(iterator)
except StopIteration:
raise ValueError("No records found in handle") from None
try:
next(iterator)
raise ValueError("More than one record found in handle")
except StopIteration:
pass
    return record

def out_block(text, prefix=""):
"""Format text in blocks of 80 chars with an additional optional prefix."""
output = ""
for j in range(0, len(text), 80):
output += f"{prefix}{text[j : j + 80]}\n"
output += "\n"
    return output
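
For example, a 200-character string comes out as two 80-character lines and one 40-character line, each carrying the prefix, with a blank line appended:

seq = "ACGT" * 50
print(out_block(seq, prefix="XX "), end="")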

def parse(handle):
"""Read Gene Expression Omnibus records from file handle.
Returns a generator object which yields Bio.Geo.Record() objects.
"""
record = None
for line in handle:
line = line.strip("\n").strip("\r")
if not line:
continue # Ignore empty lines
c = line[0]
if c == "^":
if record:
yield record
record = Record.Record()
record.entity_type, record.entity_id = _read_key_value(line)
elif c == "!":
if line in (
"!Sample_table_begin",
"!Sample_table_end",
"!Platform_table_begin",
"!Platform_table_end",
):
continue
key, value = _read_key_value(line)
if key in record.entity_attributes:
if isinstance(record.entity_attributes[key], list):
record.entity_attributes[key].append(value)
else:
existing = record.entity_attributes[key]
record.entity_attributes[key] = [existing, value]
else:
record.entity_attributes[key] = value
elif c == "#":
key, value = _read_key_value(line)
assert key not in record.col_defs
record.col_defs[key] = value
else:
row = line.split("\t")
record.table_rows.append(row)
    yield record
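
Typical usage, reading a SOFT-format file (the filename here is hypothetical):

from Bio import Geo

with open("GSE16.txt") as handle:  # hypothetical SOFT-format file
    for record in Geo.parse(handle):
        print(record.entity_type, record.entity_id)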

def _spring_layout(desired, minimum, maximum, gap=0):
"""Try to layout label coordinates or other floats (PRIVATE).
Originally written for the y-axis vertical positioning of labels on a
chromosome diagram (where the minimum gap between y-axis coordinates is
the label height), it could also potentially be used for x-axis placement,
or indeed radial placement for circular chromosomes within GenomeDiagram.
In essence this is an optimisation problem, balancing the desire to have
each label as close as possible to its data point, but also to spread out
the labels to avoid overlaps. This could be described with a cost function
(modelling the label distance from the desired placement, and the inter-
label separations as springs) and solved as a multi-variable minimization
problem - perhaps with NumPy or SciPy.
For now however, the implementation is a somewhat crude ad hoc algorithm.
NOTE - This expects the input data to have been sorted!
"""
count = len(desired)
if count <= 1:
return desired # Easy!
if minimum >= maximum:
raise ValueError(f"Bad min/max {minimum:f} and {maximum:f}")
if min(desired) < minimum or max(desired) > maximum:
raise ValueError(
"Data %f to %f out of bounds (%f to %f)"
% (min(desired), max(desired), minimum, maximum)
)
equal_step = (maximum - minimum) / (count - 1)
if equal_step < gap:
import warnings
from Bio import BiopythonWarning
warnings.warn("Too many labels to avoid overlap", BiopythonWarning)
# Crudest solution
return [minimum + i * equal_step for i in range(count)]
good = True
if gap:
prev = desired[0]
        for next in desired[1:]:
            if next - prev < gap:  # gap between neighbouring sorted values
                good = False
                break
            prev = next
if good:
return desired
span = maximum - minimum
for split in [0.5 * span, span / 3.0, 2 * span / 3.0, 0.25 * span, 0.75 * span]:
midpoint = minimum + split
low = [x for x in desired if x <= midpoint - 0.5 * gap]
high = [x for x in desired if x > midpoint + 0.5 * gap]
if len(low) + len(high) < count:
# Bad split point, points right on boundary
continue
elif not low and len(high) * gap <= (span - split) + 0.5 * gap:
# Give a little of the unused low space to the high points
return _spring_layout(high, midpoint + 0.5 * gap, maximum, gap)
elif not high and len(low) * gap <= split + 0.5 * gap:
            # Give a little of the unused high space to the low points
return _spring_layout(low, minimum, midpoint - 0.5 * gap, gap)
elif (
len(low) * gap <= split - 0.5 * gap
and len(high) * gap <= (span - split) - 0.5 * gap
):
return _spring_layout(
low, minimum, midpoint - 0.5 * gap, gap
) + _spring_layout(high, midpoint + 0.5 * gap, maximum, gap)
    # This can be counter-productive now we can split out into the telomere or
    # spacer-segment's vertical space...
# Try not to spread out as far as the min/max unless needed
low = min(desired)
high = max(desired)
if (high - low) / (count - 1) >= gap:
# Good, we don't need the full range, and can position the
# min and max exactly as well :)
equal_step = (high - low) / (count - 1)
return [low + i * equal_step for i in range(count)]
low = 0.5 * (minimum + min(desired))
high = 0.5 * (max(desired) + maximum)
if (high - low) / (count - 1) >= gap:
# Good, we don't need the full range
equal_step = (high - low) / (count - 1)
return [low + i * equal_step for i in range(count)]
# Crudest solution
    return [minimum + i * equal_step for i in range(count)]
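
A sketch of the two simplest cases (direct calls are shown for illustration only; this is a private helper):

# Sorted values that already respect the gap are returned unchanged:
_spring_layout([1.0, 2.0, 3.0], minimum=0.0, maximum=10.0, gap=0.5)
# Values packed tighter than the gap get spread out; if even equal
# spacing cannot honour the gap, a BiopythonWarning is issued and the
# equally spaced layout is returned anyway.
_spring_layout([4.9, 5.0, 5.1], minimum=0.0, maximum=10.0, gap=1.0)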

def get_colors(k, **kwargs):
"""Return k colours selected by the ColorSpiral object, as a generator.
Arguments:
- k - the number of colours to return
- kwargs - pass-through arguments to the ColorSpiral object
"""
cs = ColorSpiral(**kwargs)
    return cs.get_colors(k)

def get_color_dict(l, **kwargs):  # noqa: E741
"""Return a dictionary of colours using the provided values as keys.
Returns a dictionary, keyed by the members of iterable l, with a
colour assigned to each member.
Arguments:
- l - an iterable representing classes to be coloured
- kwargs - pass-through arguments to the ColorSpiral object
"""
cs = ColorSpiral(**kwargs)
colors = cs.get_colors(len(l))
    color_dict = {}  # avoid shadowing the builtin dict
    for item in l:
        color_dict[item] = next(colors)
    return color_dict
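
For example, to give each of a few feature classes its own colour (both helpers are exposed in Bio.Graphics.ColorSpiral):

from Bio.Graphics.ColorSpiral import get_color_dict

color_dict = get_color_dict(["exon", "intron", "promoter"])
for name, rgb in color_dict.items():
    print(name, rgb)  # each value is an (r, g, b) tuple of floats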

def darken(color, factor=0.7):
"""Return darkened color as a ReportLab RGB color.
Take a passed color and returns a Reportlab color that is darker by the
factor indicated in the parameter.
"""
newcol = color_to_reportlab(color)
for a in ["red", "green", "blue"]:
setattr(newcol, a, factor * getattr(newcol, a))
    return newcol

def color_to_reportlab(color):
"""Return the passed color in Reportlab Color format.
We allow colors to be specified as hex values, tuples, or Reportlab Color
objects, and with or without an alpha channel. This function acts as a
Rosetta stone for conversion of those formats to a Reportlab Color
object, with alpha value.
Any other color specification is returned directly
"""
# Reportlab Color objects are in the format we want already
if isinstance(color, colors.Color):
return color
elif isinstance(color, str): # String implies hex color
        if color.startswith("0x"):  # Standardise to octothorpe
            color = color.replace("0x", "#")
if len(color) == 7:
return colors.HexColor(color)
else:
try:
return colors.HexColor(color, hasAlpha=True)
except TypeError: # Catch pre-2.7 Reportlab
raise RuntimeError(
"Your reportlab seems to be too old, try 2.7 onwards"
) from None
elif isinstance(color, tuple): # Tuple implies RGB(alpha) tuple
return colors.Color(*color)
    return color
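
A sketch of the accepted spellings (colors is reportlab.lib.colors):

from reportlab.lib import colors

color_to_reportlab("#ff0000")        # 7-character hex string
color_to_reportlab("0xff000080")     # 0x prefix; 8 hex digits imply alpha
color_to_reportlab((1.0, 0.0, 0.0))  # RGB(A) tuple
color_to_reportlab(colors.red)       # Color objects pass straight through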

def get_temp_imagefilename(url):
"""Return filename of temporary file containing downloaded image.
Create a new temporary file to hold the image file at the passed URL
and return the filename.
"""
img = urlopen(url).read()
im = Image.open(BytesIO(img))
# im.transpose(Image.FLIP_TOP_BOTTOM)
f = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
fname = f.name
f.close()
im.save(fname, "PNG")
    return fname

def _write(drawing, output_file, format, dpi=72):
"""Standardize output to files (PRIVATE).
Writes the provided drawing out to a file in a prescribed format.
- drawing - suitable ReportLab drawing object.
- output_file - a handle to write to, or a filename to write to.
- format - String indicating output format, one of PS, PDF, SVG,
or provided the ReportLab renderPM module is installed,
      one of the bitmap formats JPG, BMP, GIF, PNG, TIFF or TIF.
The format can be given in any case.
- dpi - Resolution (dots per inch) for bitmap formats.
No return value.
"""
from reportlab.graphics import renderPS, renderPDF, renderSVG
try:
from reportlab.graphics import renderPM
except ImportError:
# This is an optional part of ReportLab, so may not be installed.
# We'll raise a missing dependency error if rendering to a
# bitmap format is attempted.
renderPM = None
formatdict = {
"PS": renderPS,
"EPS": renderPS,
# not sure which you actually get, PS or EPS, but
# GenomeDiagram used PS while other modules used EPS.
"PDF": renderPDF,
"SVG": renderSVG,
"JPG": renderPM,
"BMP": renderPM,
"GIF": renderPM,
"PNG": renderPM,
"TIFF": renderPM,
"TIF": renderPM,
}
try:
# If output is not a string, then .upper() will trigger
# an attribute error...
drawmethod = formatdict[format.upper()] # select drawing method
except (KeyError, AttributeError):
raise ValueError(
f"Output format should be one of {', '.join(formatdict)}"
) from None
if drawmethod is None:
# i.e. We wanted renderPM but it isn't installed
# See the import at the top of the function.
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError("Please install ReportLab's renderPM module")
if drawmethod == renderPM:
# This has a different API to the other render objects
return drawmethod.drawToFile(drawing, output_file, format, dpi=dpi)
else:
        return drawmethod.drawToFile(drawing, output_file)
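
A minimal sketch of driving this helper with a trivial drawing (hypothetical filenames; the bitmap call needs ReportLab's optional renderPM):

from reportlab.graphics.shapes import Drawing, Rect

d = Drawing(200, 100)
d.add(Rect(10, 10, 180, 80))
_write(d, "diagram.pdf", "pdf")           # vector output; dpi is ignored
_write(d, "diagram.png", "PNG", dpi=300)  # bitmap output via renderPM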

def page_sizes(size):
"""Convert size string into a Reportlab pagesize.
Arguments:
- size - A string representing a standard page size, eg 'A4' or 'LETTER'
"""
sizes = { # ReportLab pagesizes, keyed by ISO string
"A0": pagesizes.A0,
"A1": pagesizes.A1,
"A2": pagesizes.A2,
"A3": pagesizes.A3,
"A4": pagesizes.A4,
"A5": pagesizes.A5,
"A6": pagesizes.A6,
"B0": pagesizes.B0,
"B1": pagesizes.B1,
"B2": pagesizes.B2,
"B3": pagesizes.B3,
"B4": pagesizes.B4,
"B5": pagesizes.B5,
"B6": pagesizes.B6,
"ELEVENSEVENTEEN": pagesizes.ELEVENSEVENTEEN,
"LEGAL": pagesizes.LEGAL,
"LETTER": pagesizes.LETTER,
}
try:
return sizes[size]
except KeyError:
        raise ValueError(f"{size} not in list of page sizes") from None

def _stroke_and_fill_colors(color, border):
"""Deal with border and fill colors (PRIVATE)."""
if not isinstance(color, colors.Color):
raise ValueError(f"Invalid color {color!r}")
if color == colors.white and border is None:
# Force black border on white boxes with undefined border
strokecolor = colors.black
elif border is None:
strokecolor = color # use fill color
elif border:
if not isinstance(border, colors.Color):
raise ValueError(f"Invalid border color {border!r}")
strokecolor = border
else:
# e.g. False
strokecolor = None
    return strokecolor, color

def draw_box(
point1, point2, color=colors.lightgreen, border=None, colour=None, **kwargs
):
"""Draw a box.
Arguments:
- point1, point2 - coordinates for opposite corners of the box
(x,y tuples)
- color /colour - The color for the box (colour takes priority
over color)
- border - Border color for the box
Returns a closed path object, beginning at (x1,y1) going round
the four points in order, and filling with the passed color.
"""
x1, y1 = point1
x2, y2 = point2
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
del colour
strokecolor, color = _stroke_and_fill_colors(color, border)
x1, y1, x2, y2 = min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)
return Polygon(
[x1, y1, x2, y1, x2, y2, x1, y2],
strokeColor=strokecolor,
fillColor=color,
strokewidth=0,
**kwargs,
    )
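
For instance, a filled rectangle with a black border (a sketch; this helper is internal to the GenomeDiagram drawing code):

from reportlab.lib import colors

box = draw_box((10, 10), (60, 30), color=colors.lightblue, border=colors.black)
# box is a reportlab Polygon, ready to be added to a Drawing.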

def draw_cut_corner_box(
point1, point2, corner=0.5, color=colors.lightgreen, border=None, **kwargs
):
"""Draw a box with the corners cut off."""
x1, y1 = point1
x2, y2 = point2
if not corner:
return draw_box(point1, point2, color, border)
elif corner < 0:
        raise ValueError("Corner size ratio should be positive")
strokecolor, color = _stroke_and_fill_colors(color, border)
boxheight = y2 - y1
boxwidth = x2 - x1
x_corner = min(boxheight * 0.5 * corner, boxwidth * 0.5)
y_corner = min(boxheight * 0.5 * corner, boxheight * 0.5)
points = [
x1,
y1 + y_corner,
x1,
y2 - y_corner,
x1 + x_corner,
y2,
x2 - x_corner,
y2,
x2,
y2 - y_corner,
x2,
y1 + y_corner,
x2 - x_corner,
y1,
x1 + x_corner,
y1,
]
return Polygon(
deduplicate(points),
strokeColor=strokecolor,
strokeWidth=1,
strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs,
    )

def draw_polygon(
list_of_points, color=colors.lightgreen, border=None, colour=None, **kwargs
):
"""Draw polygon.
Arguments:
- list_of_point - list of (x,y) tuples for the corner coordinates
- color / colour - The color for the box
Returns a closed path object, beginning at (x1,y1) going round
the four points in order, and filling with the passed colour.
"""
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
del colour
strokecolor, color = _stroke_and_fill_colors(color, border)
xy_list = []
for x, y in list_of_points:
xy_list.append(x)
xy_list.append(y)
return Polygon(
deduplicate(xy_list),
strokeColor=strokecolor,
fillColor=color,
strokewidth=0,
**kwargs,
    )

def draw_arrow(
point1,
point2,
color=colors.lightgreen,
border=None,
shaft_height_ratio=0.4,
head_length_ratio=0.5,
orientation="right",
colour=None,
**kwargs,
):
"""Draw an arrow.
Returns a closed path object representing an arrow enclosed by the
box with corners at {point1=(x1,y1), point2=(x2,y2)}, a shaft height
given by shaft_height_ratio (relative to box height), a head length
given by head_length_ratio (also relative to box height), and
an orientation that may be 'left' or 'right'.
"""
x1, y1 = point1
x2, y2 = point2
if shaft_height_ratio < 0 or 1 < shaft_height_ratio:
raise ValueError("Arrow shaft height ratio should be in range 0 to 1")
if head_length_ratio < 0:
raise ValueError("Arrow head length ratio should be positive")
# Let the UK spelling (colour) override the USA spelling (color)
if colour is not None:
color = colour
del colour
strokecolor, color = _stroke_and_fill_colors(color, border)
# Depending on the orientation, we define the bottom left (x1, y1) and
# top right (x2, y2) coordinates differently, but still draw the box
# using the same relative coordinates:
xmin, ymin = min(x1, x2), min(y1, y2)
xmax, ymax = max(x1, x2), max(y1, y2)
if orientation == "right":
x1, x2, y1, y2 = xmin, xmax, ymin, ymax
elif orientation == "left":
x1, x2, y1, y2 = xmax, xmin, ymin, ymax
else:
raise ValueError(
f"Invalid orientation {orientation!r}, should be 'left' or 'right'"
)
# We define boxheight and boxwidth accordingly, and calculate the shaft
# height from these. We also ensure that the maximum head length is
# the width of the box enclosure
boxheight = y2 - y1
boxwidth = x2 - x1
shaftheight = boxheight * shaft_height_ratio
headlength = min(abs(boxheight) * head_length_ratio, abs(boxwidth))
if boxwidth < 0:
headlength *= -1 # reverse it
shafttop = 0.5 * (boxheight + shaftheight)
shaftbase = boxheight - shafttop
headbase = boxwidth - headlength
midheight = 0.5 * boxheight
points = [
x1,
y1 + shafttop,
x1 + headbase,
y1 + shafttop,
x1 + headbase,
y2,
x2,
y1 + midheight,
x1 + headbase,
y1,
x1 + headbase,
y1 + shaftbase,
x1,
y1 + shaftbase,
]
return Polygon(
deduplicate(points),
strokeColor=strokecolor,
# strokeWidth=max(1, int(boxheight/40.)),
strokeWidth=1,
# default is mitre/miter which can stick out too much:
strokeLineJoin=1, # 1=round
fillColor=color,
**kwargs,
    )

def deduplicate(points):
"""Remove adjacent duplicate points.
This is important for use with the Polygon class since reportlab has a
bug with duplicate points.
Arguments:
- points - list of points [x1, y1, x2, y2,...]
Returns a list in the same format with consecutive duplicates removed
"""
assert len(points) % 2 == 0
if len(points) < 2:
return points
newpoints = points[0:2]
for x, y in zip(islice(points, 2, None, 2), islice(points, 3, None, 2)):
if x != newpoints[-2] or y != newpoints[-1]:
newpoints.append(x)
newpoints.append(y)
    return newpoints
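
For example, consecutive duplicate corners collapse to a single point:

deduplicate([10, 10, 10, 10, 20, 20])
# -> [10, 10, 20, 20]
deduplicate([10, 10, 20, 20, 20, 20, 30, 30])
# -> [10, 10, 20, 20, 30, 30]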

def angle2trig(theta):
"""Convert angle to a reportlab ready tuple.
Arguments:
- theta - Angle in degrees, counter clockwise from horizontal
Returns a representation of the passed angle in a format suitable
for ReportLab rotations (i.e. cos(theta), sin(theta), -sin(theta),
cos(theta) tuple)
"""
c = cos(theta * pi / 180)
s = sin(theta * pi / 180)
    return (c, s, -s, c)
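
For example, a 90 degree rotation gives effectively (0, 1, -1, 0), up to floating point error in cos(pi/2):

c, s, ns, c2 = angle2trig(90)
print(round(c, 12), round(s, 12))  # 0.0 1.0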

def intermediate_points(start, end, graph_data):
"""Generate intermediate points describing provided graph data..
Returns a list of (start, end, value) tuples describing the passed
graph data as 'bins' between position midpoints.
"""
newdata = [] # data in form (X0, X1, val)
# add first block
newdata.append(
(
start,
graph_data[0][0] + (graph_data[1][0] - graph_data[0][0]) / 2.0,
graph_data[0][1],
)
)
# add middle set
for index in range(1, len(graph_data) - 1):
lastxval, lastyval = graph_data[index - 1]
xval, yval = graph_data[index]
nextxval, nextyval = graph_data[index + 1]
newdata.append(
(lastxval + (xval - lastxval) / 2.0, xval + (nextxval - xval) / 2.0, yval)
)
# add last block
newdata.append((xval + (nextxval - xval) / 2.0, end, graph_data[-1][1]))
    return newdata
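
A small worked example; each bin runs from midpoint to midpoint around its data point, with the first and last bins extended to the start and end positions:

intermediate_points(0, 40, [(10, 1.0), (20, 2.0), (30, 3.0)])
# -> [(0, 15.0, 1.0), (15.0, 25.0, 2.0), (25.0, 40, 3.0)]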

def _first_defined(*args):
"""Return the first non-null argument (PRIVATE)."""
for arg in args:
if arg is not None:
return arg
    return None

def _gen_random_array(n):
"""Return an array of n random numbers summing to 1.0 (PRIVATE)."""
randArray = [random.random() for _ in range(n)]
total = sum(randArray)
    return [x / total for x in randArray]

def _calculate_emissions(emission_probs):
"""Calculate which symbols can be emitted in each state (PRIVATE)."""
# loop over all of the state-symbol duples, mapping states to
# lists of emitted symbols
emissions = defaultdict(list)
for state, symbol in emission_probs:
emissions[state].append(symbol)
    return emissions

def _calculate_from_transitions(trans_probs):
"""Calculate which 'from transitions' are allowed for each state (PRIVATE).
This looks through all of the trans_probs, and uses this dictionary
to determine allowed transitions. It converts this information into
a dictionary, whose keys are source states and whose values are
lists of destination states reachable from the source state via a
transition.
"""
transitions = defaultdict(list)
for from_state, to_state in trans_probs:
transitions[from_state].append(to_state)
    return transitions

def _calculate_to_transitions(trans_probs):
"""Calculate which 'to transitions' are allowed for each state (PRIVATE).
This looks through all of the trans_probs, and uses this dictionary
to determine allowed transitions. It converts this information into
a dictionary, whose keys are destination states and whose values are
lists of source states from which the destination is reachable via a
transition.
"""
transitions = defaultdict(list)
for from_state, to_state in trans_probs:
transitions[to_state].append(from_state)
    return transitions
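
For example, with transition probabilities keyed by (from_state, to_state) tuples, the two helpers simply group the allowed moves in each direction (the probability values themselves are never consulted):

trans_probs = {("A", "A"): 0.6, ("A", "B"): 0.4, ("B", "A"): 1.0}
_calculate_from_transitions(trans_probs)
# defaultdict {"A": ["A", "B"], "B": ["A"]} - destinations from each state
_calculate_to_transitions(trans_probs)
# defaultdict {"A": ["A", "B"], "B": ["A"]} - sources reaching each state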

def pretty_print_prediction(
emissions,
real_state,
predicted_state,
emission_title="Emissions",
real_title="Real State",
predicted_title="Predicted State",
line_width=75,
):
"""Print out a state sequence prediction in a nice manner.
Arguments:
- emissions -- The sequence of emissions of the sequence you are
dealing with.
- real_state -- The actual state path that generated the emissions.
- predicted_state -- A state path predicted by some kind of HMM model.
"""
# calculate the length of the titles and sequences
title_length = max(len(emission_title), len(real_title), len(predicted_title)) + 1
seq_length = line_width - title_length
# set up the titles so they'll print right
emission_title = emission_title.ljust(title_length)
real_title = real_title.ljust(title_length)
predicted_title = predicted_title.ljust(title_length)
cur_position = 0
# while we still have more than seq_length characters to print
while True:
if (cur_position + seq_length) < len(emissions):
extension = seq_length
else:
extension = len(emissions) - cur_position
print(f"{emission_title}{emissions[cur_position:cur_position + seq_length]}")
print(f"{real_title}{real_state[cur_position : cur_position + seq_length]}")
print(
"%s%s\n"
% (
predicted_title,
predicted_state[cur_position : cur_position + seq_length],
)
)
if len(emissions) < (cur_position + seq_length):
break
        cur_position += seq_length

def kegg_info(database):
"""KEGG info - Displays the current statistics of a given database.
db - database or organism (string)
The argument db can be a KEGG database name (e.g. 'pathway' or its
official abbreviation, 'path'), or a KEGG organism code or T number
(e.g. 'hsa' or 'T01001' for human).
A valid list of organism codes and their T numbers can be obtained
via kegg_info('organism') or https://rest.kegg.jp/list/organism
"""
# TODO - return a string (rather than the handle?)
# TODO - cache and validate the organism code / T numbers?
# TODO - can we parse the somewhat formatted output?
#
# https://rest.kegg.jp/info/<database>
#
# <database> = pathway | brite | module | disease | drug | environ |
# ko | genome |<org> | compound | glycan | reaction |
# rpair | rclass | enzyme | genomes | genes | ligand | kegg
# <org> = KEGG organism code or T number
return _q("info", database) |

def kegg_list(database, org=None):
"""KEGG list - Entry list for database, or specified database entries.
db - database or organism (string)
org - optional organism (string), see below.
For the pathway and module databases the optional organism can be
used to restrict the results.
"""
# TODO - split into two functions (dbentries seems separate)?
#
# https://rest.kegg.jp/list/<database>/<org>
#
# <database> = pathway | module
# <org> = KEGG organism code
if database in ("pathway", "module") and org:
resp = _q("list", database, org)
elif isinstance(database, str) and database and org:
raise ValueError("Invalid database arg for kegg list request.")
# https://rest.kegg.jp/list/<database>
#
# <database> = pathway | brite | module | disease | drug | environ |
# ko | genome | <org> | compound | glycan | reaction |
# rpair | rclass | enzyme | organism
# <org> = KEGG organism code or T number
#
#
# https://rest.kegg.jp/list/<dbentries>
#
# <dbentries> = KEGG database entries involving the following <database>
# <database> = pathway | brite | module | disease | drug | environ |
# ko | genome | <org> | compound | glycan | reaction |
# rpair | rclass | enzyme
# <org> = KEGG organism code or T number
else:
if isinstance(database, list):
if len(database) > 100:
raise ValueError(
"Maximum number of databases is 100 for kegg list query"
)
database = ("+").join(database)
resp = _q("list", database)
    return resp

def kegg_find(database, query, option=None):
"""KEGG find - Data search.
Finds entries with matching query keywords or other query data in
a given database.
db - database or organism (string)
query - search terms (string)
option - search option (string), see below.
For the compound and drug database, set option to the string 'formula',
'exact_mass' or 'mol_weight' to search on that field only. The
chemical formula search is a partial match irrespective of the order
of atoms given. The exact mass (or molecular weight) is checked by
rounding off to the same decimal place as the query data. A range of
values may also be specified with the minus(-) sign.
"""
# TODO - return list of tuples?
#
# https://rest.kegg.jp/find/<database>/<query>/<option>
#
# <database> = compound | drug
# <option> = formula | exact_mass | mol_weight
if database in ["compound", "drug"] and option in [
"formula",
"exact_mass",
"mol_weight",
]:
resp = _q("find", database, query, option)
elif option:
raise ValueError("Invalid option arg for kegg find request.")
# https://rest.kegg.jp/find/<database>/<query>
#
# <database> = pathway | module | disease | drug | environ | ko |
# genome | <org> | compound | glycan | reaction | rpair |
# rclass | enzyme | genes | ligand
# <org> = KEGG organism code or T number
else:
if isinstance(query, list):
query = "+".join(query)
resp = _q("find", database, query)
    return resp

def kegg_get(dbentries, option=None):
"""KEGG get - Data retrieval.
dbentries - Identifiers (single string, or list of strings), see below.
option - One of "aaseq", "ntseq", "mol", "kcf", "image", "kgml" (string)
The input is limited up to 10 entries.
The input is limited to one pathway entry with the image or kgml option.
The input is limited to one compound/glycan/drug entry with the image option.
Returns a handle.
"""
if isinstance(dbentries, list) and len(dbentries) <= 10:
dbentries = "+".join(dbentries)
elif isinstance(dbentries, list) and len(dbentries) > 10:
raise ValueError("Maximum number of dbentries is 10 for kegg get query")
# https://rest.kegg.jp/get/<dbentries>[/<option>]
#
# <dbentries> = KEGG database entries involving the following <database>
# <database> = pathway | brite | module | disease | drug | environ |
# ko | genome | <org> | compound | glycan | reaction |
# rpair | rclass | enzyme
# <org> = KEGG organism code or T number
#
# <option> = aaseq | ntseq | mol | kcf | image
if option in ["aaseq", "ntseq", "mol", "kcf", "image", "kgml", "json"]:
resp = _q("get", dbentries, option)
elif option:
raise ValueError("Invalid option arg for kegg get request.")
else:
resp = _q("get", dbentries)
    return resp
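
For example, fetching a compound record and a protein sequence (example identifiers; network access required):

from Bio.KEGG import REST

print(REST.kegg_get("C00002").read())              # ATP compound entry
print(REST.kegg_get("hsa:10458", "aaseq").read())  # amino acid sequence, FASTA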

def kegg_conv(target_db, source_db, option=None):
"""KEGG conv - convert KEGG identifiers to/from outside identifiers.
Arguments:
- target_db - Target database
- source_db_or_dbentries - source database or database entries
- option - Can be "turtle" or "n-triple" (string).
"""
# https://rest.kegg.jp/conv/<target_db>/<source_db>[/<option>]
#
# (<target_db> <source_db>) = (<kegg_db> <outside_db>) |
# (<outside_db> <kegg_db>)
#
# For gene identifiers:
# <kegg_db> = <org>
# <org> = KEGG organism code or T number
# <outside_db> = ncbi-gi | ncbi-geneid | uniprot
#
# For chemical substance identifiers:
# <kegg_db> = drug | compound | glycan
# <outside_db> = pubchem | chebi
#
# <option> = turtle | n-triple
#
# https://rest.kegg.jp/conv/<target_db>/<dbentries>[/<option>]
#
# For gene identifiers:
# <dbentries> = database entries involving the following <database>
# <database> = <org> | ncbi-gi | ncbi-geneid | uniprot
# <org> = KEGG organism code or T number
#
# For chemical substance identifiers:
# <dbentries> = database entries involving the following <database>
# <database> = drug | compound | glycan | pubchem | chebi
#
# <option> = turtle | n-triple
if option and option not in ["turtle", "n-triple"]:
raise ValueError("Invalid option arg for kegg conv request.")
if isinstance(source_db, list):
source_db = "+".join(source_db)
if (
target_db in ["ncbi-gi", "ncbi-geneid", "uniprot"]
or source_db in ["ncbi-gi", "ncbi-geneid", "uniprot"]
or (
target_db in ["drug", "compound", "glycan"]
and source_db in ["pubchem", "glycan"]
)
or (
target_db in ["pubchem", "glycan"]
and source_db in ["drug", "compound", "glycan"]
)
):
if option:
resp = _q("conv", target_db, source_db, option)
else:
resp = _q("conv", target_db, source_db)
return resp
else:
raise ValueError("Bad argument target_db or source_db for kegg conv request.") |

def kegg_link(target_db, source_db, option=None):
"""KEGG link - find related entries by using database cross-references.
target_db - Target database
source_db_or_dbentries - source database
option - Can be "turtle" or "n-triple" (string).
"""
# https://rest.kegg.jp/link/<target_db>/<source_db>[/<option>]
#
# <target_db> = <database>
# <source_db> = <database>
#
# <database> = pathway | brite | module | ko | genome | <org> | compound |
# glycan | reaction | rpair | rclass | enzyme | disease |
# drug | dgroup | environ
#
# <option> = turtle | n-triple
# https://rest.kegg.jp/link/<target_db>/<dbentries>[/<option>]
#
# <dbentries> = KEGG database entries involving the following <database>
# <database> = pathway | brite | module | ko | genome | <org> | compound |
# glycan | reaction | rpair | rclass | enzyme | disease |
# drug | dgroup | environ | genes
#
# <option> = turtle | n-triple
if option and option not in ["turtle", "n-triple"]:
raise ValueError("Invalid option arg for kegg conv request.")
if isinstance(source_db, list):
source_db = "+".join(source_db)
if option:
resp = _q("link", target_db, source_db, option)
else:
resp = _q("link", target_db, source_db)
    return resp

def _default_wrap(indent):
"""Return default wrap rule for _wrap_kegg (PRIVATE).
A wrap rule is a list with the following elements:
[indent, connect, (splitstr, connect, splitafter, keep), ...]
"""
return [indent, "", (" ", "", 1, 0)] |
Return wrap rule for KEGG STRUCTURE (PRIVATE). | def _struct_wrap(indent):
"""Return wrap rule for KEGG STRUCTURE (PRIVATE)."""
return [indent, "", (" ", "", 1, 1)] |
Wrap the input line for KEGG output (PRIVATE).
Arguments:
- info - String holding the information we want wrapped
for KEGG output.
- max_width - Maximum width of a line.
- wrap_rule - A wrap rule (see above) for deciding how to split
strings that must be wrapped. | def _wrap_kegg(line, max_width=KEGG_DATA_LENGTH, wrap_rule=_default_wrap):
"""Wrap the input line for KEGG output (PRIVATE).
Arguments:
- info - String holding the information we want wrapped
for KEGG output.
- max_width - Maximum width of a line.
- wrap_rule - A wrap rule (see above) for deciding how to split
strings that must be wrapped.
"""
s = ""
wrapped_line = ""
indent = " " * wrap_rule[0]
connect = wrap_rule[1]
rules = wrap_rule[2:]
while True:
if len(line) <= max_width:
wrapped_line = wrapped_line + line
s = s + wrapped_line
break
else:
did_split = 0
for rule in rules:
to = max_width
if not rule[2]:
to = to + len(rule[0])
split_idx = line.rfind(rule[0], 0, to)
if split_idx > -1:
if rule[2] and rule[3]:
split_idx = split_idx + len(rule[0])
wrapped_line = wrapped_line + line[0:split_idx] + "\n"
if not rule[3]:
split_idx = split_idx + len(rule[0])
line = indent + rule[1] + line[split_idx:]
did_split = 1
break
if not did_split:
wrapped_line = wrapped_line + line[0:max_width] + "\n"
line = indent + connect + line[max_width:]
    return s

def _write_kegg(item, info, indent=KEGG_ITEM_LENGTH):
"""Write a indented KEGG record item (PRIVATE).
Arguments:
- item - The name of the item to be written.
- info - The (wrapped) information to write.
- indent - Width of item field.
"""
s = ""
for line in info:
partial_lines = line.splitlines()
for partial in partial_lines:
s += item.ljust(indent) + partial + "\n"
if item: # ensure item is only written on first line
item = ""
    return s
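
A sketch of the output shape, assuming the standard 12-column KEGG item field width (KEGG_ITEM_LENGTH):

print(_write_kegg("NAME", ["ATP\nAdenosine 5'-triphosphate"], indent=12), end="")
# NAME        ATP
#             Adenosine 5'-triphosphate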

def parse(handle):
"""Parse a KEGG Ligan/Compound file, returning Record objects.
This is an iterator function, typically used in a for loop. For
example, using one of the example KEGG files in the Biopython
test suite,
>>> with open("KEGG/compound.sample") as handle:
... for record in parse(handle):
... print("%s %s" % (record.entry, record.name[0]))
...
C00023 Iron
C00017 Protein
C00099 beta-Alanine
C00294 Inosine
C00298 Trypsin
C00348 all-trans-Undecaprenyl phosphate
C00349 2-Methyl-3-oxopropanoate
C01386 NH2Mec
"""
record = Record()
for line in handle:
if line[:3] == "///":
yield record
record = Record()
continue
if line[:12] != " ":
keyword = line[:12]
data = line[12:].strip()
if keyword == "ENTRY ":
words = data.split()
record.entry = words[0]
elif keyword == "NAME ":
data = data.strip(";")
record.name.append(data)
elif keyword == "ENZYME ":
while data:
column = data[:16]
data = data[16:]
enzyme = column.strip()
record.enzyme.append(enzyme)
elif keyword == "PATHWAY ":
map, name = data.split(" ")
pathway = ("PATH", map, name)
record.pathway.append(pathway)
elif keyword == "FORMULA ":
record.formula = data
elif keyword in ("MASS ", "EXACT_MASS "):
record.mass = data
elif keyword == "DBLINKS ":
if ":" in data:
key, values = data.split(":")
values = values.split()
row = (key, values)
record.dblinks.append(row)
else:
row = record.dblinks[-1]
key, values = row
values.extend(data.split())
row = key, values
                record.dblinks[-1] = row

def parse(handle):
"""Parse a KEGG Enzyme file, returning Record objects.
This is an iterator function, typically used in a for loop. For
example, using one of the example KEGG files in the Biopython
test suite,
>>> with open("KEGG/enzyme.sample") as handle:
... for record in parse(handle):
... print("%s %s" % (record.entry, record.name[0]))
...
1.1.1.1 alcohol dehydrogenase
1.1.1.62 17beta-estradiol 17-dehydrogenase
1.1.1.68 Transferred to 1.5.1.20
1.6.5.3 NADH:ubiquinone reductase (H+-translocating)
1.14.13.28 3,9-dihydroxypterocarpan 6a-monooxygenase
2.4.1.68 glycoprotein 6-alpha-L-fucosyltransferase
3.1.1.6 acetylesterase
2.7.2.1 acetate kinase
"""
record = Record()
for line in handle:
if line[:3] == "///":
yield record
record = Record()
continue
if line[:12] != " ":
keyword = line[:12]
data = line[12:].strip()
if keyword == "ENTRY ":
words = data.split()
record.entry = words[1]
elif keyword == "CLASS ":
record.classname.append(data)
elif keyword == "COFACTOR ":
record.cofactor.append(data)
elif keyword == "COMMENT ":
record.comment.append(data)
elif keyword == "DBLINKS ":
if ":" in data:
key, values = data.split(":")
values = values.split()
row = (key, values)
record.dblinks.append(row)
else:
row = record.dblinks[-1]
key, values = row
values.extend(data.split())
row = key, values
record.dblinks[-1] = row
elif keyword == "DISEASE ":
if ":" in data:
database, data = data.split(":")
number, name = data.split(None, 1)
row = (database, number, name)
record.disease.append(row)
else:
row = record.disease[-1]
database, number, name = row
name = name + " " + data
row = database, number, name
record.disease[-1] = row
elif keyword == "EFFECTOR ":
record.effector.append(data.strip(";"))
elif keyword == "GENES ":
if data[3:5] == ": " or data[4:6] == ": ":
key, values = data.split(":", 1)
values = [value.split("(")[0] for value in values.split()]
row = (key, values)
record.genes.append(row)
else:
row = record.genes[-1]
key, values = row
for value in data.split():
value = value.split("(")[0]
values.append(value)
row = key, values
record.genes[-1] = row
elif keyword == "INHIBITOR ":
record.inhibitor.append(data.strip(";"))
elif keyword == "NAME ":
record.name.append(data.strip(";"))
elif keyword == "PATHWAY ":
if data[:5] == "PATH:":
_, map_num, name = data.split(None, 2)
pathway = ("PATH", map_num, name)
record.pathway.append(pathway)
else:
ec_num, name = data.split(None, 1)
pathway = "PATH", ec_num, name
record.pathway.append(pathway)
elif keyword == "PRODUCT ":
record.product.append(data.strip(";"))
elif keyword == "REACTION ":
record.reaction.append(data.strip(";"))
elif keyword == "STRUCTURES ":
if data[:4] == "PDB:":
database = data[:3]
accessions = data[4:].split()
row = (database, accessions)
record.structures.append(row)
else:
row = record.structures[-1]
database, accessions = row
accessions.extend(data.split())
row = (database, accessions)
record.structures[-1] = row
elif keyword == "SUBSTRATE ":
record.substrate.append(data.strip(";"))
elif keyword == "SYSNAME ":
            record.sysname.append(data.strip(";"))

def read(handle):
"""Parse a KEGG Enzyme file with exactly one entry.
If the handle contains no records, or more than one record,
an exception is raised. For example:
>>> with open("KEGG/enzyme.new") as handle:
... record = read(handle)
... print("%s %s" % (record.entry, record.name[0]))
...
6.2.1.25 benzoate---CoA ligase
"""
records = parse(handle)
try:
record = next(records)
except StopIteration:
raise ValueError("No records found in handle") from None
try:
next(records)
raise ValueError("More than one record found in handle")
except StopIteration:
pass
    return record

def parse(handle):
"""Parse a KEGG Gene file, returning Record objects.
This is an iterator function, typically used in a for loop. For
example, using one of the example KEGG files in the Biopython
test suite,
>>> with open("KEGG/gene.sample") as handle:
... for record in parse(handle):
... print("%s %s" % (record.entry, record.name[0]))
...
b1174 minE
b1175 minD
"""
record = Record()
for line in handle:
if line[:3] == "///":
yield record
record = Record()
continue
if line[:12] != " ":
keyword = line[:12]
data = line[12:].strip()
if keyword == "ENTRY ":
words = data.split()
record.entry = words[0]
elif keyword == "NAME ":
data = data.strip(";")
record.name.append(data)
elif keyword == "DEFINITION ":
record.definition = data
elif keyword == "ORTHOLOGY ":
id, name = data.split(" ")
orthology = (id, name)
record.orthology.append(orthology)
elif keyword == "ORGANISM ":
id, name = data.split(" ")
organism = (id, name)
record.organism = organism
elif keyword == "POSITION ":
record.position = data
elif keyword == "MOTIF ":
key, values = data.split(": ")
values = values.split()
row = (key, values)
record.motif.append(row)
elif keyword == "DBLINKS ":
if ":" in data:
key, values = data.split(": ")
values = values.split()
row = (key, values)
record.dblinks.append(row)
else:
row = record.dblinks[-1]
key, values = row
values.extend(data.split())
row = key, values
                record.dblinks[-1] = row

def read(handle):
"""Parse a single KEGG Pathway from given file handle.
Returns a single Pathway object. There should be one and only
one pathway in each file, but there may well be pathological
examples out there.
"""
pathways = parse(handle)
try:
pathway = next(pathways)
except StopIteration:
raise ValueError("No pathways found in handle") from None
try:
next(pathways)
raise ValueError("More than one pathway found in handle")
except StopIteration:
pass
    return pathway

def parse(handle):
"""Return an iterator over Pathway elements.
Arguments:
- handle - file handle to a KGML file for parsing, or a KGML string
This is a generator for the return of multiple Pathway objects.
"""
# Check handle
try:
handle.read(0)
except AttributeError:
try:
handle = StringIO(handle)
except TypeError:
raise TypeError(
"An XML-containing handle or an XML string must be provided"
) from None
# Parse XML and return each Pathway
for event, elem in ElementTree.iterparse(handle, events=("start", "end")):
if event == "end" and elem.tag == "pathway":
yield KGMLParser(elem).parse()
            elem.clear()
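
Typical usage is to feed KGML fetched over the REST interface straight into the parser (network access required; ko05130 is an example pathway):

from Bio.KEGG import REST
from Bio.KEGG.KGML import KGML_parser

pathway = KGML_parser.read(REST.kegg_get("ko05130", "kgml").read())
print(pathway.name, len(pathway.entries))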

def parse(handle):
"""Parse a KEGG pathway map."""
for line in handle:
data, catalysts, reaction = line.split(":")
catalysts = [(catalysts,)]
reactants = {}
before, after = reaction.split("<=>")
compounds = before.split(" + ")
for compound in compounds:
compound = compound.strip()
try:
number, compound = compound.split()
number = -int(number)
except ValueError:
number = -1
reactants[compound] = number
compounds = after.split(" + ")
for compound in compounds:
compound = compound.strip()
try:
number, compound = compound.split()
number = int(number)
except ValueError:
number = +1
reactants[compound] = number
yield Reaction(reactants, catalysts, True, data) |
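A worked sketch of the line format this parser assumes ("data:catalysts:reactants <=> products", with optional stoichiometric coefficients); the reaction line itself is hypothetical:
from io import StringIO

line = "R00275:1.15.1.1:2 C00704 <=> C00007 + C00027\n"
reaction = next(parse(StringIO(line)))
print(reaction.reactants)  # {'C00704': -2, 'C00007': 1, 'C00027': 1}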
Read Medline records one by one from the handle.
The handle is either a Medline file, a file-like object, or a list
of lines describing one or more Medline records.
Typical usage::
>>> from Bio import Medline
>>> with open("Medline/pubmed_result2.txt") as handle:
... records = Medline.parse(handle)
... for record in records:
... print(record['TI'])
...
A high level interface to SCOP and ASTRAL ...
GenomeDiagram: a python package for the visualization of ...
Open source clustering software.
PDB file parser and structure class implemented in Python. | def parse(handle):
"""Read Medline records one by one from the handle.
The handle is either a Medline file, a file-like object, or a list
of lines describing one or more Medline records.
Typical usage::
>>> from Bio import Medline
>>> with open("Medline/pubmed_result2.txt") as handle:
... records = Medline.parse(handle)
... for record in records:
... print(record['TI'])
...
A high level interface to SCOP and ASTRAL ...
GenomeDiagram: a python package for the visualization of ...
Open source clustering software.
PDB file parser and structure class implemented in Python.
"""
# These keys point to string values
textkeys = (
"ID",
"PMID",
"SO",
"RF",
"NI",
"JC",
"TA",
"IS",
"CY",
"TT",
"CA",
"IP",
"VI",
"DP",
"YR",
"PG",
"LID",
"DA",
"LR",
"OWN",
"STAT",
"DCOM",
"PUBM",
"DEP",
"PL",
"JID",
"SB",
"PMC",
"EDAT",
"MHDA",
"PST",
"AB",
"EA",
"TI",
"JT",
)
handle = iter(handle)
key = ""
record = Record()
for line in handle:
if line[:6] == "      ":  # continuation line
line = line.rstrip()
if line == "":
# All blank continuation lines should be considered a new line.
# See issue #4557
line = "      \n"
if key in ["MH", "AD"]:
# Multi-line MESH term, want to append to last entry in list
record[key][-1] += line[5:] # including space using line[5:]
else:
record[key].append(line[6:])
elif line != "\n" and line != "\r\n":
line = line.rstrip()
key = line[:4].rstrip()
if key not in record:
record[key] = []
record[key].append(line[6:])
elif record:
# End of the record
# Join each list of strings into one string.
for key in record:
if key in textkeys:
record[key] = " ".join(record[key])
yield record
record = Record()
if record: # catch last one
for key in record:
if key in textkeys:
record[key] = " ".join(record[key])
yield record |
Read a single Medline record from the handle.
The handle is either a Medline file, a file-like object, or a list
of lines describing a Medline record.
Typical usage:
>>> from Bio import Medline
>>> with open("Medline/pubmed_result1.txt") as handle:
... record = Medline.read(handle)
... print(record['TI'])
...
The Bio* toolkits--a brief overview. | def read(handle):
"""Read a single Medline record from the handle.
The handle is either a Medline file, a file-like object, or a list
of lines describing a Medline record.
Typical usage:
>>> from Bio import Medline
>>> with open("Medline/pubmed_result1.txt") as handle:
... record = Medline.read(handle)
... print(record['TI'])
...
The Bio* toolkits--a brief overview.
"""
records = parse(handle)
return next(records) |
Parse an AlignACE format handle as a Record object. | def read(handle):
"""Parse an AlignACE format handle as a Record object."""
record = Record()
line = next(handle)
record.version = line.strip()
line = next(handle)
record.command = line.strip()
mask = None
number = None
for line in handle:
line = line.strip()
if line == "":
pass
elif line[:4] == "Para":
record.parameters = {}
elif line[0] == "#":
seq_name = line.split("\t")[1]
record.sequences.append(seq_name)
elif "=" in line:
par_name, par_value = line.split("=")
par_name = par_name.strip()
par_value = par_value.strip()
record.parameters[par_name] = par_value
elif line[:5] == "Input":
record.sequences = []
elif line[:5] == "Motif":
words = line.split()
assert words[0] == "Motif"
number = int(words[1])
instances = []
elif line[:3] == "MAP":
alphabet = "ACGT"
alignment = Alignment(instances)
motif = Motif(alphabet, alignment)
motif.score = float(line.split()[-1])
motif.number = number
motif.mask = mask
record.append(motif)
elif len(line.split("\t")) == 4:
seq = Seq(line.split("\t")[0])
instances.append(seq)
elif "*" in line:
mask = line.strip("\r\n")
else:
raise ValueError(line)
return record |
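A usage sketch, reusing the AlignAce output file from the Biopython test suite that also appears in the Bio.motifs examples:
with open("motifs/alignace.out") as handle:
    record = read(handle)
for motif in record:
    print(motif.number, motif.score, motif.consensus)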
Read motifs in Cluster Buster position frequency matrix format from a file handle.
Cluster Buster motif format: http://zlab.bu.edu/cluster-buster/help/cis-format.html | def read(handle):
"""Read motifs in Cluster Buster position frequency matrix format from a file handle.
Cluster Buster motif format: http://zlab.bu.edu/cluster-buster/help/cis-format.html
"""
motif_nbr = 0
record = Record()
nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
motif_name = ""
for line in handle:
line = line.strip()
if line:
if line.startswith(">"):
if motif_nbr != 0:
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
motif_name = line[1:].strip()
nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
motif_nbr += 1
else:
if line.startswith("#"):
continue
matrix_columns = line.split()
if len(matrix_columns) == 4:
[
nucleotide_counts[nucleotide].append(float(nucleotide_count))
for nucleotide, nucleotide_count in zip(
["A", "C", "G", "T"], matrix_columns
)
]
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
return record |
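A minimal sketch with an inline matrix (the counts are hypothetical; each row holds the A, C, G and T counts for one position):
from io import StringIO

text = ">motif_1\n1 3 0 14\n0 0 0 18\n16 0 0 2\n"
record = read(StringIO(text))
print(record[0].name, record[0].length)  # motif_1 3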
Return the representation of motifs in Cluster Buster position frequency matrix format. | def write(motifs):
"""Return the representation of motifs in Cluster Buster position frequency matrix format."""
lines = []
for m in motifs:
line = f">{m.name}\n"
lines.append(line)
for ACGT_counts in zip(
m.counts["A"], m.counts["C"], m.counts["G"], m.counts["T"]
):
lines.append("{:0.0f}\t{:0.0f}\t{:0.0f}\t{:0.0f}\n".format(*ACGT_counts))
# Finished; glue the lines together.
text = "".join(lines)
return text |
Parse a MAST XML format handle as a Record object. | def read(handle):
"""Parse a MAST XML format handle as a Record object."""
record = Record()
try:
xml_tree = ET.parse(handle)
except ET.ParseError:
raise ValueError(
"Improper MAST XML input file. XML root tag should start with <mast version= ..."
)
__read_metadata(record, xml_tree)
__read_sequences(record, xml_tree)
return record |
Read sequences from XML ElementTree object. | def __read_sequences(record, xml_tree):
"""Read sequences from XML ElementTree object."""
for sequence_tree in xml_tree.find("sequences").findall("sequence"):
sequence_name = sequence_tree.get("name")
record.sequences.append(sequence_name)
diagram_str = __make_diagram(record, sequence_tree)
record.diagrams[sequence_name] = diagram_str |
Make diagram string found in text file based on motif hit info. | def __make_diagram(record, sequence_tree):
"""Make diagram string found in text file based on motif hit info."""
sequence_length = int(sequence_tree.get("length"))
hit_eles, hit_motifs, gaps = [], [], []
for seg_tree in sequence_tree.findall("seg"):
for hit_ele in seg_tree.findall("hit"):
hit_pos = int(hit_ele.get("pos"))
if not hit_eles:
gap = hit_pos - 1
else:
gap = hit_pos - int(hit_eles[-1].get("pos")) - hit_motifs[-1].length
gaps.append(gap)
hit_motifs.append(record[int(hit_ele.get("idx"))])
hit_eles.append(hit_ele)
if not hit_eles:
return str(sequence_length)
if record.strand_handling == "combine":
motif_strs = [
f"[{'-' if hit_ele.get('rc') == 'y' else '+'}{hit_motif.name}]"
for hit_ele, hit_motif in zip(hit_eles, hit_motifs)
]
elif record.strand_handling == "unstranded":
motif_strs = [
f"[{hit_motif.name}]" for hit_ele, hit_motif in zip(hit_eles, hit_motifs)
]
else:
# TODO - more strand_handling possibilities?
raise Exception(f"Strand handling option {record.strand_handling} not parsable")
tail_length = (
sequence_length - int(hit_eles[-1].get("pos")) - hit_motifs[-1].length + 1
)
motifs_with_gaps = [str(s) for pair in zip(gaps, motif_strs) for s in pair] + [
str(tail_length)
]
# remove 0-length gaps
motifs_with_gaps = [s for s in motifs_with_gaps if s != "0"]
return "-".join(motifs_with_gaps) |
Parse the text output of the MEME program into a meme.Record object.
Examples
--------
>>> from Bio.motifs import meme
>>> with open("motifs/meme.INO_up800.classic.oops.xml") as f:
... record = meme.read(f)
>>> for motif in record:
... for sequence in motif.alignment.sequences:
... print(sequence.motif_name, sequence.sequence_name, sequence.sequence_id, sequence.strand, sequence.pvalue)
GSKGCATGTGAAA INO1 sequence_5 + 1.21e-08
GSKGCATGTGAAA FAS1 sequence_2 - 1.87e-08
GSKGCATGTGAAA ACC1 sequence_4 - 6.62e-08
GSKGCATGTGAAA CHO2 sequence_1 - 1.05e-07
GSKGCATGTGAAA CHO1 sequence_0 - 1.69e-07
GSKGCATGTGAAA FAS2 sequence_3 - 5.62e-07
GSKGCATGTGAAA OPI3 sequence_6 + 1.08e-06
TTGACWCYTGCYCWG CHO2 sequence_1 + 7.2e-10
TTGACWCYTGCYCWG OPI3 sequence_6 - 2.56e-08
TTGACWCYTGCYCWG ACC1 sequence_4 - 1.59e-07
TTGACWCYTGCYCWG CHO1 sequence_0 + 2.05e-07
TTGACWCYTGCYCWG FAS1 sequence_2 + 3.85e-07
TTGACWCYTGCYCWG FAS2 sequence_3 - 5.11e-07
TTGACWCYTGCYCWG INO1 sequence_5 + 8.01e-07 | def read(handle):
"""Parse the text output of the MEME program into a meme.Record object.
Examples
--------
>>> from Bio.motifs import meme
>>> with open("motifs/meme.INO_up800.classic.oops.xml") as f:
... record = meme.read(f)
>>> for motif in record:
... for sequence in motif.alignment.sequences:
... print(sequence.motif_name, sequence.sequence_name, sequence.sequence_id, sequence.strand, sequence.pvalue)
GSKGCATGTGAAA INO1 sequence_5 + 1.21e-08
GSKGCATGTGAAA FAS1 sequence_2 - 1.87e-08
GSKGCATGTGAAA ACC1 sequence_4 - 6.62e-08
GSKGCATGTGAAA CHO2 sequence_1 - 1.05e-07
GSKGCATGTGAAA CHO1 sequence_0 - 1.69e-07
GSKGCATGTGAAA FAS2 sequence_3 - 5.62e-07
GSKGCATGTGAAA OPI3 sequence_6 + 1.08e-06
TTGACWCYTGCYCWG CHO2 sequence_1 + 7.2e-10
TTGACWCYTGCYCWG OPI3 sequence_6 - 2.56e-08
TTGACWCYTGCYCWG ACC1 sequence_4 - 1.59e-07
TTGACWCYTGCYCWG CHO1 sequence_0 + 2.05e-07
TTGACWCYTGCYCWG FAS1 sequence_2 + 3.85e-07
TTGACWCYTGCYCWG FAS2 sequence_3 - 5.11e-07
TTGACWCYTGCYCWG INO1 sequence_5 + 8.01e-07
"""
record = Record()
try:
xml_tree = ET.parse(handle)
except ET.ParseError:
raise ValueError(
"Improper MEME XML input file. XML root tag should start with <MEME version= ..."
)
__read_metadata(record, xml_tree)
__read_alphabet(record, xml_tree)
sequence_id_name_map = __get_sequence_id_name_map(xml_tree)
record.sequences = list(sequence_id_name_map.keys())
__read_motifs(record, xml_tree, sequence_id_name_map)
return record |
Convert strand (+/-) from XML if present.
Default: + | def __convert_strand(strand):
"""Convert strand (+/-) from XML if present.
Default: +
"""
if strand == "minus":
return "-"
if strand == "plus" or strand == "none":
return "+" |
Parse the text output of the MEME program into a meme.Record object.
Examples
--------
>>> from Bio.motifs import minimal
>>> with open("motifs/meme.out") as f:
... record = minimal.read(f)
...
>>> for motif in record:
... print(motif.name, motif.evalue)
...
1 1.1e-22
You can access individual motifs in the record by their index or find a motif
by its name:
>>> from Bio import motifs
>>> with open("motifs/minimal_test.meme") as f:
... record = motifs.parse(f, 'minimal')
...
>>> motif = record[0]
>>> print(motif.name)
KRP
>>> motif = record['IFXA']
>>> print(motif.name)
IFXA
This function won't retrieve instances, as there are none in minimal meme format. | def read(handle):
"""Parse the text output of the MEME program into a meme.Record object.
Examples
--------
>>> from Bio.motifs import minimal
>>> with open("motifs/meme.out") as f:
... record = minimal.read(f)
...
>>> for motif in record:
... print(motif.name, motif.evalue)
...
1 1.1e-22
You can access individual motifs in the record by their index or find a motif
by its name:
>>> from Bio import motifs
>>> with open("motifs/minimal_test.meme") as f:
... record = motifs.parse(f, 'minimal')
...
>>> motif = record[0]
>>> print(motif.name)
KRP
>>> motif = record['IFXA']
>>> print(motif.name)
IFXA
This function won't retrieve instances, as there are none in minimal meme format.
"""
motif_number = 0
record = Record()
_read_version(record, handle)
_read_alphabet(record, handle)
_read_background(record, handle)
while True:
for line in handle:
if line.startswith("MOTIF"):
break
else:
return record
name = line.split()[1]
motif_number += 1
length, num_occurrences, evalue = _read_motif_statistics(handle)
counts = _read_lpm(handle, num_occurrences)
# {'A': 0.25, 'C': 0.25, 'T': 0.25, 'G': 0.25}
motif = motifs.Motif(alphabet=record.alphabet, counts=counts)
motif.background = record.background
motif.length = length
motif.num_occurrences = num_occurrences
motif.evalue = evalue
motif.name = name
record.append(motif)
assert len(record) == motif_number
return record |
Read background letter frequencies (PRIVATE). | def _read_background(record, handle):
"""Read background letter frequencies (PRIVATE)."""
for line in handle:
if line.startswith("Background letter frequencies"):
break
else:
raise ValueError(
"Improper input file. File should contain a line starting background frequencies."
)
try:
line = next(handle)
except StopIteration:
raise ValueError(
"Unexpected end of stream: Expected to find line starting background frequencies."
)
line = line.strip()
ls = line.split()
A, C, G, T = float(ls[1]), float(ls[3]), float(ls[5]), float(ls[7])
record.background = {"A": A, "C": C, "G": G, "T": T} |
Read MEME version (PRIVATE). | def _read_version(record, handle):
"""Read MEME version (PRIVATE)."""
for line in handle:
if line.startswith("MEME version"):
break
else:
raise ValueError(
"Improper input file. File should contain a line starting MEME version."
)
line = line.strip()
ls = line.split()
record.version = ls[2] |
Read alphabet (PRIVATE). | def _read_alphabet(record, handle):
"""Read alphabet (PRIVATE)."""
for line in handle:
if line.startswith("ALPHABET"):
break
else:
raise ValueError(
"Unexpected end of stream: Expected to find line starting with 'ALPHABET'"
)
if not line.startswith("ALPHABET= "):
raise ValueError("Line does not start with 'ALPHABET':\n%s" % line)
line = line.strip().replace("ALPHABET= ", "")
if line == "ACGT":
al = "ACGT"
else:
al = "ACDEFGHIKLMNPQRSTVWY"
record.alphabet = al |
Read letter probability matrix (PRIVATE). | def _read_lpm(handle, num_occurrences):
"""Read letter probability matrix (PRIVATE)."""
counts = [[], [], [], []]
for line in handle:
freqs = line.split()
if len(freqs) != 4:
break
counts[0].append(round(float(freqs[0]) * num_occurrences))
counts[1].append(round(float(freqs[1]) * num_occurrences))
counts[2].append(round(float(freqs[2]) * num_occurrences))
counts[3].append(round(float(freqs[3]) * num_occurrences))
c = {}
c["A"] = counts[0]
c["C"] = counts[1]
c["G"] = counts[2]
c["T"] = counts[3]
return c |
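A worked check of the probability-to-count conversion above, using a hypothetical row with nsites=20:
freqs = [0.05, 0.45, 0.45, 0.05]
print([round(f * 20) for f in freqs])  # [1, 9, 9, 1]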
Read motif statistics (PRIVATE). | def _read_motif_statistics(handle):
"""Read motif statistics (PRIVATE)."""
# minimal :
# letter-probability matrix: alength= 4 w= 19 nsites= 17 E= 4.1e-009
for line in handle:
if line.startswith("letter-probability matrix:"):
break
num_occurrences = int(line.split("nsites=")[1].split()[0])
length = int(line.split("w=")[1].split()[0])
evalue = float(line.split("E=")[1].split()[0])
return length, num_occurrences, evalue |
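A worked sketch on the example line quoted in the comment above:
line = "letter-probability matrix: alength= 4 w= 19 nsites= 17 E= 4.1e-009"
print(int(line.split("w=")[1].split()[0]))       # 19
print(int(line.split("nsites=")[1].split()[0]))  # 17
print(float(line.split("E=")[1].split()[0]))     # 4.1e-09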
Read motif name (PRIVATE). | def _read_motif_name(handle):
"""Read motif name (PRIVATE)."""
for line in handle:
if "sorted by position p-value" in line:
break
else:
raise ValueError("Unexpected end of stream: Failed to find motif name")
line = line.strip()
words = line.split()
name = " ".join(words[0:2])
return name |
Read motif(s) from a file in various position frequency matrix formats.
Return the record of PFM(s).
Call the appropriate routine based on the format passed. | def read(handle, pfm_format):
"""Read motif(s) from a file in various position frequency matrix formats.
Return the record of PFM(s).
Call the appropriate routine based on the format passed.
"""
# Supporting underscores here for backward compatibility
pfm_format = pfm_format.lower().replace("_", "-")
if pfm_format == "pfm-four-columns":
record = _read_pfm_four_columns(handle)
return record
elif pfm_format == "pfm-four-rows":
record = _read_pfm_four_rows(handle)
return record
else:
raise ValueError("Unknown Position Frequency matrix format '%s'" % pfm_format) |
Read motifs in position frequency matrix format (4 columns) from a file handle.
# cisbp
Pos A C G T
1 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
2 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
3 0.971153846153846 0.00961538461538462 0.00961538461538462 0.00961538461538462
4 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
5 0.00961538461538462 0.971153846153846 0.00961538461538462 0.00961538461538462
6 0.971153846153846 0.00961538461538462 0.00961538461538462 0.00961538461538462
7 0.00961538461538462 0.971153846153846 0.00961538461538462 0.00961538461538462
8 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
# c2h2 zfs
Gene ENSG00000197372
Pos A C G T
1 0.341303 0.132427 0.117054 0.409215
2 0.283785 0.077066 0.364552 0.274597
3 0.491055 0.078208 0.310520 0.120217
4 0.492621 0.076117 0.131007 0.300256
5 0.250645 0.361464 0.176504 0.211387
6 0.276694 0.498070 0.197793 0.027444
7 0.056317 0.014631 0.926202 0.002850
8 0.004470 0.007769 0.983797 0.003964
9 0.936213 0.058787 0.002387 0.002613
10 0.004352 0.004030 0.002418 0.989200
11 0.013277 0.008165 0.001991 0.976567
12 0.968132 0.002263 0.002868 0.026737
13 0.397623 0.052017 0.350783 0.199577
14 0.000000 0.000000 1.000000 0.000000
15 1.000000 0.000000 0.000000 0.000000
16 0.000000 0.000000 1.000000 0.000000
17 0.000000 0.000000 1.000000 0.000000
18 1.000000 0.000000 0.000000 0.000000
19 0.000000 1.000000 0.000000 0.000000
20 1.000000 0.000000 0.000000 0.000000
# c2h2 zfs
Gene FBgn0000210
Motif M1734_0.90
Pos A C G T
1 0.25 0.0833333 0.0833333 0.583333
2 0.75 0.166667 0.0833333 0
3 0.833333 0 0 0.166667
4 1 0 0 0
5 0 0.833333 0.0833333 0.0833333
6 0.333333 0 0 0.666667
7 0.833333 0 0 0.166667
8 0.5 0 0.333333 0.166667
9 0.5 0.0833333 0.166667 0.25
10 0.333333 0.25 0.166667 0.25
11 0.166667 0.25 0.416667 0.166667
# flyfactorsurvey (cluster buster)
>AbdA_Cell_FBgn0000014
1 3 0 14
0 0 0 18
16 0 0 2
18 0 0 0
1 0 0 17
0 0 6 12
15 1 2 0
# homer
>ATGACTCATC AP-1(bZIP)/ThioMac-PU.1-ChIP-Seq(GSE21512)/Homer 6.049537 -1.782996e+03 0 9805.3,5781.0,3085.1,2715.0,0.00e+00
0.419 0.275 0.277 0.028
0.001 0.001 0.001 0.997
0.010 0.002 0.965 0.023
0.984 0.003 0.001 0.012
0.062 0.579 0.305 0.054
0.026 0.001 0.001 0.972
0.043 0.943 0.001 0.012
0.980 0.005 0.001 0.014
0.050 0.172 0.307 0.471
0.149 0.444 0.211 0.195
# hocomoco
> AHR_si
40.51343240527031 18.259112547756697 56.41253757072521 38.77363485291994
10.877470982533044 11.870876719950774 34.66312982331297 96.54723985087516
21.7165707818416 43.883079837598544 20.706746561638717 67.6523201955933
2.5465132509466635 1.3171620263517245 145.8637051322628 4.231336967110781
0.0 150.35847450464382 1.4927836298652875 2.1074592421627525
3.441039751299748 0.7902972158110341 149.37613720253387 0.3512432070271259
0.0 3.441039751299748 0.7024864140542533 149.81519121131782
0.0 0.0 153.95871737667187 0.0
43.07922333291745 66.87558226865211 16.159862546986584 27.844049228115868
# neph
UW.Motif.0001 atgactca
0.772949 0.089579 0.098612 0.038860
0.026652 0.004653 0.025056 0.943639
0.017663 0.023344 0.918728 0.040264
0.919596 0.025414 0.029759 0.025231
0.060312 0.772259 0.104968 0.062462
0.037406 0.020643 0.006667 0.935284
0.047316 0.899024 0.026928 0.026732
0.948639 0.019497 0.005737 0.026128
# tiffin
T A G C
30 0 28 40
0 0 0 99
0 55 14 29
0 99 0 0
20 78 0 0
0 52 7 39
19 46 11 22
0 60 38 0
0 33 0 66
73 0 25 0
99 0 0 0 | def _read_pfm_four_columns(handle):
"""Read motifs in position frequency matrix format (4 columns) from a file handle.
# cisbp
Pos A C G T
1 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
2 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
3 0.971153846153846 0.00961538461538462 0.00961538461538462 0.00961538461538462
4 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
5 0.00961538461538462 0.971153846153846 0.00961538461538462 0.00961538461538462
6 0.971153846153846 0.00961538461538462 0.00961538461538462 0.00961538461538462
7 0.00961538461538462 0.971153846153846 0.00961538461538462 0.00961538461538462
8 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
# c2h2 zfs
Gene ENSG00000197372
Pos A C G T
1 0.341303 0.132427 0.117054 0.409215
2 0.283785 0.077066 0.364552 0.274597
3 0.491055 0.078208 0.310520 0.120217
4 0.492621 0.076117 0.131007 0.300256
5 0.250645 0.361464 0.176504 0.211387
6 0.276694 0.498070 0.197793 0.027444
7 0.056317 0.014631 0.926202 0.002850
8 0.004470 0.007769 0.983797 0.003964
9 0.936213 0.058787 0.002387 0.002613
10 0.004352 0.004030 0.002418 0.989200
11 0.013277 0.008165 0.001991 0.976567
12 0.968132 0.002263 0.002868 0.026737
13 0.397623 0.052017 0.350783 0.199577
14 0.000000 0.000000 1.000000 0.000000
15 1.000000 0.000000 0.000000 0.000000
16 0.000000 0.000000 1.000000 0.000000
17 0.000000 0.000000 1.000000 0.000000
18 1.000000 0.000000 0.000000 0.000000
19 0.000000 1.000000 0.000000 0.000000
20 1.000000 0.000000 0.000000 0.000000
# c2h2 zfs
Gene FBgn0000210
Motif M1734_0.90
Pos A C G T
1 0.25 0.0833333 0.0833333 0.583333
2 0.75 0.166667 0.0833333 0
3 0.833333 0 0 0.166667
4 1 0 0 0
5 0 0.833333 0.0833333 0.0833333
6 0.333333 0 0 0.666667
7 0.833333 0 0 0.166667
8 0.5 0 0.333333 0.166667
9 0.5 0.0833333 0.166667 0.25
10 0.333333 0.25 0.166667 0.25
11 0.166667 0.25 0.416667 0.166667
# flyfactorsurvey (cluster buster)
>AbdA_Cell_FBgn0000014
1 3 0 14
0 0 0 18
16 0 0 2
18 0 0 0
1 0 0 17
0 0 6 12
15 1 2 0
# homer
>ATGACTCATC AP-1(bZIP)/ThioMac-PU.1-ChIP-Seq(GSE21512)/Homer 6.049537 -1.782996e+03 0 9805.3,5781.0,3085.1,2715.0,0.00e+00
0.419 0.275 0.277 0.028
0.001 0.001 0.001 0.997
0.010 0.002 0.965 0.023
0.984 0.003 0.001 0.012
0.062 0.579 0.305 0.054
0.026 0.001 0.001 0.972
0.043 0.943 0.001 0.012
0.980 0.005 0.001 0.014
0.050 0.172 0.307 0.471
0.149 0.444 0.211 0.195
# hocomoco
> AHR_si
40.51343240527031 18.259112547756697 56.41253757072521 38.77363485291994
10.877470982533044 11.870876719950774 34.66312982331297 96.54723985087516
21.7165707818416 43.883079837598544 20.706746561638717 67.6523201955933
2.5465132509466635 1.3171620263517245 145.8637051322628 4.231336967110781
0.0 150.35847450464382 1.4927836298652875 2.1074592421627525
3.441039751299748 0.7902972158110341 149.37613720253387 0.3512432070271259
0.0 3.441039751299748 0.7024864140542533 149.81519121131782
0.0 0.0 153.95871737667187 0.0
43.07922333291745 66.87558226865211 16.159862546986584 27.844049228115868
# neph
UW.Motif.0001 atgactca
0.772949 0.089579 0.098612 0.038860
0.026652 0.004653 0.025056 0.943639
0.017663 0.023344 0.918728 0.040264
0.919596 0.025414 0.029759 0.025231
0.060312 0.772259 0.104968 0.062462
0.037406 0.020643 0.006667 0.935284
0.047316 0.899024 0.026928 0.026732
0.948639 0.019497 0.005737 0.026128
# tiffin
T A G C
30 0 28 40
0 0 0 99
0 55 14 29
0 99 0 0
20 78 0 0
0 52 7 39
19 46 11 22
0 60 38 0
0 33 0 66
73 0 25 0
99 0 0 0
"""
record = Record()
motif_name = None
motif_nbr = 0
motif_nbr_added = 0
default_nucleotide_order = ["A", "C", "G", "T"]
nucleotide_order = default_nucleotide_order
nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
for line in handle:
line = line.strip()
if line:
columns = line.split()
nbr_columns = len(columns)
if line.startswith("#"):
# Skip comment lines.
continue
elif line.startswith(">"):
# Parse ">AbdA_Cell_FBgn0000014" and "> AHR_si" like lines and put the part after ">" as motif name.
if motif_nbr != 0 and motif_nbr_added != motif_nbr:
# Add the previous motif to the record.
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
motif_nbr_added = motif_nbr
# Reinitialize variables for the new motif.
motif_name = line[1:].strip()
nucleotide_order = default_nucleotide_order
elif columns[0] == "Gene":
# Parse "Gene ENSG00000197372" like lines and put the gene name as motif name.
if motif_nbr != 0 and motif_nbr_added != motif_nbr:
# Add the previous motif to the record.
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
motif_nbr_added = motif_nbr
# Reinitialize variables for the new motif.
motif_name = columns[1]
nucleotide_order = default_nucleotide_order
elif columns[0] == "Motif":
# Parse "Motif M1734_0.90" like lines.
if motif_nbr != 0 and motif_nbr_added != motif_nbr:
# Add the previous motif to the record.
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
motif_nbr_added = motif_nbr
# Reinitialize variables for the new motif.
motif_name = columns[1]
nucleotide_order = default_nucleotide_order
elif columns[0] == "Pos":
# Parse "Pos A C G T" like lines and change nucleotide order if necessary.
if nbr_columns == 5:
# If the previous line was not a "Gene ENSG00000197372" like line, a new motif starts here.
if motif_nbr != 0 and motif_nbr_added != motif_nbr:
# Add the previous motif to the record.
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
motif_nbr_added = motif_nbr
nucleotide_order = default_nucleotide_order
if set(columns[1:]) == set(default_nucleotide_order):
nucleotide_order = columns[1:]
elif columns[0] in default_nucleotide_order:
# Parse "A C G T" like lines and change nucleotide order if necessary.
if nbr_columns == 4:
nucleotide_order = default_nucleotide_order
if set(columns) == set(default_nucleotide_order):
nucleotide_order = columns
else:
# Parse matrix columns lines and use the correct nucleotide order.
if nbr_columns == 4:
matrix_columns = columns
elif nbr_columns == 5:
matrix_columns = columns[1:]
else:
continue
if motif_nbr == motif_nbr_added:
# A new motif matrix starts here, so reinitialize variables for the new motif.
nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
motif_nbr += 1
[
nucleotide_counts[nucleotide].append(float(nucleotide_count))
for nucleotide, nucleotide_count in zip(
nucleotide_order, matrix_columns
)
]
else:
# Empty lines can be separators between motifs.
if motif_nbr != 0 and motif_nbr_added != motif_nbr:
# Add the previous motif to the record.
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
motif_nbr_added = motif_nbr
# Reinitialize variables for the new motif.
motif_name = None
nucleotide_order = default_nucleotide_order
# nucleotide_counts = {'A': [], 'C': [], 'G': [], 'T': []}
if motif_nbr != 0 and motif_nbr_added != motif_nbr:
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
record.append(motif)
return record |
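A sketch feeding the flyfactorsurvey-style block shown in the docstring above through this parser:
from io import StringIO

text = ">AbdA_Cell_FBgn0000014\n1 3 0 14\n0 0 0 18\n16 0 0 2\n"
record = _read_pfm_four_columns(StringIO(text))
print(record[0].name, record[0].length)  # AbdA_Cell_FBgn0000014 3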
Read motifs in position frequency matrix format (4 rows) from a file handle.
# hdpi
A 0 5 6 5 1 0
C 1 1 0 0 0 4
G 5 0 0 0 3 0
T 0 0 0 1 2 2
# yetfasco
A 0.5 0.0 0.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.5 0.0 0.0833333334583333
T 0.0 0.0 0.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.0 0.0 0.0833333334583333
G 0.0 1.0 0.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.0 1.0 0.249999999875
C 0.5 0.0 1.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.5 0.0 0.583333333208333
# flyfactorsurvey ZFP finger
A | 92 106 231 135 0 1 780 28 0 700 739 94 60 127 130
C | 138 82 129 81 774 1 3 1 0 6 17 49 193 122 148
G | 270 398 54 164 7 659 1 750 755 65 1 41 202 234 205
T | 290 204 375 411 9 127 6 11 36 20 31 605 335 307 308
# scertf pcm
A | 9 1 1 97 1 94
T | 80 1 97 1 1 2
C | 9 97 1 1 1 2
G | 2 1 1 1 97 2
# scertf pfm
A | 0.090 0.010 0.010 0.970 0.010 0.940
C | 0.090 0.970 0.010 0.010 0.010 0.020
G | 0.020 0.010 0.010 0.010 0.970 0.020
T | 0.800 0.010 0.970 0.010 0.010 0.020
# idmmpmm
> abd-A
0.218451749734889 0.0230646871686108 0.656680805938494 0.898197242841994 0.040694591728526 0.132953340402969 0.74907211028632 0.628313891834571
0.0896076352067868 0.317338282078473 0.321580063626723 0.0461293743372216 0.0502386002120891 0.040694591728526 0.0284994697773065 0.0339342523860021
0.455991516436904 0.0691940615058324 0.0108695652173913 0.0217391304347826 0.0284994697773065 0.0284994697773065 0.016304347826087 0.160127253446448
0.235949098621421 0.590402969247084 0.0108695652173913 0.0339342523860021 0.880567338282079 0.797852598091198 0.206124072110286 0.17762460233298
# JASPAR
>MA0001.1 AGL3
A [ 0 3 79 40 66 48 65 11 65 0 ]
C [94 75 4 3 1 2 5 2 3 3 ]
G [ 1 0 3 4 1 0 5 3 28 88 ]
T [ 2 19 11 50 29 47 22 81 1 6 ]
or::
>MA0001.1 AGL3
0 3 79 40 66 48 65 11 65 0
94 75 4 3 1 2 5 2 3 3
1 0 3 4 1 0 5 3 28 88
2 19 11 50 29 47 22 81 1 6 | def _read_pfm_four_rows(handle):
"""Read motifs in position frequency matrix format (4 rows) from a file handle.
# hdpi
A 0 5 6 5 1 0
C 1 1 0 0 0 4
G 5 0 0 0 3 0
T 0 0 0 1 2 2
# yetfasco
A 0.5 0.0 0.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.5 0.0 0.0833333334583333
T 0.0 0.0 0.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.0 0.0 0.0833333334583333
G 0.0 1.0 0.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.0 1.0 0.249999999875
C 0.5 0.0 1.0 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.25 0.5 0.0 0.583333333208333
# flyfactorsurvey ZFP finger
A | 92 106 231 135 0 1 780 28 0 700 739 94 60 127 130
C | 138 82 129 81 774 1 3 1 0 6 17 49 193 122 148
G | 270 398 54 164 7 659 1 750 755 65 1 41 202 234 205
T | 290 204 375 411 9 127 6 11 36 20 31 605 335 307 308
# scertf pcm
A | 9 1 1 97 1 94
T | 80 1 97 1 1 2
C | 9 97 1 1 1 2
G | 2 1 1 1 97 2
# scertf pfm
A | 0.090 0.010 0.010 0.970 0.010 0.940
C | 0.090 0.970 0.010 0.010 0.010 0.020
G | 0.020 0.010 0.010 0.010 0.970 0.020
T | 0.800 0.010 0.970 0.010 0.010 0.020
# idmmpmm
> abd-A
0.218451749734889 0.0230646871686108 0.656680805938494 0.898197242841994 0.040694591728526 0.132953340402969 0.74907211028632 0.628313891834571
0.0896076352067868 0.317338282078473 0.321580063626723 0.0461293743372216 0.0502386002120891 0.040694591728526 0.0284994697773065 0.0339342523860021
0.455991516436904 0.0691940615058324 0.0108695652173913 0.0217391304347826 0.0284994697773065 0.0284994697773065 0.016304347826087 0.160127253446448
0.235949098621421 0.590402969247084 0.0108695652173913 0.0339342523860021 0.880567338282079 0.797852598091198 0.206124072110286 0.17762460233298
# JASPAR
>MA0001.1 AGL3
A [ 0 3 79 40 66 48 65 11 65 0 ]
C [94 75 4 3 1 2 5 2 3 3 ]
G [ 1 0 3 4 1 0 5 3 28 88 ]
T [ 2 19 11 50 29 47 22 81 1 6 ]
or::
>MA0001.1 AGL3
0 3 79 40 66 48 65 11 65 0
94 75 4 3 1 2 5 2 3 3
1 0 3 4 1 0 5 3 28 88
2 19 11 50 29 47 22 81 1 6
"""
record = Record()
name_pattern = re.compile(r"^>\s*(.+)\s*")
row_pattern_with_nucleotide_letter = re.compile(
r"\s*([ACGT])\s*[\[|]*\s*([0-9.\-eE\s]+)\s*\]*\s*"
)
row_pattern_without_nucleotide_letter = re.compile(r"\s*([0-9.\-eE\s]+)\s*")
motif_name = None
nucleotide_counts = {}
row_count = 0
nucleotides = ["A", "C", "G", "T"]
for line in handle:
line = line.strip()
name_match = name_pattern.match(line)
row_match_with_nucleotide_letter = row_pattern_with_nucleotide_letter.match(
line
)
row_match_without_nucleotide_letter = (
row_pattern_without_nucleotide_letter.match(line)
)
if name_match:
motif_name = name_match.group(1)
elif row_match_with_nucleotide_letter:
(nucleotide, counts_str) = row_match_with_nucleotide_letter.group(1, 2)
current_nucleotide_counts = counts_str.split()
nucleotide_counts[nucleotide] = [
float(current_nucleotide_count)
for current_nucleotide_count in current_nucleotide_counts
]
row_count += 1
if row_count == 4:
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
if motif_name:
motif.name = motif_name
record.append(motif)
motif_name = None
nucleotide_counts = {}
row_count = 0
elif row_match_without_nucleotide_letter:
current_nucleotide_counts = row_match_without_nucleotide_letter.group(
1
).split()
nucleotide_counts[nucleotides[row_count]] = [
float(current_nucleotide_count)
for current_nucleotide_count in current_nucleotide_counts
]
row_count += 1
if row_count == 4:
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
if motif_name:
motif.name = motif_name
record.append(motif)
motif_name = None
nucleotide_counts = {}
row_count = 0
return record |
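A sketch with JASPAR-style rows (values taken from the docstring above, truncated to four positions):
from io import StringIO

text = (
    "A [ 0 3 79 40 ]\n"
    "C [94 75 4 3 ]\n"
    "G [ 1 0 3 4 ]\n"
    "T [ 2 19 11 50 ]\n"
)
record = _read_pfm_four_rows(StringIO(text))
print(len(record), record[0].length)  # 1 4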
Return the representation of motifs in Cluster Buster position frequency matrix format. | def write(motifs):
"""Return the representation of motifs in Cluster Buster position frequency matrix format."""
lines = []
for m in motifs:
line = f">{m.name}\n"
lines.append(line)
for ACGT_counts in zip(
m.counts["A"], m.counts["C"], m.counts["G"], m.counts["T"]
):
lines.append("{:0.0f}\t{:0.0f}\t{:0.0f}\t{:0.0f}\n".format(*ACGT_counts))
# Finished; glue the lines together.
text = "".join(lines)
return text |
Parse a transfac format handle into a Record object. | def read(handle, strict=True):
"""Parse a transfac format handle into a Record object."""
annotations = {}
references = []
counts = None
record = Record()
for line in handle:
line = line.strip()
if not line:
continue
key_value = line.split(None, 1)
key = key_value[0].strip()
if strict:
if len(key) != 2:
raise ValueError(
"The key value of a TRANSFAC motif line should have 2 characters:"
f'"{line}"'
)
if len(key_value) == 2:
value = key_value[1].strip()
if strict:
if not line.partition("  ")[1]:
raise ValueError(
"A TRANSFAC motif line should have 2 "
"spaces between key and value columns: "
f'"{line}"'
)
if key == "VV":
record.version = value
elif key in ("P0", "PO"): # Old TRANSFAC files use PO instead of P0
counts = {}
if value.split()[:4] != ["A", "C", "G", "T"]:
raise ValueError(
f'A TRANSFAC matrix "{key}" line should be '
f'followed by "A C G T": {line}'
)
length = 0
for c in "ACGT":
counts[c] = []
for line in handle:
line = line.strip()
key_value = line.split(None, 1)
key = key_value[0].strip()
if len(key_value) == 2:
value = key_value[1].strip()
if strict:
if not line.partition("  ")[1]:
raise ValueError(
"A TRANSFAC motif line should have 2 spaces"
f' between key and value columns: "{line}"'
)
try:
i = int(key)
except ValueError:
break
if length == 0 and i == 0:
if strict:
raise ValueError(
'A TRANSFAC matrix should start with "01" as first row'
f' of the matrix, but this matrix uses "00": "{line}"'
)
else:
length += 1
if i != length:
raise ValueError(
"The TRANSFAC matrix row number does not match the position"
f' in the matrix: "{line}"'
)
if strict:
if len(key) == 1:
raise ValueError(
"A TRANSFAC matrix line should have a 2 digit"
f' key at the start of the line ("{i:02d}"),'
f' but this matrix uses "{i:d}": "{line:s}".'
)
if len(key_value) != 2:
raise ValueError(
"A TRANSFAC matrix line should have a key and a"
f' value: "{line}"'
)
values = value.split()[:4]
if len(values) != 4:
raise ValueError(
"A TRANSFAC matrix line should have a value for each"
f' nucleotide (A, C, G and T): "{line}"'
)
for c, v in zip("ACGT", values):
counts[c].append(float(v))
if line == "XX":
pass
elif key == "RN":
index, separator, accession = value.partition(";")
if index[0] != "[":
raise ValueError(
f'The index "{index}" in a TRANSFAC RN line should start'
f' with a "[": "{line}"'
)
if index[-1] != "]":
raise ValueError(
f'The index "{index}" in a TRANSFAC RN line should end'
f' with a "]": "{line}"'
)
index = int(index[1:-1])
if len(references) != index - 1:
raise ValueError(
f'The index "{index:d}" of the TRANSFAC RN line does not '
"match the current number of seen references "
f'"{len(references) + 1:d}": "{line:s}"'
)
reference = {key: value}
references.append(reference)
elif key == "//":
if counts is not None:
motif = Motif(alphabet="ACGT", counts=counts)
motif.update(annotations)
motif.references = references
record.append(motif)
annotations = {}
references = []
elif key in Motif.reference_keys:
reference[key] = value
elif key in Motif.multiple_value_keys:
if key not in annotations:
annotations[key] = []
annotations[key].append(value)
else:
annotations[key] = value
return record |
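A hedged sketch of a minimal TRANSFAC-like record (the motif is hypothetical); note the two spaces between key and value that strict mode enforces:
from io import StringIO

text = (
    "ID  motif1\n"
    "P0      A      C      G      T\n"
    "01      1      2      2      0      S\n"
    "02      2      1      2      0      R\n"
    "XX\n"
    "//\n"
)
record = read(StringIO(text))
print(record[0]["ID"], record[0].length)  # motif1 2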
Write the representation of a motif in TRANSFAC format. | def write(motifs):
"""Write the representation of a motif in TRANSFAC format."""
blocks = []
try:
version = motifs.version
except AttributeError:
pass
else:
if version is not None:
block = (
"""\
VV  %s
XX
//
"""
% version
)
blocks.append(block)
multiple_value_keys = Motif.multiple_value_keys
sections = (
("AC", "AS"), # Accession
("ID",), # ID
("DT", "CO"), # Date, copyright
("NA",), # Name
("DE",), # Short factor description
("TY",), # Type
("OS", "OC"), # Organism
("HP", "HC"), # Superfamilies, subfamilies
("BF",), # Binding factors
("P0",), # Frequency matrix
("BA",), # Statistical basis
("BS",), # Factor binding sites
("CC",), # Comments
("DR",), # External databases
("OV", "PV"), # Versions
)
for motif in motifs:
lines = []
for section in sections:
blank = False
for key in section:
if key == "P0":
# Frequency matrix
length = motif.length
if length == 0:
continue
sequence = motif.degenerate_consensus
letters = sorted(motif.alphabet)
line = " ".join(["P0"] + letters)
lines.append(line)
for i in range(length):
line = (
" ".join(["%02.d"] + ["%6.20g" for _ in letters])
+ " %s"
)
line = line % tuple(
[i + 1]
+ [motif.counts[_][i] for _ in letters]
+ [sequence[i]]
)
lines.append(line)
blank = True
else:
try:
value = motif.get(key)
except AttributeError:
value = None
if value is not None:
if key in multiple_value_keys:
for v in value:
line = f"{key} {v}"
lines.append(line)
else:
line = f"{key} {value}"
lines.append(line)
blank = True
if key == "PV":
# References
try:
references = motif.references
except AttributeError:
pass
else:
keys = ("RN", "RX", "RA", "RT", "RL")
for reference in references:
for key in keys:
value = reference.get(key)
if value is None:
continue
line = f"{key} {value}"
lines.append(line)
blank = True
if blank:
line = "XX"
lines.append(line)
# Finished this motif; glue the lines together
line = "//"
lines.append(line)
block = "\n".join(lines) + "\n"
blocks.append(block)
# Finished all motifs; glue the blocks together
text = "".join(blocks)
return text |
Read motifs in XMS matrix format from a file handle.
XMS is an XML format for describing regulatory motifs and PSSMs.
This format was defined by Thomas Down, and used in the NestedMICA and MotifExplorer programs. | def read(handle):
"""Read motifs in XMS matrix format from a file handle.
XMS is an XML format for describing regulatory motifs and PSSMs.
This format was defined by Thomas Down, and used in the NestedMICA and MotifExplorer programs.
"""
xms_doc = minidom.parse(handle)
record = XMSScanner(xms_doc).record
return record |
Create a Motif object. | def create(instances, alphabet="ACGT"):
"""Create a Motif object."""
alignment = Alignment(instances)
return Motif(alignment=alignment, alphabet=alphabet) |
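A quick sketch: build a motif from aligned instances (hypothetical sequences, which must all share one length) and inspect its consensus:
from Bio.Seq import Seq

m = create([Seq("TACAA"), Seq("TACGC"), Seq("TACAC")])
print(m.consensus)  # TACAC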
Parse an output file from a motif finding program.
Currently supported formats (case is ignored):
- AlignAce: AlignAce output file format
- ClusterBuster: Cluster Buster position frequency matrix format
- XMS: XMS matrix format
- MEME: MEME output file motif
- MINIMAL: MINIMAL MEME output file motif
- MAST: MAST output file motif
- TRANSFAC: TRANSFAC database file format
- pfm-four-columns: Generic position-frequency matrix format with four columns. (cisbp, homer, hocomoco, neph, tiffin)
- pfm-four-rows: Generic position-frequency matrix format with four rows. (scertf, yetfasco, hdpi, idmmpmm, flyfactorsurvey)
- pfm: JASPAR-style position-frequency matrix
- jaspar: JASPAR-style multiple PFM format
- sites: JASPAR-style sites file
As files in the pfm and sites formats contain only a single motif,
it is easier to use Bio.motifs.read() instead of Bio.motifs.parse()
for those.
For example:
>>> from Bio import motifs
>>> with open("motifs/alignace.out") as handle:
... for m in motifs.parse(handle, "AlignAce"):
... print(m.consensus)
...
TCTACGATTGAG
CTGCACCTAGCTACGAGTGAG
GTGCCCTAAGCATACTAGGCG
GCCACTAGCAGAGCAGGGGGC
CGACTCAGAGGTT
CCACGCTAAGAGAAGTGCCGGAG
GCACGTCCCTGAGCA
GTCCATCGCAAAGCGTGGGGC
GAGATCAGAGGGCCG
TGGACGCGGGG
GACCAGAGCCTCGCATGGGGG
AGCGCGCGTG
GCCGGTTGCTGTTCATTAGG
ACCGACGGCAGCTAAAAGGG
GACGCCGGGGAT
CGACTCGCGCTTACAAGG
If strict is True (default), the parser will raise a ValueError if the
file contents do not strictly comply with the specified file format. | def parse(handle, fmt, strict=True):
"""Parse an output file from a motif finding program.
Currently supported formats (case is ignored):
- AlignAce: AlignAce output file format
- ClusterBuster: Cluster Buster position frequency matrix format
- XMS: XMS matrix format
- MEME: MEME output file motif
- MINIMAL: MINIMAL MEME output file motif
- MAST: MAST output file motif
- TRANSFAC: TRANSFAC database file format
- pfm-four-columns: Generic position-frequency matrix format with four columns. (cisbp, homer, hocomoco, neph, tiffin)
- pfm-four-rows: Generic position-frequency matrix format with four rows. (scertf, yetfasco, hdpi, idmmpmm, flyfactorsurvey)
- pfm: JASPAR-style position-frequency matrix
- jaspar: JASPAR-style multiple PFM format
- sites: JASPAR-style sites file
As files in the pfm and sites formats contain only a single motif,
it is easier to use Bio.motifs.read() instead of Bio.motifs.parse()
for those.
For example:
>>> from Bio import motifs
>>> with open("motifs/alignace.out") as handle:
... for m in motifs.parse(handle, "AlignAce"):
... print(m.consensus)
...
TCTACGATTGAG
CTGCACCTAGCTACGAGTGAG
GTGCCCTAAGCATACTAGGCG
GCCACTAGCAGAGCAGGGGGC
CGACTCAGAGGTT
CCACGCTAAGAGAAGTGCCGGAG
GCACGTCCCTGAGCA
GTCCATCGCAAAGCGTGGGGC
GAGATCAGAGGGCCG
TGGACGCGGGG
GACCAGAGCCTCGCATGGGGG
AGCGCGCGTG
GCCGGTTGCTGTTCATTAGG
ACCGACGGCAGCTAAAAGGG
GACGCCGGGGAT
CGACTCGCGCTTACAAGG
If strict is True (default), the parser will raise a ValueError if the
file contents do not strictly comply with the specified file format.
"""
fmt = fmt.lower()
if fmt == "alignace":
from Bio.motifs import alignace
return alignace.read(handle)
elif fmt == "meme":
from Bio.motifs import meme
return meme.read(handle)
elif fmt == "minimal":
from Bio.motifs import minimal
return minimal.read(handle)
elif fmt == "clusterbuster":
from Bio.motifs import clusterbuster
return clusterbuster.read(handle)
elif fmt in ("pfm-four-columns", "pfm-four-rows"):
from Bio.motifs import pfm
return pfm.read(handle, fmt)
elif fmt == "xms":
from Bio.motifs import xms
return xms.read(handle)
elif fmt == "mast":
from Bio.motifs import mast
return mast.read(handle)
elif fmt == "transfac":
from Bio.motifs import transfac
return transfac.read(handle, strict)
elif fmt in ("pfm", "sites", "jaspar"):
from Bio.motifs import jaspar
return jaspar.read(handle, fmt)
else:
raise ValueError("Unknown format %s" % fmt) |
Read a motif from a handle using the specified file-format.
This supports the same formats as Bio.motifs.parse(), but
only for files containing exactly one motif. For example,
reading a JASPAR-style pfm file:
>>> from Bio import motifs
>>> with open("motifs/SRF.pfm") as handle:
... m = motifs.read(handle, "pfm")
>>> m.consensus
Seq('GCCCATATATGG')
Or a single-motif MEME file,
>>> from Bio import motifs
>>> with open("motifs/meme.psp_test.classic.zoops.xml") as handle:
... m = motifs.read(handle, "meme")
>>> m.consensus
Seq('GCTTATGTAA')
If the handle contains no records, or more than one record,
an exception is raised:
>>> from Bio import motifs
>>> with open("motifs/alignace.out") as handle:
... motif = motifs.read(handle, "AlignAce")
Traceback (most recent call last):
...
ValueError: More than one motif found in handle
If however you want the first motif from a file containing
multiple motifs this function would raise an exception (as
shown in the example above). Instead use:
>>> from Bio import motifs
>>> with open("motifs/alignace.out") as handle:
... record = motifs.parse(handle, "alignace")
>>> motif = record[0]
>>> motif.consensus
Seq('TCTACGATTGAG')
Use the Bio.motifs.parse(handle, fmt) function if you want
to read multiple records from the handle.
If strict is True (default), the parser will raise a ValueError if the
file contents do not strictly comply with the specified file format. | def read(handle, fmt, strict=True):
"""Read a motif from a handle using the specified file-format.
This supports the same formats as Bio.motifs.parse(), but
only for files containing exactly one motif. For example,
reading a JASPAR-style pfm file:
>>> from Bio import motifs
>>> with open("motifs/SRF.pfm") as handle:
... m = motifs.read(handle, "pfm")
>>> m.consensus
Seq('GCCCATATATGG')
Or a single-motif MEME file,
>>> from Bio import motifs
>>> with open("motifs/meme.psp_test.classic.zoops.xml") as handle:
... m = motifs.read(handle, "meme")
>>> m.consensus
Seq('GCTTATGTAA')
If the handle contains no records, or more than one record,
an exception is raised:
>>> from Bio import motifs
>>> with open("motifs/alignace.out") as handle:
... motif = motifs.read(handle, "AlignAce")
Traceback (most recent call last):
...
ValueError: More than one motif found in handle
If however you want the first motif from a file containing
multiple motifs this function would raise an exception (as
shown in the example above). Instead use:
>>> from Bio import motifs
>>> with open("motifs/alignace.out") as handle:
... record = motifs.parse(handle, "alignace")
>>> motif = record[0]
>>> motif.consensus
Seq('TCTACGATTGAG')
Use the Bio.motifs.parse(handle, fmt) function if you want
to read multiple records from the handle.
If strict is True (default), the parser will raise a ValueError if the
file contents do not strictly comply with the specified file format.
"""
fmt = fmt.lower()
motifs = parse(handle, fmt, strict)
if len(motifs) == 0:
raise ValueError("No motifs found in handle")
if len(motifs) > 1:
raise ValueError("More than one motif found in handle")
motif = motifs[0]
return motif |
Return a string representation of motifs in the given format.
Currently supported formats (case is ignored):
- clusterbuster: Cluster Buster position frequency matrix format
- pfm : JASPAR simple single Position Frequency Matrix
- jaspar : JASPAR multiple PFM format
- transfac : TRANSFAC like files | def write(motifs, fmt):
"""Return a string representation of motifs in the given format.
Currently supported formats (case is ignored):
- clusterbuster: Cluster Buster position frequency matrix format
- pfm : JASPAR simple single Position Frequency Matrix
- jaspar : JASPAR multiple PFM format
- transfac : TRANSFAC like files
"""
fmt = fmt.lower()
if fmt in ("pfm", "jaspar"):
from Bio.motifs import jaspar
return jaspar.write(motifs, fmt)
elif fmt == "transfac":
from Bio.motifs import transfac
return transfac.write(motifs)
elif fmt == "clusterbuster":
from Bio.motifs import clusterbuster
return clusterbuster.write(motifs)
else:
raise ValueError("Unknown format type %s" % fmt) |
Read motif(s) from a file in one of several different JASPAR formats.
Return the record of PFM(s).
Call the appropriate routine based on the format passed. | def read(handle, format):
"""Read motif(s) from a file in one of several different JASPAR formats.
Return the record of PFM(s).
Call the appropriate routine based on the format passed.
"""
format = format.lower()
if format == "pfm":
record = _read_pfm(handle)
return record
elif format == "sites":
record = _read_sites(handle)
return record
elif format == "jaspar":
record = _read_jaspar(handle)
return record
else:
raise ValueError("Unknown JASPAR format %s" % format) |
Return the representation of motifs in "pfm" or "jaspar" format. | def write(motifs, format):
"""Return the representation of motifs in "pfm" or "jaspar" format."""
letters = "ACGT"
lines = []
if format == "pfm":
motif = motifs[0]
counts = motif.counts
for letter in letters:
terms = [f"{value:6.2f}" for value in counts[letter]]
line = f"{' '.join(terms)}\n"
lines.append(line)
elif format == "jaspar":
for m in motifs:
counts = m.counts
try:
matrix_id = m.matrix_id
except AttributeError:
matrix_id = None
line = f">{matrix_id} {m.name}\n"
lines.append(line)
for letter in letters:
terms = [f"{value:6.2f}" for value in counts[letter]]
line = f"{letter} [{' '.join(terms)}]\n"
lines.append(line)
else:
raise ValueError("Unknown JASPAR format %s" % format)
# Finished; glue the lines together
text = "".join(lines)
return text |
Read the motif from a JASPAR .pfm file (PRIVATE). | def _read_pfm(handle):
"""Read the motif from a JASPAR .pfm file (PRIVATE)."""
alphabet = "ACGT"
counts = {}
for letter, line in zip(alphabet, handle):
words = line.split()
# if there is a letter in the beginning, ignore it
if words[0] == letter:
words = words[1:]
counts[letter] = [float(x) for x in words]
motif = Motif(matrix_id=None, name=None, alphabet=alphabet, counts=counts)
motif.mask = "*" * motif.length
record = Record()
record.append(motif)
return record |
Read the motif from JASPAR .sites file (PRIVATE). | def _read_sites(handle):
"""Read the motif from JASPAR .sites file (PRIVATE)."""
alphabet = "ACGT"
instances = []
for line in handle:
if not line.startswith(">"):
break
# line contains the header ">...."
# now read the actual sequence
line = next(handle)
instance = ""
for c in line.strip():
if c.isupper():
instance += c
instance = Seq(instance)
instances.append(instance)
alignment = Align.Alignment(instances)
motif = Motif(matrix_id=None, name=None, alphabet=alphabet, alignment=alignment)
motif.mask = "*" * motif.length
record = Record()
record.append(motif)
return record |
Read motifs from a JASPAR formatted file (PRIVATE).
Format is one or more records of the form, e.g.::
- JASPAR 2010 matrix_only format::
>MA0001.1 AGL3
A [ 0 3 79 40 66 48 65 11 65 0 ]
C [94 75 4 3 1 2 5 2 3 3 ]
G [ 1 0 3 4 1 0 5 3 28 88 ]
T [ 2 19 11 50 29 47 22 81 1 6 ]
- JASPAR 2010-2014 PFMs format::
>MA0001.1 AGL3
0 3 79 40 66 48 65 11 65 0
94 75 4 3 1 2 5 2 3 3
1 0 3 4 1 0 5 3 28 88
2 19 11 50 29 47 22 81 1 6 | def _read_jaspar(handle):
"""Read motifs from a JASPAR formatted file (PRIVATE).
Format is one or more records of the form, e.g.::
- JASPAR 2010 matrix_only format::
>MA0001.1 AGL3
A [ 0 3 79 40 66 48 65 11 65 0 ]
C [94 75 4 3 1 2 5 2 3 3 ]
G [ 1 0 3 4 1 0 5 3 28 88 ]
T [ 2 19 11 50 29 47 22 81 1 6 ]
- JASPAR 2010-2014 PFMs format::
>MA0001.1 AGL3
0 3 79 40 66 48 65 11 65 0
94 75 4 3 1 2 5 2 3 3
1 0 3 4 1 0 5 3 28 88
2 19 11 50 29 47 22 81 1 6
"""
alphabet = "ACGT"
counts = {}
record = Record()
head_pat = re.compile(r"^>\s*(\S+)(\s+(\S+))?")
row_pat_long = re.compile(r"\s*([ACGT])\s*\[\s*(.*)\s*\]")
row_pat_short = re.compile(r"\s*(.+)\s*")
identifier = None
name = None
row_count = 0
nucleotides = ["A", "C", "G", "T"]
for line in handle:
line = line.strip()
head_match = head_pat.match(line)
row_match_long = row_pat_long.match(line)
row_match_short = row_pat_short.match(line)
if head_match:
identifier = head_match.group(1)
if head_match.group(3):
name = head_match.group(3)
else:
name = identifier
elif row_match_long:
(letter, counts_str) = row_match_long.group(1, 2)
words = counts_str.split()
counts[letter] = [float(x) for x in words]
row_count += 1
if row_count == 4:
record.append(Motif(identifier, name, alphabet=alphabet, counts=counts))
identifier = None
name = None
counts = {}
row_count = 0
elif row_match_short:
words = row_match_short.group(1).split()
counts[nucleotides[row_count]] = [float(x) for x in words]
row_count += 1
if row_count == 4:
record.append(Motif(identifier, name, alphabet=alphabet, counts=counts))
identifier = None
name = None
counts = {}
row_count = 0
return record |
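A sketch of the PFM-style variant, with values from the docstring above truncated to four positions:
from io import StringIO

text = ">MA0001.1 AGL3\n0 3 79 40\n94 75 4 3\n1 0 3 4\n2 19 11 50\n"
record = _read_jaspar(StringIO(text))
print(record[0].matrix_id, record[0].name)  # MA0001.1 AGL3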
Calculate pseudocounts.
Computes the square root of the average number of observed instances per
column, multiplied by the background frequency of each nucleotide. | def calculate_pseudocounts(motif):
"""Calculate pseudocounts.
Computes the square root of the average number of observed instances per
column, multiplied by the background frequency of each nucleotide.
"""
alphabet = motif.alphabet
background = motif.background
# It is possible to have unequal column sums so use the average
# number of instances.
total = 0
for i in range(motif.length):
total += sum(motif.counts[letter][i] for letter in alphabet)
avg_nb_instances = total / motif.length
sq_nb_instances = math.sqrt(avg_nb_instances)
if background:
background = dict(background)
else:
background = dict.fromkeys(sorted(alphabet), 1.0)
total = sum(background.values())
pseudocounts = {}
for letter in alphabet:
background[letter] /= total
pseudocounts[letter] = sq_nb_instances * background[letter]
return pseudocounts |
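A worked check with hypothetical numbers: a motif with 20 observations per column and the default uniform background gives sqrt(20) * 0.25 per letter:
import math

print(round(math.sqrt(20) * 0.25, 4))  # 1.118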
Split a JASPAR matrix ID into its components.
Components are base ID and version number, e.g. 'MA0047.2' is returned as
('MA0047', '2'). | def split_jaspar_id(id):
"""Split a JASPAR matrix ID into its component.
Components are base ID and version number, e.g. 'MA0047.2' is returned as
('MA0047', '2').
"""
id_split = id.split(".")
base_id = None
version = None
if len(id_split) == 2:
base_id = id_split[0]
version = id_split[1]
else:
base_id = id
return (base_id, version) |
Return a taxon identifier according to NEXUS standard.
Wrap quotes around names with punctuation or whitespace, and double
single quotes.
mrbayes=True: write names without quotes, whitespace or punctuation
for the mrbayes software package. | def safename(name, mrbayes=False):
"""Return a taxon identifier according to NEXUS standard.
Wrap quotes around names with punctuation or whitespace, and double
single quotes.
mrbayes=True: write names without quotes, whitespace or punctuation
for the mrbayes software package.
"""
if mrbayes:
safe = name.replace(" ", "_")
safe = "".join(c for c in safe if c in MRBAYESSAFE)
else:
safe = name.replace("'", "''")
if set(safe).intersection(set(WHITESPACE + PUNCTUATION)):
safe = "'" + safe + "'"
return safe |
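A behaviour sketch (assuming, as in the NEXUS standard, that the module's PUNCTUATION constant includes the apostrophe):
print(safename("Homo sapiens"))                # 'Homo sapiens'
print(safename("O'Brien"))                     # 'O''Brien'
print(safename("Homo sapiens", mrbayes=True))  # Homo_sapiens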
Remove quotes and/or double quotes around identifiers. | def quotestrip(word):
"""Remove quotes and/or double quotes around identifiers."""
if not word:
return None
while (word.startswith("'") and word.endswith("'")) or (
word.startswith('"') and word.endswith('"')
):
word = word[1:-1]
return word |
Return position of first and last character which is not in skiplist.
Skiplist defaults to ['-','?']. | def get_start_end(sequence, skiplist=("-", "?")):
"""Return position of first and last character which is not in skiplist.
Skiplist defaults to ['-','?'].
"""
length = len(sequence)
if length == 0:
return None, None
end = length - 1
while end >= 0 and (sequence[end] in skiplist):
end -= 1
start = 0
while start < length and (sequence[start] in skiplist):
start += 1
if start == length and end == -1: # empty sequence
return -1, -1
else:
return start, end |
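Worked examples on hypothetical sequences:
print(get_start_end("--AC-GT-?"))  # (2, 6)
print(get_start_end("----"))       # (-1, -1)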
Return a sorted list of keys of p sorted by values of p (PRIVATE). | def _sort_keys_by_values(p):
"""Return a sorted list of keys of p sorted by values of p (PRIVATE)."""
return sorted((pn for pn in p if p[pn]), key=lambda pn: p[pn]) |
Prune duplicate values from a list and return them as a sorted list (PRIVATE). | def _make_unique(values):
"""Check all values in list are unique and return a pruned and sorted list (PRIVATE)."""
return sorted(set(values)) |
Return a unique name if label is already in previous_labels (PRIVATE). | def _unique_label(previous_labels, label):
"""Return a unique name if label is already in previous_labels (PRIVATE)."""
while label in previous_labels:
label_split = label.split(".")
if label_split[-1].startswith("copy"):
copy_num = 1
if label_split[-1] != "copy":
copy_num = int(label_split[-1][4:]) + 1
new_label = f"{'.'.join(label_split[:-1])}.copy{copy_num}"
label = new_label
else:
label += ".copy"
return label |
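A worked sketch: "tax1" collides, the fallback "tax1.copy" collides too, so a numbered copy is produced:
print(_unique_label(["tax1", "tax1.copy"], "tax1"))  # tax1.copy1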
Convert a Seq-object matrix to a plain sequence-string matrix (PRIVATE). | def _seqmatrix2strmatrix(matrix):
"""Convert a Seq-object matrix to a plain sequence-string matrix (PRIVATE)."""
return {t: str(matrix[t]) for t in matrix} |
Compact lists for Nexus output (PRIVATE).
Example
-------
>>> _compact4nexus([1, 2, 3, 5, 6, 7, 8, 12, 15, 18, 20])
'2-4 6-9 13-19\\3 21'
Transform [1 2 3 5 6 7 8 12 15 18 20] (baseindex 0, used in the Nexus class)
into '2-4 6-9 13-19\\3 21' (baseindex 1, used in programs like Paup or MrBayes.). | def _compact4nexus(orig_list):
r"""Compact lists for Nexus output (PRIVATE).
Example
-------
>>> _compact4nexus([1, 2, 3, 5, 6, 7, 8, 12, 15, 18, 20])
'2-4 6-9 13-19\\3 21'
Transform [1 2 3 5 6 7 8 12 15 18 20] (baseindex 0, used in the Nexus class)
into '2-4 6-9 13-19\\3 21' (baseindex 1, used in programs like Paup or MrBayes.).
"""
if not orig_list:
return ""
orig_list = sorted(set(orig_list))
shortlist = []
clist = orig_list[:]
clist.append(clist[-1] + 0.5) # dummy value makes it easier
while len(clist) > 1:
step = 1
for i, x in enumerate(clist):
if x == clist[0] + i * step: # are we still in the right step?
continue
elif i == 1 and len(clist) > 3 and clist[i + 1] - x == x - clist[0]:
# second element, and possibly at least 3 elements to link,
# and the next one is in the right step
step = x - clist[0]
else: # pattern broke, add all values before current position to new list
sub = clist[:i]
if len(sub) == 1:
shortlist.append(str(sub[0] + 1))
else:
if step == 1:
shortlist.append("%d-%d" % (sub[0] + 1, sub[-1] + 1))
else:
shortlist.append("%d-%d\\%d" % (sub[0] + 1, sub[-1] + 1, step))
clist = clist[i:]
break
return " ".join(shortlist) |
Combine matrices in [(name,nexus-instance),...] and return new nexus instance.
combined_matrix=combine([(name1,nexus_instance1),(name2,nexus_instance2),...]
Character sets, character partitions and taxon sets are prefixed, readjusted
and present in the combined matrix. | def combine(matrices):
"""Combine matrices in [(name,nexus-instance),...] and return new nexus instance.
combined_matrix=combine([(name1,nexus_instance1),(name2,nexus_instance2),...]
Character sets, character partitions and taxon sets are prefixed, readjusted
and present in the combined matrix.
"""
if not matrices:
return None
name = matrices[0][0]
combined = copy.deepcopy(matrices[0][1]) # initiate with copy of first matrix
mixed_datatypes = len({n[1].datatype for n in matrices}) > 1
if mixed_datatypes:
# dealing with mixed matrices is application specific.
# You take care of that yourself!
combined.datatype = "None"
# raise NexusError('Matrices must be of same datatype')
combined.charlabels = None
combined.statelabels = None
combined.interleave = False
combined.translate = None
# rename taxon sets and character sets and name them with prefix
for cn, cs in combined.charsets.items():
combined.charsets[f"{name}.{cn}"] = cs
del combined.charsets[cn]
for tn, ts in combined.taxsets.items():
combined.taxsets[f"{name}.{tn}"] = ts
del combined.taxsets[tn]
    # previous partitions usually don't make much sense in a combined matrix,
    # so just create one new partition with one part per source matrix
combined.charpartitions = {"combined": {name: list(range(combined.nchar))}}
for n, m in matrices[1:]: # add all other matrices
both = [t for t in combined.taxlabels if t in m.taxlabels]
combined_only = [t for t in combined.taxlabels if t not in both]
m_only = [t for t in m.taxlabels if t not in both]
for t in both:
# concatenate sequences and unify gap and missing character symbols
combined.matrix[t] += Seq(
str(m.matrix[t])
.replace(m.gap, combined.gap)
.replace(m.missing, combined.missing)
)
        # pad taxa missing from this matrix with the missing-data symbol
for t in combined_only:
combined.matrix[t] += Seq(combined.missing * m.nchar)
for t in m_only:
combined.matrix[t] = Seq(combined.missing * combined.nchar) + Seq(
str(m.matrix[t])
.replace(m.gap, combined.gap)
.replace(m.missing, combined.missing)
)
combined.taxlabels.extend(m_only) # new taxon list
for cn, cs in m.charsets.items(): # adjust character sets for new matrix
combined.charsets[f"{n}.{cn}"] = [x + combined.nchar for x in cs]
if m.taxsets:
if not combined.taxsets:
combined.taxsets = {}
# update taxon sets
combined.taxsets.update({f"{n}.{tn}": ts for tn, ts in m.taxsets.items()})
# update new charpartition
combined.charpartitions["combined"][n] = list(
range(combined.nchar, combined.nchar + m.nchar)
)
# update charlabels
if m.charlabels:
if not combined.charlabels:
combined.charlabels = {}
combined.charlabels.update(
{combined.nchar + i: label for i, label in m.charlabels.items()}
)
combined.nchar += m.nchar # update nchar and ntax
combined.ntax += len(m_only)
# some prefer partitions, some charsets:
    # make a separate charset for each initial dataset
for c in combined.charpartitions["combined"]:
combined.charsets[c] = combined.charpartitions["combined"][c]
return combined |
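A minimal usage sketch, assuming two single-gene alignments in Nexus format
(the filenames here are hypothetical):

from Bio.Nexus import Nexus

matrices = [("gene1", Nexus.Nexus("gene1.nex")),
            ("gene2", Nexus.Nexus("gene2.nex"))]
combined = Nexus.combine(matrices)
# charsets arrive prefixed (gene1.*, gene2.*) plus a "combined" charpartition
combined.write_nexus_data(filename="combined.nex")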
Delete []-delimited comments out of a file and break into lines separated by ';' (PRIVATE).
stripped_text=_kill_comments_and_break_lines(text):
Nested and multiline comments are allowed. [ and ] symbols within single
or double quotes are ignored, newline ends a quote, all symbols within quotes are
treated the same (thus no quoting inside comments like [this character ']' ends a comment])
Special [&...] and [\...] comments remain untouched, if not inside standard comment.
Quotes inside special [& and [\ are treated as normal characters,
but no nesting inside these special comments allowed (like [& [\ ]]).
';' is deleted from end of line.
NOTE: this function is very slow for large files, and obsolete when using C extension cnexus | def _kill_comments_and_break_lines(text):
r"""Delete []-delimited comments out of a file and break into lines separated by ';' (PRIVATE).
stripped_text=_kill_comments_and_break_lines(text):
Nested and multiline comments are allowed. [ and ] symbols within single
    or double quotes are ignored, newline ends a quote, all symbols within quotes are
    treated the same (thus no quoting inside comments like [this character ']' ends a comment])
Special [&...] and [\...] comments remain untouched, if not inside standard comment.
Quotes inside special [& and [\ are treated as normal characters,
but no nesting inside these special comments allowed (like [& [\ ]]).
';' is deleted from end of line.
NOTE: this function is very slow for large files, and obsolete when using C extension cnexus
"""
if not text:
return ""
contents = iter(text)
newtext = []
newline = []
quotelevel = ""
speciallevel = False
commlevel = 0
# Parse with one character look ahead (for special comments)
t2 = next(contents)
while True:
t = t2
try:
t2 = next(contents)
except StopIteration:
t2 = None
if t is None:
break
if t == quotelevel and not (commlevel or speciallevel):
# matching quote ends quotation
quotelevel = ""
elif (
not quotelevel
and not (commlevel or speciallevel)
and (t == '"' or t == "'")
):
# single or double quote starts quotation
quotelevel = t
elif not quotelevel and t == "[":
# opening bracket outside a quote
if t2 in SPECIALCOMMENTS and commlevel == 0 and not speciallevel:
speciallevel = True
else:
commlevel += 1
elif not quotelevel and t == "]":
            # closing bracket outside a quote
            if speciallevel:
                speciallevel = False
            else:
                commlevel -= 1
                if commlevel < 0:
                    raise NexusError("Nexus formatting error: unmatched ]")
                continue  # skip the ']' that closed a standard comment
if commlevel == 0:
# copy if we're not in comment
if t == ";" and not quotelevel:
newtext.append("".join(newline))
newline = []
else:
newline.append(t)
# level of comments should be 0 at the end of the file
if newline:
        newtext.append("".join(newline))
if commlevel > 0:
raise NexusError("Nexus formatting error: unmatched [")
return newtext |
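Two doctest-style cases: a standard comment is deleted and ';' breaks the
text into commands, while brackets inside quotes survive untouched:

>>> _kill_comments_and_break_lines("begin data;[a comment]dim ntax=2;")
['begin data', 'dim ntax=2']
>>> _kill_comments_and_break_lines("tax 'a[1]';")
["tax 'a[1]'"]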
Adjust linebreaks to match ';', strip leading/trailing whitespace (PRIVATE).
list_of_commandlines=_adjust_lines(input_text)
Lines are adjusted so that no linebreaks occur within a commandline
(except matrix command line) | def _adjust_lines(lines):
"""Adjust linebreaks to match ';', strip leading/trailing whitespace (PRIVATE).
list_of_commandlines=_adjust_lines(input_text)
Lines are adjusted so that no linebreaks occur within a commandline
(except matrix command line)
"""
formatted_lines = []
for line in lines:
# Convert line endings
line = line.replace("\r\n", "\n").replace("\r", "\n").strip()
if line.lower().startswith("matrix"):
formatted_lines.append(line)
else:
line = line.replace("\n", " ")
if line:
formatted_lines.append(line)
return formatted_lines |
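For example (doctest-style), command lines are rejoined while a matrix
command keeps its internal linebreaks:

>>> _adjust_lines(["begin data", "dimensions ntax=2\nnchar=4", "matrix\ntaxon1 ACGT\ntaxon2 ACGA"])
['begin data', 'dimensions ntax=2 nchar=4', 'matrix\ntaxon1 ACGT\ntaxon2 ACGA']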
Replace ambigs in xxx(ACG)xxx format by IUPAC ambiguity code (PRIVATE). | def _replace_parenthesized_ambigs(seq, rev_ambig_values):
"""Replace ambigs in xxx(ACG)xxx format by IUPAC ambiguity code (PRIVATE)."""
opening = seq.find("(")
while opening > -1:
closing = seq.find(")")
if closing < 0:
raise NexusError("Missing closing parenthesis in: " + seq)
elif closing < opening:
raise NexusError("Missing opening parenthesis in: " + seq)
ambig = "".join(sorted(seq[opening + 1 : closing]))
ambig_code = rev_ambig_values[ambig.upper()]
if ambig != ambig.upper():
ambig_code = ambig_code.lower()
seq = seq[:opening] + ambig_code + seq[closing + 1 :]
opening = seq.find("(")
return seq |
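A doctest-style sketch; the rev_ambig_values mapping here is a minimal
stand-in for the reversed IUPAC ambiguity table used by the caller:

>>> _replace_parenthesized_ambigs("AT(AG)C", {"AG": "R"})
'ATRC'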
Compute a majority rule consensus tree of all clades with relative frequency>=threshold from a list of trees. | def consensus(trees, threshold=0.5, outgroup=None):
"""Compute a majority rule consensus tree of all clades with relative frequency>=threshold from a list of trees."""
total = len(trees)
if total == 0:
return None
# shouldn't we make sure that it's NodeData or subclass??
dataclass = trees[0].dataclass
max_support = trees[0].max_support
clades = {}
# countclades={}
alltaxa = set(trees[0].get_taxa())
    # calculate clade frequencies
for t in trees:
if alltaxa != set(t.get_taxa()):
raise TreeError("Trees for consensus must contain the same taxa")
t.root_with_outgroup(outgroup=outgroup)
for st_node in t._walk(t.root):
subclade_taxa = sorted(t.get_taxa(st_node))
subclade_taxa = str(subclade_taxa) # lists are not hashable
if subclade_taxa in clades:
clades[subclade_taxa] += t.weight / total
else:
clades[subclade_taxa] = t.weight / total
# if subclade_taxa in countclades:
# countclades[subclade_taxa]+=t.weight
# else:
# countclades[subclade_taxa]=t.weight
# weed out clades below threshold
delclades = [
c for c, p in clades.items() if round(p, 3) < threshold
] # round can be necessary
for c in delclades:
del clades[c]
# create a tree with a root node
consensus = Tree(name=f"consensus_{float(threshold):2.1f}", data=dataclass)
# each clade needs a node in the new tree, add them as isolated nodes
for c, s in clades.items():
node = Nodes.Node(data=dataclass())
node.data.support = s
node.data.taxon = set(eval(c))
consensus.add(node)
# set root node data
consensus.node(consensus.root).data.support = None
consensus.node(consensus.root).data.taxon = alltaxa
# we sort the nodes by no. of taxa in the clade, so root will be the last
consensus_ids = consensus.all_ids()
consensus_ids.sort(key=lambda x: len(consensus.node(x).data.taxon))
# now we just have to hook each node to the next smallest node that includes all taxa of the current
for i, current in enumerate(
consensus_ids[:-1]
): # skip the last one which is the root
# print('----')
# print('current: %s' % consensus.node(current).data.taxon)
# search remaining nodes
for parent in consensus_ids[i + 1 :]:
# print('parent: %s' % consensus.node(parent).data.taxon)
if consensus.node(parent).data.taxon.issuperset(
consensus.node(current).data.taxon
):
break
else:
sys.exit("corrupt tree structure?")
# internal nodes don't have taxa
if len(consensus.node(current).data.taxon) == 1:
consensus.node(current).data.taxon = consensus.node(
current
).data.taxon.pop()
# reset the support for terminal nodes to maximum
# consensus.node(current).data.support=max_support
else:
consensus.node(current).data.taxon = None
consensus.link(parent, current)
# eliminate root taxon name
consensus.node(consensus_ids[-1]).data.taxon = None
if alltaxa != set(consensus.get_taxa()):
raise TreeError("FATAL ERROR: consensus tree is corrupt")
return consensus |
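A minimal sketch, assuming this function lives alongside the Tree class in
Bio.Nexus.Trees (as it does in Biopython) and using three toy Newick trees:

from Bio.Nexus.Trees import Tree, consensus

newicks = [
    "((A,B),(C,D));",
    "((A,B),(C,D));",
    "((A,C),(B,D));",
]
trees = [Tree(n) for n in newicks]
contree = consensus(trees, threshold=0.5)
# clades (A,B) and (C,D) survive with support 2/3; the minority
# grouping from the third tree falls below the 0.5 threshold
print(contree)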
Predict the i->j NOE position based on self peak (diagonal) assignments.
Parameters
----------
peaklist : xpktools.Peaklist
    List of peaks from which to derive predictions.
originNuc : str
    Name of originating nucleus.
detectedNuc : str
    Name of detected nucleus.
originResNum : int
    Index of originating residue.
toResNum : int
    Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
Notes
-----
The initial peaklist is assumed to be diagonal (self peaks only)
and currently there is no checking done to ensure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE. | def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
"""Predict the i->j NOE position based on self peak (diagonal) assignments.
Parameters
----------
    peaklist : xpktools.Peaklist
        List of peaks from which to derive predictions.
    originNuc : str
        Name of originating nucleus.
    detectedNuc : str
        Name of detected nucleus.
    originResNum : int
        Index of originating residue.
    toResNum : int
        Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
    Notes
    -----
    The initial peaklist is assumed to be diagonal (self peaks only)
    and currently there is no checking done to ensure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if (str(toResNum) in peaklist.residue_dict(detectedNuc)) and (
str(originResNum) in peaklist.residue_dict(detectedNuc)
):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
        # Average the chemical shifts over all matching assignment lines.
        # Note: aveDetectedPPM is computed but not currently written back.
        aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
        aveOriginPPM = _col_ave(originList, originPPMCol)
        originAss = originList[0].split()[originAssCol]
returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
return returnLine |
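A usage sketch along the lines of the docstring example; "noed.xpk" is a
hypothetical diagonal (self-peak) peaklist file:

from Bio.NMR import xpktools
from Bio.NMR.NOEtools import predictNOE

peaklist = xpktools.Peaklist("noed.xpk")
# predict the crosspeak from N15 of residue 10 to H1 of residue 12
entry = predictNOE(peaklist, "N15", "H1", 10, 12)
print(entry)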
Replace an entry in a string by the field number.
No padding is implemented currently. Spacing will change if
the original field entry and the new field entry are of
different lengths. | def replace_entry(line, fieldn, newentry):
"""Replace an entry in a string by the field number.
No padding is implemented currently. Spacing will change if
the original field entry and the new field entry are of
different lengths.
"""
# This method depends on xpktools._find_start_entry
start = _find_start_entry(line, fieldn)
leng = len(line[start:].split()[0])
newline = line[:start] + str(newentry) + line[(start + leng) :]
return newline |
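A doctest-style example; note the spacing shifts when the replacement is a
different length:

>>> replace_entry("1 2.345 HN", 2, "9.999")
'1 9.999 HN'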
Find the starting character for entry ``n`` in a space delimited ``line`` (PRIVATE).
n is counted starting with 1.
The n=1 field by definition begins at the first character.
Returns
-------
starting position : int
    The index of the first character of entry ``n``.
"""Find the starting character for entry ``n`` in a space delimited ``line`` (PRIVATE).
n is counted starting with 1.
The n=1 field by definition begins at the first character.
    Returns
    -------
    starting position : int
        The index of the first character of entry ``n``.
"""
# This function is used by replace_entry
if n == 1:
return 0 # Special case
# Count the number of fields by counting spaces
c = 1
leng = len(line)
# Initialize variables according to whether the first character
# is a space or a character
if line[0] == " ":
infield = False
field = 0
else:
infield = True
field = 1
while c < leng and field < n:
if infield:
if line[c] == " " and line[c - 1] != " ":
infield = False
else:
if line[c] != " ":
infield = True
field += 1
c += 1
return c - 1 |
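A doctest-style check (fields are counted from 1, so field 3 of
"1 2.345 HN" starts at index 8):

>>> _find_start_entry("1 2.345 HN", 3)
8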