def data_table(fn_list, datalabel, keyatom):
"""Generate a data table from a list of input xpk files.
Parameters
----------
fn_list : list
List of .xpk file names.
datalabel : str
The data element reported.
keyatom : str
The name of the nucleus used as an index for the data table.
Returns
-------
outlist : list
List of table rows indexed by ``keyatom``.
"""
# TODO - Clarify this docstring, add an example?
outlist = []
dict_list, label_line_list = _read_dicts(fn_list, keyatom)
# Find global max and min residue numbers
minr = dict_list[0]["minres"]
maxr = dict_list[0]["maxres"]
for dictionary in dict_list:
if maxr < dictionary["maxres"]:
maxr = dictionary["maxres"]
if minr > dictionary["minres"]:
minr = dictionary["minres"]
res = minr
    while res <= maxr:  # step through residue numbers
count = 0
key = str(res)
line = key
        for dictionary in dict_list:  # step through dictionaries
label = label_line_list[count]
if key in dictionary:
line = (
line + "\t" + XpkEntry(dictionary[key][0], label).fields[datalabel]
)
else:
line += "\t*"
count += 1
line += "\n"
outlist.append(line)
res += 1
    return outlist
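
# A minimal usage sketch for data_table; the file names and the "H1.P"/"H1"
# labels are hypothetical and must match the label line of your .xpk files:
def _example_data_table():
    rows = data_table(["sample1.xpk", "sample2.xpk"], "H1.P", "H1")
    for row in rows:
        print(row, end="")  # each row already ends with "\n"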


def _read_dicts(fn_list, keyatom):
"""Read multiple files into a list of residue dictionaries (PRIVATE)."""
dict_list = []
datalabel_list = []
for fn in fn_list:
peaklist = Peaklist(fn)
dictionary = peaklist.residue_dict(keyatom)
dict_list.append(dictionary)
datalabel_list.append(peaklist.datalabels)
    return [dict_list, datalabel_list]


def df_search(graph, root=None):
"""Depth first search of g.
Returns a list of all nodes that can be reached from the root node
in depth-first order.
If root is not given, the search will be rooted at an arbitrary node.
"""
seen = {}
search = []
if len(graph.nodes()) < 1:
return search
if root is None:
root = (graph.nodes())[0]
seen[root] = 1
search.append(root)
current = graph.children(root)
while len(current) > 0:
node = current[0]
current = current[1:]
if node not in seen:
search.append(node)
seen[node] = 1
current = graph.children(node) + current
    return search


def bf_search(graph, root=None):
"""Breadth first search of g.
Returns a list of all nodes that can be reached from the root node
in breadth-first order.
If root is not given, the search will be rooted at an arbitrary node.
"""
seen = {}
search = []
if len(graph.nodes()) < 1:
return search
if root is None:
root = (graph.nodes())[0]
seen[root] = 1
search.append(root)
current = graph.children(root)
while len(current) > 0:
node = current[0]
current = current[1:]
if node not in seen:
search.append(node)
seen[node] = 1
current.extend(graph.children(node))
    return search
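
# A small sketch contrasting the two searches. _ToyGraph is a hypothetical
# stand-in for any graph object exposing the nodes() and children() methods
# the functions above rely on:
class _ToyGraph:
    def __init__(self, edges):
        self._edges = edges

    def nodes(self):
        return list(self._edges)

    def children(self, node):
        return self._edges.get(node, [])


def _example_searches():
    # edges a->b, a->c, b->d: DFS visits a,b,d,c while BFS visits a,b,c,d
    g = _ToyGraph({"a": ["b", "c"], "b": ["d"], "c": [], "d": []})
    assert df_search(g, "a") == ["a", "b", "d", "c"]
    assert bf_search(g, "a") == ["a", "b", "c", "d"]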


def get_predictions(qualifier: str) -> Iterator[dict]:
"""Get all AlphaFold predictions for a UniProt accession.
:param qualifier: A UniProt accession, e.g. P00520
:type qualifier: str
:return: The AlphaFold predictions
:rtype: Iterator[dict]
"""
url = f"https://alphafold.com/api/prediction/{qualifier}"
# Retrieve the AlphaFold predictions with urllib
with urlopen(url) as response:
    yield from json.loads(response.read().decode())
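
# Usage sketch (performs a live network request; P00520 is the accession
# used as an example in the docstring):
def _example_predictions():
    for prediction in get_predictions("P00520"):
        print(prediction["cifUrl"])  # the key consumed by the helpers below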


def _get_mmcif_file_path_for(
prediction: dict, directory: Optional[Union[str, bytes, PathLike]] = None
) -> str:
"""Get the path to the mmCIF file for an AlphaFold prediction.
:param prediction: An AlphaFold prediction
:type prediction: dict
:param directory: The directory that stores the mmCIF file, defaults to the current working directory
    :type directory: Union[str, bytes, PathLike], optional
:return: The path to the mmCIF file
:rtype: str
"""
if directory is None:
directory = os.getcwd()
cif_url = prediction["cifUrl"]
# Get the file name from the URL
file_name = cif_url.split("/")[-1]
    return str(os.path.join(directory, file_name))


def download_cif_for(
prediction: dict, directory: Optional[Union[str, bytes, PathLike]] = None
) -> str:
"""Download the mmCIF file for an AlphaFold prediction.
Downloads the file to the current working directory if no destination is specified.
:param prediction: An AlphaFold prediction
:type prediction: dict
:param directory: The directory to write the mmCIF data to, defaults to the current working directory
    :type directory: Union[str, bytes, PathLike], optional
:return: The path to the mmCIF file
:rtype: str
"""
if directory is None:
directory = os.getcwd()
cif_url = prediction["cifUrl"]
# Create the directory in case it does not exist
os.makedirs(directory, exist_ok=True)
file_path = _get_mmcif_file_path_for(prediction, directory)
if os.path.exists(file_path):
print(f"File {file_path} already exists, skipping download.")
else:
with urlopen(cif_url) as response:
data = response.read()
# Write the data to destination
with open(file_path, "wb") as file:
file.write(data)
    return file_path


def get_structural_models_for(
qualifier: str,
mmcif_parser: Optional[MMCIFParser] = None,
directory: Optional[Union[str, bytes, PathLike]] = None,
) -> Iterator[StructuralModel]:
"""Get the PDB structures for a UniProt accession.
Downloads the mmCIF files to the directory if they are not present.
:param qualifier: A UniProt accession, e.g. P00520
:type qualifier: str
:param mmcif_parser: The mmCIF parser to use, defaults to ``MMCIFParser()``
:type mmcif_parser: MMCIFParser, optional
:param directory: The directory to store the mmCIF data, defaults to the current working directory
    :type directory: Union[str, bytes, PathLike], optional
    :return: An iterator over the structural models
    :rtype: Iterator[StructuralModel]
"""
if mmcif_parser is None:
mmcif_parser = MMCIFParser()
if directory is None:
directory = os.getcwd()
for prediction in get_predictions(qualifier):
mmcif_path = _get_mmcif_file_path_for(prediction, directory)
if not os.path.exists(mmcif_path):
mmcif_path = download_cif_for(prediction, directory)
    yield mmcif_parser.get_structure(qualifier, mmcif_path)
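
# End-to-end sketch: download any missing mmCIF files into ./alphafold
# (a hypothetical directory name) and parse each one into a structure:
def _example_structural_models():
    for model in get_structural_models_for("P00520", directory="alphafold"):
        print(model.get_id())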


def extract(structure, chain_id, start, end, filename):
"""Write out selected portion to filename."""
sel = ChainSelector(chain_id, start, end)
io = PDBIO()
io.set_structure(structure)
    io.save(filename, sel)


def version(version_string):
"""Parse semantic version scheme for easy comparison."""
    return tuple(map(int, (version_string.split("."))))
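
# version() makes semantic versions compare correctly as tuples, where the
# raw strings would not ("10.0" < "9.9" as strings):
assert version("3.9.9") < version("4.0.0")
assert version("10.0") > version("9.9")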


def ss_to_index(ss):
"""Secondary structure symbol to index.
H=0
E=1
C=2
"""
if ss == "H":
return 0
if ss == "E":
return 1
if ss == "C":
return 2
    assert 0


def dssp_dict_from_pdb_file(in_file, DSSP="dssp", dssp_version="3.9.9"):
"""Create a DSSP dictionary from a PDB file.
Parameters
----------
in_file : string
pdb file
DSSP : string
DSSP executable (argument to subprocess)
dssp_version : string
Version of DSSP executable
Returns
-------
(out_dict, keys) : tuple
a dictionary that maps (chainid, resid) to
amino acid type, secondary structure code and
accessibility.
Examples
--------
How dssp_dict_from_pdb_file could be used::
from Bio.PDB.DSSP import dssp_dict_from_pdb_file
dssp_tuple = dssp_dict_from_pdb_file("/local-pdb/1fat.pdb")
dssp_dict = dssp_tuple[0]
print(dssp_dict['A',(' ', 1, ' ')])
"""
# Using universal newlines is important on Python 3, this
# gives text handles rather than bytes handles.
# Newer version of DSSP executable is named 'mkdssp',
# and calling 'dssp' will hence not work in some operating systems
# (Debian distribution of DSSP includes a symlink for 'dssp' argument)
try:
if version(dssp_version) < version("4.0.0"):
DSSP_cmd = [DSSP, in_file]
else:
DSSP_cmd = [DSSP, "--output-format=dssp", in_file]
p = subprocess.Popen(
DSSP_cmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except FileNotFoundError:
if DSSP == "mkdssp":
raise
if version(dssp_version) < version("4.0.0"):
DSSP_cmd = ["mkdssp", in_file]
else:
DSSP_cmd = ["mkdssp", "--output-format=dssp", in_file]
p = subprocess.Popen(
DSSP_cmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
# Alert user for errors
if err.strip():
warnings.warn(err)
if not out.strip():
raise Exception("DSSP failed to produce an output")
out_dict, keys = _make_dssp_dict(StringIO(out))
    return out_dict, keys


def make_dssp_dict(filename):
"""DSSP dictionary mapping identifiers to properties.
Return a DSSP dictionary that maps (chainid, resid) to
aa, ss and accessibility, from a DSSP file.
Parameters
----------
filename : string
the DSSP output file
"""
with open(filename) as handle:
    return _make_dssp_dict(handle)


def _make_dssp_dict(handle):
"""Return a DSSP dictionary, used by mask_dssp_dict (PRIVATE).
DSSP dictionary maps (chainid, resid) to an amino acid,
secondary structure symbol, solvent accessibility value, and hydrogen bond
information (relative dssp indices and hydrogen bond energies) from an open
DSSP file object.
Parameters
----------
handle : file
the open DSSP output file handle
"""
dssp = {}
start = 0
keys = []
for line in handle:
sl = line.split()
if len(sl) < 2:
continue
if sl[1] == "RESIDUE":
# Start parsing from here
start = 1
continue
if not start:
continue
if line[9] == " ":
# Skip -- missing residue
continue
dssp_index = int(line[:5])
resseq = int(line[5:10])
icode = line[10]
chainid = line[11]
aa = line[13]
ss = line[16]
if ss == " ":
ss = "-"
try:
NH_O_1_relidx = int(line[38:45])
NH_O_1_energy = float(line[46:50])
O_NH_1_relidx = int(line[50:56])
O_NH_1_energy = float(line[57:61])
NH_O_2_relidx = int(line[61:67])
NH_O_2_energy = float(line[68:72])
O_NH_2_relidx = int(line[72:78])
O_NH_2_energy = float(line[79:83])
acc = int(line[34:38])
phi = float(line[103:109])
psi = float(line[109:115])
except ValueError as exc:
# DSSP output breaks its own format when there are >9999
# residues, since only 4 digits are allocated to the seq num
# field. See 3kic chain T res 321, 1vsy chain T res 6077.
# Here, look for whitespace to figure out the number of extra
# digits, and shift parsing the rest of the line by that amount.
if line[34] != " ":
shift = line[34:].find(" ")
NH_O_1_relidx = int(line[38 + shift : 45 + shift])
NH_O_1_energy = float(line[46 + shift : 50 + shift])
O_NH_1_relidx = int(line[50 + shift : 56 + shift])
O_NH_1_energy = float(line[57 + shift : 61 + shift])
NH_O_2_relidx = int(line[61 + shift : 67 + shift])
NH_O_2_energy = float(line[68 + shift : 72 + shift])
O_NH_2_relidx = int(line[72 + shift : 78 + shift])
O_NH_2_energy = float(line[79 + shift : 83 + shift])
acc = int(line[34 + shift : 38 + shift])
phi = float(line[103 + shift : 109 + shift])
psi = float(line[109 + shift : 115 + shift])
else:
raise ValueError(exc) from None
res_id = (" ", resseq, icode)
dssp[(chainid, res_id)] = (
aa,
ss,
acc,
phi,
psi,
dssp_index,
NH_O_1_relidx,
NH_O_1_energy,
O_NH_1_relidx,
O_NH_1_energy,
NH_O_2_relidx,
NH_O_2_energy,
O_NH_2_relidx,
O_NH_2_energy,
)
keys.append((chainid, res_id))
    return dssp, keys


def _read_fragments(size, length, dir="."):
"""Read a fragment spec file (PRIVATE).
Read a fragment spec file available from
http://github.com/csblab/fragments/
and return a list of Fragment objects.
:param size: number of fragments in the library
:type size: int
:param length: length of the fragments
:type length: int
:param dir: directory where the fragment spec files can be found
:type dir: string
"""
filename = (dir + "/" + _FRAGMENT_FILE) % (size, length)
with open(filename) as fp:
flist = []
# ID of fragment=rank in spec file
fid = 0
for line in fp:
# skip comment and blank lines
if line[0] == "*" or line[0] == "\n":
continue
sl = line.split()
if sl[1] == "------":
# Start of fragment definition
f = Fragment(length, fid)
flist.append(f)
# increase fragment id (rank)
fid += 1
continue
# Add CA coord to Fragment
coord = np.array([float(x) for x in sl[0:3]])
# XXX= dummy residue name
f.add_residue("XXX", coord)
    return flist


def _make_fragment_list(pp, length):
"""Dice up a peptide in fragments of length "length" (PRIVATE).
:param pp: a list of residues (part of one peptide)
:type pp: [L{Residue}, L{Residue}, ...]
:param length: fragment length
:type length: int
"""
frag_list = []
for i in range(len(pp) - length + 1):
f = Fragment(length, -1)
for j in range(length):
residue = pp[i + j]
resname = residue.get_resname()
if residue.has_id("CA"):
ca = residue["CA"]
else:
raise PDBException("CHAINBREAK")
if ca.is_disordered():
raise PDBException("CHAINBREAK")
ca_coord = ca.get_coord()
f.add_residue(resname, ca_coord)
frag_list.append(f)
    return frag_list


def _map_fragment_list(flist, reflist):
"""Map flist fragments to closest entry in reflist (PRIVATE).
    Map all fragments in flist to the closest (in RMSD) fragment in reflist.
    Returns a list of the matched reference fragments.
    :param flist: list of protein fragments
    :type flist: [L{Fragment}, L{Fragment}, ...]
    :param reflist: list of reference (i.e. library) fragments
    :type reflist: [L{Fragment}, L{Fragment}, ...]
"""
mapped = []
for f in flist:
rank = []
for i in range(len(reflist)):
rf = reflist[i]
rms = f - rf
rank.append((rms, rf))
rank.sort()
fragment = rank[0][1]
mapped.append(fragment)
    return mapped


def structure_rebuild_test(entity, verbose: bool = False, quick: bool = False) -> Dict:
"""Test rebuild PDB structure from internal coordinates.
Generates internal coordinates for entity and writes to a .pic file in
memory, then generates XYZ coordinates from the .pic file and compares the
resulting entity against the original.
See :data:`IC_Residue.pic_accuracy` to vary numeric accuracy of the
intermediate .pic file if the only issue is small differences in coordinates.
Note that with default settings, deuterated initial structures will fail
the comparison, as will structures loaded with alternate `IC_Residue.accept_atoms`
settings. Use `quick=True` and/or variations on `AtomKey.d2h` and
`IC_Residue.accept_atoms` settings.
:param Entity entity: Biopython Structure, Model or Chain.
Structure to test
:param bool verbose: default False.
print extra messages
:param bool quick: default False.
only check the internal coords atomArrays are identical
:returns: dict
comparison dict from :func:`.compare_residues`
"""
sp = StringIO()
entity.atom_to_internal_coordinates(verbose)
write_PIC(entity, sp)
sp.seek(0)
pdb2 = read_PIC(sp, verbose=verbose, quick=quick)
if isinstance(entity, Chain):
pdb2 = next(pdb2.get_chains()) # there's only one, get first
if verbose:
report_IC(pdb2, verbose=True)
pdb2.internal_to_atom_coordinates(verbose)
r = compare_residues(entity, pdb2, verbose=verbose, quick=quick)
    return r
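
# Usage sketch (the PDB file name is hypothetical):
def _example_rebuild_test():
    from Bio.PDB import PDBParser

    structure = PDBParser(QUIET=True).get_structure("1abc", "1abc.pdb")
    result = structure_rebuild_test(structure, verbose=True)
    print(result["report"], result["pass"])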


def report_IC(
entity: Union[Structure, Model, Chain, Residue],
reportDict: Dict[str, Any] = None,
verbose: bool = False,
) -> Dict[str, Any]:
"""Generate dict with counts of ic data elements for each entity level.
reportDict entries are:
- idcode : PDB ID
- hdr : PDB header lines
- mdl : models
- chn : chains
- res : residue objects
- res_e : residues with dihedra and/or hedra
- dih : dihedra
- hed : hedra
:param Entity entity: Biopython PDB Entity object: S, M, C or R
:raises PDBException: if entity level not S, M, C, or R
:raises Exception: if entity does not have .level attribute
:returns: dict with counts of IC data elements
"""
if reportDict is None:
reportDict = {
"idcode": None,
"hdr": 0,
"mdl": 0,
"chn": 0,
"chn_ids": [],
"res": 0,
"res_e": 0,
"dih": 0,
"hed": 0,
}
try:
if "A" == entity.level:
raise PDBException("No IC output at Atom level")
elif isinstance(entity, (DisorderedResidue, Residue)): # "R" == entity.level:
if entity.internal_coord:
reportDict["res"] += 1
dlen = len(entity.internal_coord.dihedra)
hlen = len(entity.internal_coord.hedra)
if 0 < dlen or 0 < hlen:
reportDict["res_e"] += 1
reportDict["dih"] += dlen
reportDict["hed"] += hlen
elif isinstance(entity, Chain): # "C" == entity.level:
reportDict["chn"] += 1
reportDict["chn_ids"].append(entity.id)
for res in entity:
reportDict = report_IC(res, reportDict)
elif isinstance(entity, Model): # "M" == entity.level:
reportDict["mdl"] += 1
for chn in entity:
reportDict = report_IC(chn, reportDict)
elif isinstance(entity, Structure): # "S" == entity.level:
if hasattr(entity, "header"):
if reportDict["idcode"] is None:
reportDict["idcode"] = entity.header.get("idcode", None)
hdr = entity.header.get("head", None)
if hdr:
reportDict["hdr"] += 1
name = entity.header.get("name", None)
if name:
reportDict["hdr"] += 1
for mdl in entity:
reportDict = report_IC(mdl, reportDict)
else:
raise PDBException("Cannot identify level: " + str(entity.level))
except KeyError:
raise Exception(
"write_PIC: argument is not a Biopython PDB Entity " + str(entity)
)
if verbose:
print(
"{} : {} models {} chains {} {} residue objects "
"{} residues with {} dihedra {} hedra".format(
reportDict["idcode"],
reportDict["mdl"],
reportDict["chn"],
reportDict["chn_ids"],
reportDict["res"],
reportDict["res_e"],
reportDict["dih"],
reportDict["hed"],
)
)
    return reportDict
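
# Usage sketch: generate internal coordinates first, then count them;
# entity is any Structure, Model or Chain (e.g. from the sketch above):
def _example_report_IC(entity):
    entity.atom_to_internal_coordinates()
    counts = report_IC(entity, verbose=True)
    print(counts["res_e"], "residues with", counts["dih"], "dihedra")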


def IC_duplicate(entity) -> Structure:
"""Duplicate structure entity with IC data, no atom coordinates.
Employs :func:`.write_PIC`, :func:`.read_PIC` with StringIO buffer.
Calls :meth:`.Chain.atom_to_internal_coordinates` if needed.
:param Entity entity: Biopython PDB Entity (will fail for Atom)
:returns: Biopython PDBStructure, no Atom objects except initial coords
"""
sp = StringIO()
hasInternalCoords = False
for res in entity.get_residues():
if res.internal_coord:
if len(res.internal_coord.hedra) > 0:
hasInternalCoords = True
break
if not hasInternalCoords:
if isinstance(entity, Residue): # "R" == entity.level:
# works better at chain level but leave option here
res = entity
if not res.internal_coord:
res.internal_coord = IC_Residue(entity)
res.internal_coord.atom_to_internal_coordinates()
else:
entity.atom_to_internal_coordinates()
write_PIC(entity, sp)
sp.seek(0)
    return read_PIC(sp)


def compare_residues(
e0: Union[Structure, Model, Chain],
e1: Union[Structure, Model, Chain],
verbose: bool = False,
quick: bool = False,
rtol: float = None,
atol: float = None,
) -> Dict[str, Any]:
"""Compare full IDs and atom coordinates for 2 Biopython PDB entities.
Skip DNA and HETATMs.
:param Entity e0,e1: Biopython PDB Entity objects (S, M or C).
Structures, Models or Chains to be compared
:param bool verbose:
Whether to print mismatch info, default False
:param bool quick: default False.
Only check atomArrays are identical, aCoordMatchCount=0 if different
:param float rtol, atol: default 1e-03, 1e-03 or round to 3 places.
NumPy allclose parameters; default is to round atom coordinates to 3
places and test equal. For 'quick' will use defaults above for
comparing atomArrays
:returns dict:
Result counts for Residues, Full ID match Residues, Atoms,
Full ID match atoms, and Coordinate match atoms; report string;
error status (bool)
"""
cmpdict: Dict[str, Any] = {}
cmpdict["chains"] = [] # list of chain IDs (union over both structures)
cmpdict["residues"] = 0 # count of not HETATM residues in longest chain
cmpdict["rCount"] = 0 # Biopython Residues (includes HETATMs, waters)
cmpdict["rMatchCount"] = 0 # full ID match Biopython Residues e0, e1
cmpdict["rpnMismatchCount"] = 0 # res prev, next links not matched
cmpdict["aCount"] = 0 # Atoms including disordered in longest e0 or e1
cmpdict["disAtmCount"] = 0 # disordered atoms in longest e0 or e1
cmpdict["aCoordMatchCount"] = 0 # atoms with coordinates match e0, e1
cmpdict["aFullIdMatchCount"] = 0 # atoms with full ID match e0, e1
cmpdict["id0"] = e0.get_full_id()
cmpdict["id1"] = e1.get_full_id()
cmpdict["pass"] = None
cmpdict["report"] = None
if quick:
if isinstance(e0, Chain):
if (
e0.internal_coord.atomArray is not None
and np.shape(e0.internal_coord.atomArray)
== np.shape(e1.internal_coord.atomArray)
and np.allclose(
e0.internal_coord.atomArray,
e1.internal_coord.atomArray,
rtol=1e-03 if rtol is None else rtol,
atol=1e-03 if atol is None else atol,
)
):
cmpdict["aCount"] = np.size(e0.internal_coord.atomArray, 0)
cmpdict["aCoordMatchCount"] = np.size(e0.internal_coord.atomArray, 0)
if cmpdict["aCoordMatchCount"] > 0:
cmpdict["pass"] = True
else:
cmpdict["pass"] = False
else:
cmpdict["aCount"] = (
0
if e0.internal_coord.atomArray is None
else np.size(e0.internal_coord.atomArray, 0)
)
cmpdict["pass"] = False
else:
cmpdict["pass"] = True
for c0, c1 in zip_longest(e0.get_chains(), e1.get_chains()):
if c0.internal_coord.atomArray is not None:
if np.allclose(
c0.internal_coord.atomArray,
c1.internal_coord.atomArray,
rtol=1e-03 if rtol is None else rtol,
atol=1e-03 if atol is None else atol,
):
cmpdict["aCoordMatchCount"] += np.size(
c0.internal_coord.atomArray, 0
)
else:
cmpdict["pass"] = False
cmpdict["aCount"] += np.size(c0.internal_coord.atomArray, 0)
if cmpdict["aCoordMatchCount"] < cmpdict["aCount"]:
cmpdict["pass"] = False
else:
for r0, r1 in zip_longest(e0.get_residues(), e1.get_residues()):
if 2 == r0.is_disordered() == r1.is_disordered():
for dr0, dr1 in zip_longest(
r0.child_dict.values(), r1.child_dict.values()
):
_cmp_res(dr0, dr1, verbose, cmpdict, rtol=rtol, atol=atol)
else:
_cmp_res(r0, r1, verbose, cmpdict, rtol=rtol, atol=atol)
if (
cmpdict["rMatchCount"] == cmpdict["rCount"]
and cmpdict["aCoordMatchCount"] == cmpdict["aCount"]
and cmpdict["aFullIdMatchCount"] == cmpdict["aCount"]
and cmpdict["rpnMismatchCount"] == 0
):
cmpdict["pass"] = True
else:
cmpdict["pass"] = False
rstr = (
"{}:{} {} -- {} of {} residue IDs match; {} residues {} atom coords, "
"{} full IDs of {} atoms ({} disordered) match : {}".format(
cmpdict["id0"],
cmpdict["id1"],
cmpdict["chains"],
cmpdict["rMatchCount"],
cmpdict["rCount"],
cmpdict["residues"],
cmpdict["aCoordMatchCount"],
cmpdict["aFullIdMatchCount"],
cmpdict["aCount"],
cmpdict["disAtmCount"],
"ERROR" if not cmpdict["pass"] else "ALL OK",
)
)
if not cmpdict["pass"]:
if cmpdict["rMatchCount"] != cmpdict["rCount"]:
rstr += " -RESIDUE IDS-"
if cmpdict["aCoordMatchCount"] != cmpdict["aFullIdMatchCount"]:
rstr += " -COORDINATES-"
if cmpdict["aFullIdMatchCount"] != cmpdict["aCount"]:
rstr += " -ATOM IDS-"
cmpdict["report"] = rstr
    return cmpdict


def write_PDB(
entity: Structure, file: str, pdbid: str = None, chainid: str = None
) -> None:
"""Write PDB file with HEADER and TITLE if available."""
enumerate_atoms(entity)
with as_handle(file, "w") as fp:
try:
if hasattr(entity, "header"):
if not pdbid:
pdbid = entity.header.get("idcode", None)
hdr = entity.header.get("head", None)
dd = pdb_date(entity.header.get("deposition_date", None))
if hdr:
fp.write(
("HEADER {:40}{:8} {:4}\n").format(
hdr.upper(), (dd or ""), (pdbid or "")
)
)
name = entity.header.get("name", None)
if name:
fp.write("TITLE " + name.upper() + "\n")
io = PDBIO()
io.set_structure(entity)
io.save(fp, preserve_atom_numbering=True)
except KeyError:
raise Exception(
"write_PDB: argument is not a Biopython PDB Entity " + str(entity)
            )


def set_accuracy_95(num: float) -> float:
"""Reduce floating point accuracy to 9.5 (xxxx.xxxxx).
Used by :class:`IC_Residue` class writing PIC and SCAD
files.
:param float num: input number
:returns: float with specified accuracy
"""
# return round(num, 5) # much slower
return float(f"{num:9.5f}") |


def run_naccess(
model, pdb_file, probe_size=None, z_slice=None, naccess="naccess", temp_path="/tmp/"
):
"""Run naccess for a pdb file."""
# make temp directory;
tmp_path = tempfile.mkdtemp(dir=temp_path)
# file name must end with '.pdb' to work with NACCESS
# -> create temp file of existing pdb
# or write model to temp file
handle, tmp_pdb_file = tempfile.mkstemp(".pdb", dir=tmp_path)
os.close(handle)
if pdb_file:
pdb_file = os.path.abspath(pdb_file)
shutil.copy(pdb_file, tmp_pdb_file)
else:
writer = PDBIO()
writer.set_structure(model.get_parent())
writer.save(tmp_pdb_file)
# chdir to temp directory, as NACCESS writes to current working directory
old_dir = os.getcwd()
os.chdir(tmp_path)
# create the command line and run
# catch standard out & err
command = [naccess, tmp_pdb_file]
if probe_size:
command.extend(["-p", probe_size])
if z_slice:
command.extend(["-z", z_slice])
p = subprocess.Popen(
command, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = p.communicate()
os.chdir(old_dir)
rsa_file = tmp_pdb_file[:-4] + ".rsa"
asa_file = tmp_pdb_file[:-4] + ".asa"
# Alert user for errors
if err.strip():
warnings.warn(err)
if (not os.path.exists(rsa_file)) or (not os.path.exists(asa_file)):
raise Exception("NACCESS did not execute or finish properly.")
# get the output, then delete the temp directory
with open(rsa_file) as rf:
rsa_data = rf.readlines()
with open(asa_file) as af:
asa_data = af.readlines()
# shutil.rmtree(tmp_path, ignore_errors=True)
    return rsa_data, asa_data


def process_rsa_data(rsa_data):
"""Process the .rsa output file: residue level SASA data."""
naccess_rel_dict = {}
for line in rsa_data:
if line.startswith("RES"):
res_name = line[4:7]
chain_id = line[8]
resseq = int(line[9:13])
icode = line[13]
res_id = (" ", resseq, icode)
naccess_rel_dict[(chain_id, res_id)] = {
"res_name": res_name,
"all_atoms_abs": float(line[16:22]),
"all_atoms_rel": float(line[23:28]),
"side_chain_abs": float(line[29:35]),
"side_chain_rel": float(line[36:41]),
"main_chain_abs": float(line[42:48]),
"main_chain_rel": float(line[49:54]),
"non_polar_abs": float(line[55:61]),
"non_polar_rel": float(line[62:67]),
"all_polar_abs": float(line[68:74]),
"all_polar_rel": float(line[75:80]),
}
    return naccess_rel_dict
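
# Combined sketch for run_naccess + process_rsa_data; requires a working
# naccess binary on the PATH, and model comes from e.g. PDBParser:
def _example_naccess(model):
    rsa_data, asa_data = run_naccess(model, None)
    rel_dict = process_rsa_data(rsa_data)
    for (chain_id, res_id), values in rel_dict.items():
        print(chain_id, res_id, values["all_atoms_rel"])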


def process_asa_data(rsa_data):
"""Process the .asa output file: atomic level SASA data."""
naccess_atom_dict = {}
for line in rsa_data:
full_atom_id = line[12:16]
atom_id = full_atom_id.strip()
chainid = line[21]
resseq = int(line[22:26])
icode = line[26]
res_id = (" ", resseq, icode)
id = (chainid, res_id, atom_id)
asa = line[54:62] # solvent accessibility in Angstrom^2
naccess_atom_dict[id] = asa
    return naccess_atom_dict


def _format_date(pdb_date):
"""Convert dates from DD-Mon-YY to YYYY-MM-DD format (PRIVATE)."""
date = ""
year = int(pdb_date[7:])
if year < 50:
century = 2000
else:
century = 1900
date = str(century + year) + "-"
all_months = [
"xxx",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month = str(all_months.index(pdb_date[3:6]))
if len(month) == 1:
month = "0" + month
date = date + month + "-" + pdb_date[:2]
    return date
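
# E.g. _format_date("14-Jul-97") == "1997-07-14"; the month must be in the
# mixed case produced by _nice_case() below for the index lookup to succeed:
assert _format_date("14-Jul-97") == "1997-07-14"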


def _chop_end_codes(line):
"""Chops lines ending with ' 1CSA 14' and the like (PRIVATE)."""
    return re.sub(r"\s\s\s\s+[\w]{4}.\s+\d*\Z", "", line)


def _chop_end_misc(line):
"""Chops lines ending with ' 14-JUL-97 1CSA' and the like (PRIVATE)."""
    return re.sub(r"\s+\d\d-\w\w\w-\d\d\s+[1-9][0-9A-Z]{3}\s*\Z", "", line)


def _nice_case(line):
"""Make A Lowercase String With Capitals (PRIVATE)."""
line_lower = line.lower()
s = ""
i = 0
nextCap = 1
while i < len(line_lower):
c = line_lower[i]
if c >= "a" and c <= "z" and nextCap:
c = c.upper()
nextCap = 0
elif c in " .,;:\t-_":
nextCap = 1
s += c
i += 1
    return s
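
# E.g. capitals restart after spaces and after ".,;:-_":
assert _nice_case("STRUCTURE OF BETA-GALACTOSIDASE") == "Structure Of Beta-Galactosidase"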


def parse_pdb_header(infile):
"""Return the header lines of a pdb file as a dictionary.
Dictionary keys are: head, deposition_date, release_date, structure_method,
resolution, structure_reference, journal_reference, author and
compound.
"""
header = []
with File.as_handle(infile) as f:
for line in f:
record_type = line[0:6]
if record_type in ("ATOM ", "HETATM", "MODEL "):
break
header.append(line)
    return _parse_pdb_header_list(header)
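
# Usage sketch (hypothetical file name; the available keys are listed in
# the docstring above):
def _example_parse_header():
    header = parse_pdb_header("1abc.pdb")
    print(header["deposition_date"], header["structure_method"])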


def _parse_remark_465(line):
"""Parse missing residue remarks.
Returns a dictionary describing the missing residue.
The specification for REMARK 465 at
http://www.wwpdb.org/documentation/file-format-content/format33/remarks2.html#REMARK%20465
only gives templates, but does not say they have to be followed.
So we assume that not all pdb-files with a REMARK 465 can be understood.
Returns a dictionary with the following keys:
"model", "res_name", "chain", "ssseq", "insertion"
"""
if line:
# Note that line has been stripped.
assert line[0] != " " and line[-1] not in "\n ", "line has to be stripped"
pattern = re.compile(
r"""
(\d+\s[\sA-Z][\sA-Z][A-Z] | # Either model number + residue name
[A-Z]{1,3}) # Or only residue name with 1 (RNA) to 3 letters
\s ([A-Za-z0-9]) # A single character chain
\s+(-?\d+[A-Za-z]?)$ # Residue number: A digit followed by an optional
# insertion code (Hetero-flags make no sense in
# context with missing res)
""",
re.VERBOSE,
)
match = pattern.match(line)
if match is None:
return None
residue = {}
if " " in match.group(1):
model, residue["res_name"] = match.group(1).split()
residue["model"] = int(model)
else:
residue["model"] = None
residue["res_name"] = match.group(1)
residue["chain"] = match.group(2)
try:
residue["ssseq"] = int(match.group(3))
except ValueError:
residue["insertion"] = match.group(3)[-1]
residue["ssseq"] = int(match.group(3)[:-1])
else:
residue["insertion"] = None
    return residue
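
# E.g. a stripped REMARK 465 payload parses as follows (no model number,
# so "model" is None):
assert _parse_remark_465("GLY A 23") == {
    "model": None,
    "res_name": "GLY",
    "chain": "A",
    "ssseq": 23,
    "insertion": None,
}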


def read_PIC(
file: TextIO,
verbose: bool = False,
quick: bool = False,
defaults: bool = False,
) -> Structure:
"""Load Protein Internal Coordinate (.pic) data from file.
PIC file format:
- comment lines start with #
- (optional) PDB HEADER record
- idcode and deposition date recommended but optional
- deposition date in PDB format or as changed by Biopython
- (optional) PDB TITLE record
- repeat:
- Biopython Residue Full ID - sets residue IDs of returned structure
- (optional) PDB N, CA, C ATOM records for chain start
- (optional) PIC Hedra records for residue
- (optional) PIC Dihedra records for residue
- (optional) BFAC records listing AtomKeys and b-factors
An improvement would define relative positions for HOH (water) entries.
Defaults will be supplied for any value if defaults=True. Default values
are supplied in ic_data.py, but structures degrade quickly with any
deviation from true coordinates. Experiment with
:data:`Bio.PDB.internal_coords.IC_Residue.pic_flags` options to
:func:`write_PIC` to verify this.
N.B. dihedron (i-1)C-N-CA-CB is ignored in assembly if O exists.
C-beta is by default placed using O-C-CA-CB, but O is missing
in some PDB file residues, which means the sidechain cannot be
placed. The alternate CB path (i-1)C-N-CA-CB is provided to
circumvent this, but if this is needed then it must be adjusted in
conjunction with PHI ((i-1)C-N-CA-C) as they overlap (see :meth:`.bond_set`
and :meth:`.bond_rotate` to handle this automatically).
:param Bio.File file: :func:`.as_handle` file name or handle
:param bool verbose: complain when lines not as expected
:param bool quick: don't check residues for all dihedra (no default values)
:param bool defaults: create di/hedra as needed from reference database.
Amide proton created if 'H' is in IC_Residue.accept_atoms
:returns: Biopython Structure object, Residues with .internal_coord
attributes but no coordinates except for chain start N, CA, C atoms if
supplied, **OR** None on parse fail (silent unless verbose=True)
"""
proton = "H" in IC_Residue.accept_atoms
pdb_hdr_re = re.compile(
r"^HEADER\s{4}(?P<cf>.{1,40})"
r"(?:\s+(?P<dd>\d\d\d\d-\d\d-\d\d|\d\d-\w\w\w-\d\d))?"
r"(?:\s+(?P<id>[0-9A-Z]{4}))?\s*$"
)
pdb_ttl_re = re.compile(r"^TITLE\s{5}(?P<ttl>.+)\s*$")
biop_id_re = re.compile(
r"^\('(?P<pid>[^\s]*)',\s(?P<mdl>\d+),\s"
r"'(?P<chn>\s|\w)',\s\('(?P<het>\s|[\w\s-]+)"
r"',\s(?P<pos>-?\d+),\s'(?P<icode>\s|\w)'\)\)"
r"\s+(?P<res>[\w]{1,3})"
r"(\s\[(?P<segid>[a-zA-z\s]+)\])?"
r"\s*$"
)
pdb_atm_re = re.compile(
r"^ATOM\s\s(?:\s*(?P<ser>\d+))\s(?P<atm>[\w\s]{4})"
r"(?P<alc>\w|\s)(?P<res>[\w]{3})\s(?P<chn>.)"
r"(?P<pos>[\s\-\d]{4})(?P<icode>[A-Za-z\s])\s\s\s"
r"(?P<x>[\s\-\d\.]{8})(?P<y>[\s\-\d\.]{8})"
r"(?P<z>[\s\-\d\.]{8})(?P<occ>[\s\d\.]{6})"
r"(?P<tfac>[\s\d\.]{6})\s{6}"
r"(?P<segid>[a-zA-z\s]{4})(?P<elm>.{2})"
r"(?P<chg>.{2})?\s*$"
)
pdbx_atm_re = re.compile(
r"^ATOM\s\s(?:\s*(?P<ser>\d+))\s(?P<atm>[\w\s]{4})"
r"(?P<alc>\w|\s)(?P<res>[\w]{3})\s(?P<chn>.)"
r"(?P<pos>[\s\-\d]{4})(?P<icode>[A-Za-z\s])\s\s\s"
r"(?P<x>[\s\-\d\.]{10})(?P<y>[\s\-\d\.]{10})"
r"(?P<z>[\s\-\d\.]{10})(?P<occ>[\s\d\.]{7})"
r"(?P<tfac>[\s\d\.]{6})\s{6}"
r"(?P<segid>[a-zA-z\s]{4})(?P<elm>.{2})"
r"(?P<chg>.{2})?\s*$"
)
bfac_re = re.compile(
r"^BFAC:\s([^\s]+\s+[\-\d\.]+)"
r"\s*([^\s]+\s+[\-\d\.]+)?"
r"\s*([^\s]+\s+[\-\d\.]+)?"
r"\s*([^\s]+\s+[\-\d\.]+)?"
r"\s*([^\s]+\s+[\-\d\.]+)?"
)
bfac2_re = re.compile(r"([^\s]+)\s+([\-\d\.]+)")
struct_builder = StructureBuilder()
# init empty header dict
# - could use to parse HEADER and TITLE lines except
# deposition_date format changed from original PDB header
header_dict = _parse_pdb_header_list([])
curr_SMCS = [None, None, None, None] # struct model chain seg
SMCS_init = [
struct_builder.init_structure,
struct_builder.init_model,
struct_builder.init_chain,
struct_builder.init_seg,
]
sb_res = None
rkl = None
sb_chain = None
sbcic = None
sbric = None
akc = {}
hl12 = {}
ha = {}
hl23 = {}
da = {}
bfacs = {}
orphan_aks = set() # []
tr = [] # this residue
pr = [] # previous residue
def akcache(akstr: str) -> AtomKey:
"""Maintain dictionary of AtomKeys seen while reading this PIC file."""
# akstr: full AtomKey string read from .pic file, includes residue info
try:
return akc[akstr]
except KeyError:
ak = akc[akstr] = AtomKey(akstr)
return ak
def link_residues(ppr: List[Residue], pr: List[Residue]) -> None:
"""Set next and prev links between i-1 and i-2 residues."""
for p_r in pr:
pric = p_r.internal_coord
for p_p_r in ppr:
ppric = p_p_r.internal_coord
if p_r.id[0] == " ": # not heteroatoms
if pric not in ppric.rnext:
ppric.rnext.append(pric)
if p_p_r.id[0] == " ":
if ppric not in pric.rprev:
pric.rprev.append(ppric)
def process_hedron(
a1: str,
a2: str,
a3: str,
l12: str,
ang: str,
l23: str,
ric: IC_Residue,
) -> Tuple:
"""Create Hedron on current (sbcic) Chain.internal_coord."""
ek = (akcache(a1), akcache(a2), akcache(a3))
atmNdx = AtomKey.fields.atm
accept = IC_Residue.accept_atoms
if not all(ek[i].akl[atmNdx] in accept for i in range(3)):
return
hl12[ek] = float(l12)
ha[ek] = float(ang)
hl23[ek] = float(l23)
sbcic.hedra[ek] = ric.hedra[ek] = h = Hedron(ek)
h.cic = sbcic
ak_add(ek, ric)
return ek
def default_hedron(ek: Tuple, ric: IC_Residue) -> None:
"""Create Hedron based on same re_class hedra in ref database.
Adds Hedron to current Chain.internal_coord, see ic_data for default
values and reference database source.
"""
atomkeys = []
hkey = None
atmNdx = AtomKey.fields.atm
resNdx = AtomKey.fields.resname
resPos = AtomKey.fields.respos
atomkeys = [ek[i].akl for i in range(3)]
atpl = tuple([atomkeys[i][atmNdx] for i in range(3)])
res = atomkeys[0][resNdx]
if (
atomkeys[0][resPos]
!= atomkeys[2][resPos] # hedra crosses amide bond so not reversed
or atpl == ("N", "CA", "C") # or chain start tau
or atpl in ic_data_backbone # or found forward hedron in ic_data
or (res not in ["A", "G"] and atpl in ic_data_sidechains[res])
):
hkey = ek
rhcl = [atomkeys[i][resNdx] + atomkeys[i][atmNdx] for i in range(3)]
try:
dflts = hedra_defaults["".join(rhcl)][0]
except KeyError:
if atomkeys[0][resPos] == atomkeys[1][resPos]:
rhcl = [atomkeys[i][resNdx] + atomkeys[i][atmNdx] for i in range(2)]
rhc = "".join(rhcl) + "X" + atomkeys[2][atmNdx]
else:
rhcl = [
atomkeys[i][resNdx] + atomkeys[i][atmNdx] for i in range(1, 3)
]
rhc = "X" + atomkeys[0][atmNdx] + "".join(rhcl)
dflts = hedra_defaults[rhc][0]
else:
# must be reversed or fail
hkey = ek[::-1]
rhcl = [atomkeys[i][resNdx] + atomkeys[i][atmNdx] for i in range(2, -1, -1)]
dflts = hedra_defaults["".join(rhcl)][0]
process_hedron(
str(hkey[0]),
str(hkey[1]),
str(hkey[2]),
dflts[0],
dflts[1],
dflts[2],
ric,
)
if verbose:
print(f" default for {ek}")
def hedra_check(dk: Tuple, ric: IC_Residue) -> None:
"""Confirm both hedra present for dihedron key, use default if set."""
if dk[0:3] not in sbcic.hedra and dk[2::-1] not in sbcic.hedra:
if defaults:
default_hedron(dk[0:3], ric)
else:
print(f"{dk} missing h1")
if dk[1:4] not in sbcic.hedra and dk[3:0:-1] not in sbcic.hedra:
if defaults:
default_hedron(dk[1:4], ric)
else:
print(f"{dk} missing h2")
def process_dihedron(
a1: str, a2: str, a3: str, a4: str, dangle: str, ric: IC_Residue
) -> Set:
"""Create Dihedron on current Chain.internal_coord."""
ek = (
akcache(a1),
akcache(a2),
akcache(a3),
akcache(a4),
)
atmNdx = AtomKey.fields.atm
accept = IC_Residue.accept_atoms
if not all(ek[i].akl[atmNdx] in accept for i in range(4)):
return
dangle = float(dangle)
dangle = dangle if (dangle <= 180.0) else dangle - 360.0
dangle = dangle if (dangle >= -180.0) else dangle + 360.0
da[ek] = float(dangle)
sbcic.dihedra[ek] = ric.dihedra[ek] = d = Dihedron(ek)
d.cic = sbcic
if not quick:
hedra_check(ek, ric)
ak_add(ek, ric)
return ek
def default_dihedron(ek: List, ric: IC_Residue) -> None:
"""Create Dihedron based on same residue class dihedra in ref database.
Adds Dihedron to current Chain.internal_coord, see ic_data for default
values and reference database source.
"""
atmNdx = AtomKey.fields.atm
resNdx = AtomKey.fields.resname
resPos = AtomKey.fields.respos
rdclass = ""
dclass = ""
for ak in ek:
dclass += ak.akl[atmNdx]
rdclass += ak.akl[resNdx] + ak.akl[atmNdx]
if dclass == "NCACN":
rdclass = rdclass[0:7] + "XN"
elif dclass == "CACNCA":
rdclass = "XCAXC" + rdclass[5:]
elif dclass == "CNCAC":
rdclass = "XC" + rdclass[2:]
if rdclass in dihedra_primary_defaults:
process_dihedron(
str(ek[0]),
str(ek[1]),
str(ek[2]),
str(ek[3]),
dihedra_primary_defaults[rdclass][0],
ric,
)
if verbose:
print(f" default for {ek}")
elif rdclass in dihedra_secondary_defaults:
primAngle, offset = dihedra_secondary_defaults[rdclass]
rname = ek[2].akl[resNdx]
rnum = int(ek[2].akl[resPos])
paKey = None
if primAngle == ("N", "CA", "C", "N") and ek[0].ric.rnext != []:
paKey = [
AtomKey((rnum, None, rname, primAngle[x], None, None))
for x in range(3)
]
rnext = ek[0].ric.rnext
paKey.append(
AtomKey(
(
rnext[0].rbase[0],
None,
rnext[0].rbase[2],
"N",
None,
None,
)
)
)
paKey = tuple(paKey)
elif primAngle == ("CA", "C", "N", "CA"):
prname = pr.akl[0][resNdx]
prnum = pr.akl[0][resPos]
paKey = [
AtomKey(prnum, None, prname, primAngle[x], None, None)
for x in range(2)
]
                paKey.extend(
                    AtomKey((rnum, None, rname, primAngle[x], None, None))
                    for x in range(2, 4)
                )
paKey = tuple(paKey)
else:
paKey = tuple(
AtomKey((rnum, None, rname, atm, None, None)) for atm in primAngle
)
if paKey in da:
angl = da[paKey] + dihedra_secondary_defaults[rdclass][1]
process_dihedron(
str(ek[0]),
str(ek[1]),
str(ek[2]),
str(ek[3]),
angl,
ric,
)
if verbose:
print(f" secondary default for {ek}")
        elif rdclass in dihedra_secondary_xoxt_defaults:
            # look up defaults first so primAngle, rname and rnum are bound
            # before either branch below references them
            primAngle, offset = dihedra_secondary_xoxt_defaults[rdclass]
            rname = ek[2].akl[resNdx]
            rnum = int(ek[2].akl[resPos])
            if primAngle == ("C", "N", "CA", "C"):  # primary for alt cb
                # no way to trigger alt cb with default=True
                # because will generate default N-CA-C-O
                prname = pr.akl[0][resNdx]
                prnum = pr.akl[0][resPos]
                paKey = [AtomKey(prnum, None, prname, primAngle[0], None, None)]
                paKey.extend(
                    AtomKey((rnum, None, rname, primAngle[x], None, None))
                    for x in range(1, 4)
                )
                paKey = tuple(paKey)
            else:
                paKey = tuple(
                    AtomKey((rnum, None, rname, atm, None, None))
                    for atm in primAngle
                )
if paKey in da:
angl = da[paKey] + offset
process_dihedron(
str(ek[0]),
str(ek[1]),
str(ek[2]),
str(ek[3]),
angl,
ric,
)
if verbose:
print(f" oxt default for {ek}")
else:
print(
f"missing primary angle {paKey} {primAngle} to "
f"generate {rnum}{rname} {rdclass}"
)
else:
print(
f"missing {ek} -> {rdclass} ({dclass}) not found in primary or"
" secondary defaults"
)
def dihedra_check(ric: IC_Residue) -> None:
"""Look for required dihedra in residue, generate defaults if set."""
# This method has some internal functions
# rnext should be set
def ake_recurse(akList: List) -> List:
"""Build combinatorics of AtomKey lists."""
car = akList[0]
if len(akList) > 1:
retList = []
for ak in car:
cdr = akList[1:]
rslt = ake_recurse(cdr)
for r in rslt:
r.insert(0, ak)
retList.append(r)
return retList
else:
if len(car) == 1:
return [list(car)]
else:
retList = [[ak] for ak in car]
return retList
def ak_expand(eLst: List) -> List:
"""Expand AtomKey list with altlocs, all combinatorics."""
retList = []
for edron in eLst:
newList = []
for ak in edron:
rslt = ak.ric.split_akl([ak])
rlst = [r[0] for r in rslt]
if rlst != []:
newList.append(rlst)
else:
newList.append([ak])
rslt = ake_recurse(newList)
for r in rslt:
retList.append(r)
return retList
# dihedra_check processing starts here
# generate the list of dihedra this residue should have
chkLst = []
sN, sCA, sC = AtomKey(ric, "N"), AtomKey(ric, "CA"), AtomKey(ric, "C")
sO, sCB, sH = AtomKey(ric, "O"), AtomKey(ric, "CB"), AtomKey(ric, "H")
if ric.rnext != []:
for rn in ric.rnext:
nN, nCA, nC = (
AtomKey(rn, "N"),
AtomKey(rn, "CA"),
AtomKey(rn, "C"),
)
# intermediate residue, need psi, phi, omg
chkLst.append((sN, sCA, sC, nN)) # psi
chkLst.append((sCA, sC, nN, nCA)) # omg i+1
chkLst.append((sC, nN, nCA, nC)) # phi i+1
else:
chkLst.append((sN, sCA, sC, AtomKey(ric, "OXT"))) # psi
rn = "(no rnext)"
chkLst.append((sN, sCA, sC, sO)) # locate backbone O
if ric.lc != "G":
chkLst.append((sO, sC, sCA, sCB)) # locate CB
if ric.lc == "A":
chkLst.append((sN, sCA, sCB)) # missed for generate from seq
if ric.rprev != [] and ric.lc != "P" and proton:
chkLst.append((sC, sCA, sN, sH)) # amide proton
try:
for edron in ic_data_sidechains[ric.lc]:
if len(edron) > 3: # dihedra only
if all(atm[0] != "H" for atm in edron):
akl = [AtomKey(ric, atm) for atm in edron[0:4]]
chkLst.append(akl)
except KeyError:
pass
# now compare generated list to ric.dihedra, get defaults if set.
chkLst = ak_expand(chkLst)
altloc_ndx = AtomKey.fields.altloc
for dk in chkLst:
if tuple(dk) in ric.dihedra:
pass
elif sH in dk:
pass # ignore missing hydrogens
elif all(atm.akl[altloc_ndx] is None for atm in dk):
if defaults:
if len(dk) != 3:
default_dihedron(dk, ric)
else:
default_hedron(dk, ric) # add ALA N-Ca-Cb
else:
if verbose:
print(f"{ric}-{rn} missing {dk}")
else:
# print(f"skip {ek}")
pass # ignore missing combinatoric of altloc atoms
# need more here?
def ak_add(ek: Tuple, ric: IC_Residue) -> None:
"""Allocate edron key AtomKeys to current residue as appropriate.
A hedron or dihedron may span a backbone amide bond, this routine
allocates atoms in the (h/di)edron to the ric residue or saves them
for a residue yet to be processed.
:param set ek: AtomKeys in edron
:param IC_Residue ric: current residue to assign AtomKeys to
"""
res = ric.residue
reskl = (
str(res.id[1]),
(None if res.id[2] == " " else res.id[2]),
ric.lc,
)
for ak in ek:
if ak.ric is None:
sbcic.akset.add(ak)
if ak.akl[0:3] == reskl:
ak.ric = ric
ric.ak_set.add(ak)
else:
orphan_aks.add(ak)
def finish_chain() -> None:
"""Do last rnext, rprev links and process chain edra data."""
link_residues(pr, tr)
# check/confirm completeness
if not quick:
for r in pr:
dihedra_check(r.internal_coord)
for r in tr:
dihedra_check(r.internal_coord)
if ha != {}:
sha = {k: ha[k] for k in sorted(ha)}
shl12 = {k: hl12[k] for k in sorted(hl12)}
shl23 = {k: hl23[k] for k in sorted(hl23)}
# da not in order if generated from seq
sda = {k: da[k] for k in sorted(da)}
sbcic._hedraDict2chain(shl12, sha, shl23, sda, bfacs)
# read_PIC processing starts here:
with as_handle(file, mode="r") as handle:
for line in handle.readlines():
if line.startswith("#"):
pass # skip comment lines
elif line.startswith("HEADER "):
m = pdb_hdr_re.match(line)
if m:
header_dict["head"] = m.group("cf") # classification
header_dict["idcode"] = m.group("id")
header_dict["deposition_date"] = m.group("dd")
elif verbose:
print("Reading pic file", file, "HEADER parse fail: ", line)
elif line.startswith("TITLE "):
m = pdb_ttl_re.match(line)
if m:
header_dict["name"] = m.group("ttl").strip()
# print('TTL: ', m.group('ttl').strip())
elif verbose:
print("Reading pic file", file, "TITLE parse fail:, ", line)
elif line.startswith("("): # Biopython ID line for Residue
m = biop_id_re.match(line)
if m:
# check SMCS = Structure, Model, Chain, SegID
segid = m.group(9)
if segid is None:
segid = " "
this_SMCS = [
m.group(1),
int(m.group(2)),
m.group(3),
segid,
]
if curr_SMCS != this_SMCS:
if curr_SMCS[:3] != this_SMCS[:3] and ha != {}:
# chain change so process current chain data
finish_chain()
akc = {} # atomkey cache, used by akcache()
hl12 = {} # hedra key -> len12
ha = {} # -> hedra angle
hl23 = {} # -> len23
da = {} # dihedra key -> angle value
bfacs = {} # atomkey string -> b-factor
# init new Biopython SMCS level as needed
for i in range(4):
if curr_SMCS[i] != this_SMCS[i]:
SMCS_init[i](this_SMCS[i])
curr_SMCS[i] = this_SMCS[i]
if i == 0:
# 0 = init structure so add header
struct_builder.set_header(header_dict)
elif i == 1:
# new model means new chain and new segid
curr_SMCS[2] = curr_SMCS[3] = None
elif i == 2:
# new chain so init internal_coord
sb_chain = struct_builder.chain
sbcic = sb_chain.internal_coord = IC_Chain(sb_chain)
struct_builder.init_residue(
m.group("res"),
m.group("het"),
int(m.group("pos")),
m.group("icode"),
)
sb_res = struct_builder.residue
if sb_res.id[0] != " ": # skip hetatm
continue
if 2 == sb_res.is_disordered():
for r in sb_res.child_dict.values():
if not r.internal_coord:
sb_res = r
break
# added to disordered res
tr.append(sb_res)
else:
# new res so fix up previous residue as feasible
link_residues(pr, tr)
if not quick:
for r in pr:
# create di/hedra if default for residue i-1
# just linked
dihedra_check(r.internal_coord)
pr = tr
tr = [sb_res]
sbric = sb_res.internal_coord = IC_Residue(
sb_res
) # no atoms so no rak
sbric.cic = sbcic
rkl = (
str(sb_res.id[1]),
(None if sb_res.id[2] == " " else sb_res.id[2]),
sbric.lc,
)
sbcic.ordered_aa_ic_list.append(sbric)
# update AtomKeys w/o IC_Residue references, in case
# chain ends before di/hedra sees them (2XHE test case)
for ak in orphan_aks:
if ak.akl[0:3] == rkl:
ak.ric = sbric
sbric.ak_set.add(ak)
# may need altoc support here
orphan_aks = set(filter(lambda ak: ak.ric is None, orphan_aks))
else:
if verbose:
print(
"Reading pic file",
file,
"residue ID parse fail: ",
line,
)
return None
elif line.startswith("ATOM "):
m = pdb_atm_re.match(line)
if not m:
m = pdbx_atm_re.match(line)
if m:
if sb_res is None:
# ATOM without res spec already loaded, not a pic file
if verbose:
print(
"Reading pic file",
file,
"ATOM without residue configured:, ",
line,
)
return None
if sb_res.resname != m.group("res") or sb_res.id[1] != int(
m.group("pos")
):
if verbose:
print(
"Reading pic file",
file,
"ATOM not in configured residue (",
sb_res.resname,
str(sb_res.id),
"):",
line,
)
return None
coord = np.array(
(
float(m.group("x")),
float(m.group("y")),
float(m.group("z")),
),
"f",
)
struct_builder.init_atom(
m.group("atm").strip(),
coord,
float(m.group("tfac")),
float(m.group("occ")),
m.group("alc"),
m.group("atm"),
int(m.group("ser")),
m.group("elm").strip(),
)
# reset because prev does not link to this residue
# (chainBreak)
pr = []
elif line.startswith("BFAC: "):
m = bfac_re.match(line)
if m:
for bfac_pair in m.groups():
if bfac_pair is not None:
m2 = bfac2_re.match(bfac_pair)
bfacs[m2.group(1)] = float(m2.group(2))
# else:
# print f"Reading pic file {file} B-factor fail: {line}"
else:
m = Edron.edron_re.match(line)
if m and sb_res is not None:
if m["a4"] is None:
process_hedron(
m["a1"],
m["a2"],
m["a3"],
m["len12"],
m["angle"],
m["len23"],
sb_res.internal_coord,
)
else:
process_dihedron(
m["a1"],
m["a2"],
m["a3"],
m["a4"],
m["dihedral"],
sb_res.internal_coord,
)
elif m:
print(
"PIC file: ",
file,
" error: no residue info before reading (di/h)edron: ",
line,
)
return None
elif line.strip():
if verbose:
print(
"Reading PIC file",
file,
"parse fail on: .",
line,
".",
)
return None
# reached end of input
finish_chain()
# print(report_PIC(struct_builder.get_structure()))
return struct_builder.get_structure() |
Read :class:`.SeqRecord` into Structure with default internal coords. | def read_PIC_seq(
seqRec: "SeqIO.SeqRecord",
    pdbid: Optional[str] = None,
    title: Optional[str] = None,
    chain: Optional[str] = None,
) -> Structure:
"""Read :class:`.SeqRecord` into Structure with default internal coords."""
read_pdbid, read_title, read_chain = None, None, None
if seqRec.id is not None:
read_pdbid = seqRec.id
if seqRec.description is not None:
read_title = seqRec.description.replace(f"{read_pdbid} ", "")
if ":" in read_pdbid:
read_pdbid, read_chain = read_pdbid.split(":")
if chain is None:
chain = read_chain if read_chain is not None else "A"
if title is None:
title = (
read_title
if read_title is not None
else f"sequence input {seqRec.id if seqRec.id is not None else ''}"
)
if pdbid is None:
pdbid = read_pdbid if read_pdbid is not None else "0PDB"
today = date.today()
datestr = (today.strftime("%d-%b-%y")).upper()
output = f"HEADER {'GENERATED STRUCTURE':40}{datestr} {pdbid}\n"
output += f"TITLE {title.upper():69}\n"
ndx = 1
for r in seqRec.seq:
output += (
f"('{pdbid}', 0, '{chain}', (' ', {ndx}, ' ')) {protein_letters_1to3[r]}\n"
)
ndx += 1
sp = StringIO()
sp.write(output)
sp.seek(0)
return read_PIC(sp, defaults=True) |
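A minimal usage sketch for read_PIC_seq follows; the record id "1ABC:A" and the sequence are invented for illustration.

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

record = SeqRecord(Seq("MKVL"), id="1ABC:A", description="1ABC:A demo peptide")
structure = read_PIC_seq(record)  # pdbid "1ABC" and chain "A" come from the id
structure.internal_to_atom_coordinates()  # realise XYZ from default geometry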
Ensure all atoms in entity have serial_number set. | def enumerate_atoms(entity):
"""Ensure all atoms in entity have serial_number set."""
while entity.get_parent():
entity = entity.get_parent() # get to top level
if "S" == entity.level:
for mdl in entity: # each model starts with 1
_enumerate_entity_atoms(mdl)
else: # only Chain or Residue, start with 1
_enumerate_entity_atoms(entity) |
Convert yyyy-mm-dd date to dd-month-yy. | def pdb_date(datestr: str) -> str:
"""Convert yyyy-mm-dd date to dd-month-yy."""
if datestr:
m = re.match(r"(\d{4})-(\d{2})-(\d{2})", datestr)
if m:
mo = [
"XXX",
"JAN",
"FEB",
"MAR",
"APR",
"MAY",
"JUN",
"JUL",
"AUG",
"SEP",
"OCT",
"NOV",
"DEC",
][int(m.group(2))]
datestr = m.group(3) + "-" + mo + "-" + m.group(1)[-2:]
return datestr |
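A quick sanity-check sketch for pdb_date; the dates are illustrative.

assert pdb_date("2019-09-30") == "30-SEP-19"
assert pdb_date("") == ""  # falsy input is passed through unchanged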
Write Protein Internal Coordinates (PIC) to file.
See :func:`read_PIC` for file format.
See :data:`IC_Residue.pic_accuracy` to vary numeric accuracy.
Recurses to lower entity levels (M, C, R).
:param Entity entity: Biopython PDB Entity object: S, M, C or R
:param Bio.File file: :func:`.as_handle` file name or handle
:param str pdbid: PDB idcode, read from entity if not supplied
:param char chainid: PDB Chain ID, set from C level entity.id if needed
:param int picFlags: boolean flags controlling output, defined in
:data:`Bio.PDB.internal_coords.IC_Residue.pic_flags`
* "psi",
* "omg",
* "phi",
* "tau", # tau hedron (N-Ca-C)
* "chi1",
* "chi2",
* "chi3",
* "chi4",
* "chi5",
* "pomg", # proline omega
* "chi", # chi1 through chi5
* "classic_b", # psi | phi | tau | pomg
* "classic", # classic_b | chi
* "hedra", # all hedra including bond lengths
* "primary", # all primary dihedra
* "secondary", # all secondary dihedra (fixed angle from primary dihedra)
* "all", # hedra | primary | secondary
* "initAtoms", # XYZ coordinates of initial Tau (N-Ca-C)
* "bFactors"
default is everything::
picFlagsDefault = (
pic_flags.all | pic_flags.initAtoms | pic_flags.bFactors
)
Usage in your code::
# just primary dihedra and all hedra
picFlags = (
IC_Residue.pic_flags.primary | IC_Residue.pic_flags.hedra
)
# no B-factors:
picFlags = IC_Residue.picFlagsDefault
picFlags &= ~IC_Residue.pic_flags.bFactors
:func:`read_PIC` with `(defaults=True)` will use default values for
anything left out
:param float hCut: default None
only write hedra with ref db angle std dev greater than this value
:param float pCut: default None
only write primary dihedra with ref db angle std dev greater than this
value
**Default values**:
Data averaged from Sep 2019 Dunbrack cullpdb_pc20_res2.2_R1.0.
Please see
`PISCES: A Protein Sequence Culling Server <https://dunbrack.fccc.edu/pisces/>`_
'G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling
server. Bioinformatics, 19:1589-1591, 2003.'
'primary' and 'secondary' dihedra are defined in ic_data.py. Specifically,
secondary dihedra can be determined as a fixed rotation from another known
angle, for example N-Ca-C-O can be estimated from N-Ca-C-N (psi).
Standard deviations are listed in
<biopython distribution>/Bio/PDB/ic_data.py for default values, and can be
used to limit which hedra and dihedra are defaulted vs. output exact
measurements from structure (see hCut and pCut above). Default values for
primary dihedra (psi, phi, omega, chi1, etc.) are chosen as the most common
integer value, not an average.
:raises PDBException: if entity level is A (Atom)
:raises Exception: if entity does not have .level attribute | def write_PIC(
entity,
file,
pdbid=None,
chainid=None,
picFlags: int = IC_Residue.picFlagsDefault,
    hCut: Optional[float] = None,
    pCut: Optional[float] = None,
):
"""Write Protein Internal Coordinates (PIC) to file.
See :func:`read_PIC` for file format.
See :data:`IC_Residue.pic_accuracy` to vary numeric accuracy.
Recurses to lower entity levels (M, C, R).
:param Entity entity: Biopython PDB Entity object: S, M, C or R
:param Bio.File file: :func:`.as_handle` file name or handle
:param str pdbid: PDB idcode, read from entity if not supplied
:param char chainid: PDB Chain ID, set from C level entity.id if needed
:param int picFlags: boolean flags controlling output, defined in
:data:`Bio.PDB.internal_coords.IC_Residue.pic_flags`
* "psi",
* "omg",
* "phi",
* "tau", # tau hedron (N-Ca-C)
* "chi1",
* "chi2",
* "chi3",
* "chi4",
* "chi5",
* "pomg", # proline omega
* "chi", # chi1 through chi5
* "classic_b", # psi | phi | tau | pomg
* "classic", # classic_b | chi
* "hedra", # all hedra including bond lengths
* "primary", # all primary dihedra
* "secondary", # all secondary dihedra (fixed angle from primary dihedra)
* "all", # hedra | primary | secondary
* "initAtoms", # XYZ coordinates of initial Tau (N-Ca-C)
* "bFactors"
default is everything::
picFlagsDefault = (
pic_flags.all | pic_flags.initAtoms | pic_flags.bFactors
)
Usage in your code::
# just primary dihedra and all hedra
picFlags = (
IC_Residue.pic_flags.primary | IC_Residue.pic_flags.hedra
)
# no B-factors:
picFlags = IC_Residue.picFlagsDefault
picFlags &= ~IC_Residue.pic_flags.bFactors
:func:`read_PIC` with `(defaults=True)` will use default values for
anything left out
:param float hCut: default None
only write hedra with ref db angle std dev greater than this value
:param float pCut: default None
only write primary dihedra with ref db angle std dev greater than this
value
**Default values**:
Data averaged from Sep 2019 Dunbrack cullpdb_pc20_res2.2_R1.0.
Please see
`PISCES: A Protein Sequence Culling Server <https://dunbrack.fccc.edu/pisces/>`_
'G. Wang and R. L. Dunbrack, Jr. PISCES: a protein sequence culling
server. Bioinformatics, 19:1589-1591, 2003.'
'primary' and 'secondary' dihedra are defined in ic_data.py. Specifically,
secondary dihedra can be determined as a fixed rotation from another known
angle, for example N-Ca-C-O can be estimated from N-Ca-C-N (psi).
Standard deviations are listed in
<biopython distribution>/Bio/PDB/ic_data.py for default values, and can be
used to limit which hedra and dihedra are defaulted vs. output exact
measurements from structure (see hCut and pCut above). Default values for
primary dihedra (psi, phi, omega, chi1, etc.) are chosen as the most common
integer value, not an average.
:raises PDBException: if entity level is A (Atom)
:raises Exception: if entity does not have .level attribute
"""
enumerate_atoms(entity)
with as_handle(file, "w") as fp:
try:
if "A" == entity.level:
raise PDBException("No PIC output at Atom level")
elif "R" == entity.level:
if 2 == entity.is_disordered():
for r in entity.child_dict.values():
_wpr(
r,
fp,
pdbid,
chainid,
picFlags=picFlags,
hCut=hCut,
pCut=pCut,
)
else:
_wpr(
entity,
fp,
pdbid,
chainid,
picFlags=picFlags,
hCut=hCut,
pCut=pCut,
)
elif "C" == entity.level:
if not chainid:
chainid = entity.id
for res in entity:
write_PIC(
res,
fp,
pdbid,
chainid,
picFlags=picFlags,
hCut=hCut,
pCut=pCut,
)
elif "M" == entity.level:
for chn in entity:
write_PIC(
chn,
fp,
pdbid,
chainid,
picFlags=picFlags,
hCut=hCut,
pCut=pCut,
)
elif "S" == entity.level:
if not pdbid:
pdbid = entity.header.get("idcode", None)
hdr = entity.header.get("head", None)
dd = pdb_date(entity.header.get("deposition_date", None))
if hdr:
fp.write(
("HEADER {:40}{:8} {:4}\n").format(
hdr.upper(), (dd or ""), (pdbid or "")
)
)
name = entity.header.get("name", None)
if name:
fp.write("TITLE " + name.upper() + "\n")
for mdl in entity:
write_PIC(
mdl,
fp,
pdbid,
chainid,
picFlags=picFlags,
hCut=hCut,
pCut=pCut,
)
else:
raise PDBException("Cannot identify level: " + str(entity.level))
except KeyError:
raise Exception(
"write_PIC: argument is not a Biopython PDB Entity " + str(entity)
) |
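A hedged usage sketch for write_PIC; "1abc.pdb" and "1abc.pic" are placeholder filenames, and internal coordinates must be computed before writing.

from Bio.PDB import PDBParser

structure = PDBParser(QUIET=True).get_structure("1ABC", "1abc.pdb")
structure.atom_to_internal_coordinates()  # populate hedra and dihedra first
# classic backbone/sidechain dihedra plus all hedra, no initial coordinates:
picFlags = IC_Residue.pic_flags.classic | IC_Residue.pic_flags.hedra
write_PIC(structure, "1abc.pic", picFlags=picFlags)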
Index to corresponding one letter amino acid name.
>>> index_to_one(0)
'A'
>>> index_to_one(19)
'Y' | def index_to_one(index):
"""Index to corresponding one letter amino acid name.
>>> index_to_one(0)
'A'
>>> index_to_one(19)
'Y'
"""
return dindex_to_1[index] |
One letter code to index.
>>> one_to_index('A')
0
>>> one_to_index('Y')
19 | def one_to_index(s):
"""One letter code to index.
>>> one_to_index('A')
0
>>> one_to_index('Y')
19
"""
return d1_to_index[s] |
Index to corresponding three letter amino acid name.
>>> index_to_three(0)
'ALA'
>>> index_to_three(19)
'TYR' | def index_to_three(i):
"""Index to corresponding three letter amino acid name.
>>> index_to_three(0)
'ALA'
>>> index_to_three(19)
'TYR'
"""
return dindex_to_3[i] |
Three letter code to index.
>>> three_to_index('ALA')
0
>>> three_to_index('TYR')
19 | def three_to_index(s):
"""Three letter code to index.
>>> three_to_index('ALA')
0
>>> three_to_index('TYR')
19
"""
return d3_to_index[s] |
Return True if residue object/string is an amino acid.
:param residue: a L{Residue} object OR a three letter amino acid code
:type residue: L{Residue} or string
:param standard: flag to check for the 20 AA (default false)
:type standard: boolean
>>> is_aa('ALA')
True
Known three letter codes for modified amino acids are supported,
>>> is_aa('FME')
True
>>> is_aa('FME', standard=True)
False | def is_aa(residue, standard=False):
"""Return True if residue object/string is an amino acid.
:param residue: a L{Residue} object OR a three letter amino acid code
:type residue: L{Residue} or string
:param standard: flag to check for the 20 AA (default false)
:type standard: boolean
>>> is_aa('ALA')
True
Known three letter codes for modified amino acids are supported,
>>> is_aa('FME')
True
>>> is_aa('FME', standard=True)
False
"""
if not isinstance(residue, str):
residue = f"{residue.get_resname():<3s}"
residue = residue.upper()
if standard:
return residue in protein_letters_3to1
else:
return residue in protein_letters_3to1_extended |
Return True if residue object/string is a nucleic acid.
:param residue: a L{Residue} object OR a three letter code
:type residue: L{Residue} or string
:param standard: flag to check for the 8 (DNA + RNA) canonical bases.
Default is False.
:type standard: boolean
>>> is_nucleic('DA ')
True
>>> is_nucleic('A ')
True
Known three letter codes for modified nucleotides are supported,
>>> is_nucleic('A2L')
True
>>> is_nucleic('A2L', standard=True)
False | def is_nucleic(residue, standard=False):
"""Return True if residue object/string is a nucleic acid.
:param residue: a L{Residue} object OR a three letter code
:type residue: L{Residue} or string
:param standard: flag to check for the 8 (DNA + RNA) canonical bases.
Default is False.
:type standard: boolean
>>> is_nucleic('DA ')
True
>>> is_nucleic('A ')
True
Known three letter codes for modified nucleotides are supported,
>>> is_nucleic('A2L')
True
>>> is_nucleic('A2L', standard=True)
False
"""
if not isinstance(residue, str):
residue = f"{residue.get_resname():<3s}"
residue = residue.upper()
if standard:
return residue in nucleic_letters_3to1
else:
return residue in nucleic_letters_3to1_extended |
Run PSEA and return output filename.
Note that this assumes the P-SEA binary is called "psea" and that it is
on the path.
Note that P-SEA will write an output file in the current directory using
the input filename with extension ".sea".
Note that P-SEA will not write output to the terminal while run unless
verbose is set to True. | def run_psea(fname, verbose=False):
"""Run PSEA and return output filename.
Note that this assumes the P-SEA binary is called "psea" and that it is
on the path.
Note that P-SEA will write an output file in the current directory using
the input filename with extension ".sea".
Note that P-SEA will not write output to the terminal while run unless
verbose is set to True.
"""
last = fname.split("/")[-1]
base = last.split(".")[0]
cmd = ["psea", fname]
p = subprocess.run(cmd, capture_output=True, text=True)
if verbose:
print(p.stdout)
if not p.stderr.strip() and os.path.exists(base + ".sea"):
return base + ".sea"
else:
raise RuntimeError(f"Error running p-sea: {p.stderr}") |
Parse PSEA output file. | def psea(pname):
"""Parse PSEA output file."""
fname = run_psea(pname)
start = 0
ss = ""
with open(fname) as fp:
for line in fp:
if line[0:6] == ">p-sea":
start = 1
continue
if not start:
continue
if line[0] == "\n":
break
ss = ss + line[0:-1]
return ss |
Translate PSEA secondary structure string into HEC. | def psea2HEC(pseq):
"""Translate PSEA secondary structure string into HEC."""
    seq = []
    for ss in pseq:
        if ss == "a":
            n = "H"
        elif ss == "b":
            n = "E"
        else:
            # "c" is coil; map any unrecognised code to coil as well, so a
            # stray character cannot silently reuse the previous assignment
            n = "C"
        seq.append(n)
return seq |
Apply secondary structure information to residues in model. | def annotate(m, ss_seq):
"""Apply secondary structure information to residues in model."""
    c = m.get_list()[0]
    all_res = c.get_list()
    residues = []
    # Now remove HOH etc.
    for res in all_res:
        if is_aa(res):
            residues.append(res)
    L = len(residues)
    if L != len(ss_seq):
        raise ValueError("Length mismatch %i %i" % (L, len(ss_seq)))
for i in range(L):
residues[i].xtra["SS_PSEA"] = ss_seq[i] |
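A sketch of the full P-SEA pipeline; it assumes the external "psea" binary is on the PATH and uses "myprot.pdb" as a placeholder filename.

from Bio.PDB import PDBParser

model = PDBParser(QUIET=True).get_structure("X", "myprot.pdb")[0]
ss_seq = psea2HEC(psea("myprot.pdb"))  # e.g. ["H", "H", "E", "C", ...]
annotate(model, ss_seq)  # each amino acid residue gets res.xtra["SS_PSEA"]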
Implement the QCP code in Python.
Input coordinate arrays must be centered at the origin and have
shape Nx3.
Variable names match (as much as possible) the C implementation. | def qcp(coords1, coords2, natoms):
"""Implement the QCP code in Python.
Input coordinate arrays must be centered at the origin and have
shape Nx3.
Variable names match (as much as possible) the C implementation.
"""
# Original code has coords1 be the mobile. I think it makes more sense
# for it to be the reference, so I swapped here.
G1 = np.trace(np.dot(coords2, coords2.T))
G2 = np.trace(np.dot(coords1, coords1.T))
A = np.dot(coords2.T, coords1) # referred to as M in the original paper.
E0 = (G1 + G2) * 0.5
Sxx, Sxy, Sxz, Syx, Syy, Syz, Szx, Szy, Szz = A.flatten()
Sxx2 = Sxx * Sxx
Syy2 = Syy * Syy
Szz2 = Szz * Szz
Sxy2 = Sxy * Sxy
Syz2 = Syz * Syz
Sxz2 = Sxz * Sxz
Syx2 = Syx * Syx
Szy2 = Szy * Szy
Szx2 = Szx * Szx
SyzSzymSyySzz2 = 2.0 * (Syz * Szy - Syy * Szz)
Sxx2Syy2Szz2Syz2Szy2 = Syy2 + Szz2 - Sxx2 + Syz2 + Szy2
C2 = -2.0 * (Sxx2 + Syy2 + Szz2 + Sxy2 + Syx2 + Sxz2 + Szx2 + Syz2 + Szy2)
C1 = 8.0 * (
Sxx * Syz * Szy
+ Syy * Szx * Sxz
+ Szz * Sxy * Syx
- Sxx * Syy * Szz
- Syz * Szx * Sxy
- Szy * Syx * Sxz
)
SxzpSzx = Sxz + Szx
SyzpSzy = Syz + Szy
SxypSyx = Sxy + Syx
SyzmSzy = Syz - Szy
SxzmSzx = Sxz - Szx
SxymSyx = Sxy - Syx
SxxpSyy = Sxx + Syy
SxxmSyy = Sxx - Syy
Sxy2Sxz2Syx2Szx2 = Sxy2 + Sxz2 - Syx2 - Szx2
negSxzpSzx = -SxzpSzx
negSxzmSzx = -SxzmSzx
negSxymSyx = -SxymSyx
SxxpSyy_p_Szz = SxxpSyy + Szz
C0 = (
Sxy2Sxz2Syx2Szx2 * Sxy2Sxz2Syx2Szx2
+ (Sxx2Syy2Szz2Syz2Szy2 + SyzSzymSyySzz2)
* (Sxx2Syy2Szz2Syz2Szy2 - SyzSzymSyySzz2)
+ (negSxzpSzx * (SyzmSzy) + (SxymSyx) * (SxxmSyy - Szz))
* (negSxzmSzx * (SyzpSzy) + (SxymSyx) * (SxxmSyy + Szz))
+ (negSxzpSzx * (SyzpSzy) - (SxypSyx) * (SxxpSyy - Szz))
* (negSxzmSzx * (SyzmSzy) - (SxypSyx) * SxxpSyy_p_Szz)
+ (+(SxypSyx) * (SyzpSzy) + (SxzpSzx) * (SxxmSyy + Szz))
* (negSxymSyx * (SyzmSzy) + (SxzpSzx) * SxxpSyy_p_Szz)
+ (+(SxypSyx) * (SyzmSzy) + (SxzmSzx) * (SxxmSyy - Szz))
* (negSxymSyx * (SyzpSzy) + (SxzmSzx) * (SxxpSyy - Szz))
)
# Find largest root of the quaternion polynomial:
# f(x) = x ** 4 + c2 * x ** 2 + c1 * x + c0 = 0 (eq. 8 of Theobald et al.)
# f'(x) = 4 * x ** 3 + 2 * c2 * x + c1
#
    # using Newton-Raphson and E0 as initial guess. Liu et al. mention 5
# iterations are sufficient (on average) for convergence up to 1e-6
# precision but original code writes 50, which we keep.
nr_it = 50
mxEigenV = E0 # starting guess (x in eqs above)
evalprec = 1e-11 # convergence criterion
for _ in range(nr_it):
oldg = mxEigenV
x2 = mxEigenV * mxEigenV
b = (x2 + C2) * mxEigenV
a = b + C1
f = a * mxEigenV + C0
f_prime = 2.0 * x2 * mxEigenV + b + a
delta = f / (f_prime + evalprec) # avoid division by zero
mxEigenV = abs(mxEigenV - delta)
        if abs(mxEigenV - oldg) < (evalprec * mxEigenV):
break # convergence
else:
        print(f"Newton-Raphson did not converge after {nr_it} iterations")
# The original code has a guard if minScore > 0 and rmsd < minScore, although
# the default value of minScore is -1. For simplicity, we ignore that check.
rmsd = (2.0 * abs(E0 - mxEigenV) / natoms) ** 0.5
a11 = SxxpSyy + Szz - mxEigenV
a12 = SyzmSzy
a13 = negSxzmSzx
a14 = SxymSyx
a21 = SyzmSzy
a22 = SxxmSyy - Szz - mxEigenV
a23 = SxypSyx
a24 = SxzpSzx
a31 = a13
a32 = a23
a33 = Syy - Sxx - Szz - mxEigenV
a34 = SyzpSzy
a41 = a14
a42 = a24
a43 = a34
a44 = Szz - SxxpSyy - mxEigenV
a3344_4334 = a33 * a44 - a43 * a34
a3244_4234 = a32 * a44 - a42 * a34
a3243_4233 = a32 * a43 - a42 * a33
a3143_4133 = a31 * a43 - a41 * a33
a3144_4134 = a31 * a44 - a41 * a34
a3142_4132 = a31 * a42 - a41 * a32
q1 = a22 * a3344_4334 - a23 * a3244_4234 + a24 * a3243_4233
q2 = -a21 * a3344_4334 + a23 * a3144_4134 - a24 * a3143_4133
q3 = a21 * a3244_4234 - a22 * a3144_4134 + a24 * a3142_4132
q4 = -a21 * a3243_4233 + a22 * a3143_4133 - a23 * a3142_4132
qsqr = q1 * q1 + q2 * q2 + q3 * q3 + q4 * q4
evecprec = 1e-6
if qsqr < evecprec:
q1 = a12 * a3344_4334 - a13 * a3244_4234 + a14 * a3243_4233
q2 = -a11 * a3344_4334 + a13 * a3144_4134 - a14 * a3143_4133
q3 = a11 * a3244_4234 - a12 * a3144_4134 + a14 * a3142_4132
q4 = -a11 * a3243_4233 + a12 * a3143_4133 - a13 * a3142_4132
qsqr = q1 * q1 + q2 * q2 + q3 * q3 + q4 * q4
if qsqr < evecprec:
a1324_1423 = a13 * a24 - a14 * a23
a1224_1422 = a12 * a24 - a14 * a22
a1223_1322 = a12 * a23 - a13 * a22
a1124_1421 = a11 * a24 - a14 * a21
a1123_1321 = a11 * a23 - a13 * a21
a1122_1221 = a11 * a22 - a12 * a21
q1 = a42 * a1324_1423 - a43 * a1224_1422 + a44 * a1223_1322
q2 = -a41 * a1324_1423 + a43 * a1124_1421 - a44 * a1123_1321
q3 = a41 * a1224_1422 - a42 * a1124_1421 + a44 * a1122_1221
q4 = -a41 * a1223_1322 + a42 * a1123_1321 - a43 * a1122_1221
qsqr = q1 * q1 + q2 * q2 + q3 * q3 + q4 * q4
if qsqr < evecprec:
q1 = a32 * a1324_1423 - a33 * a1224_1422 + a34 * a1223_1322
q2 = -a31 * a1324_1423 + a33 * a1124_1421 - a34 * a1123_1321
q3 = a31 * a1224_1422 - a32 * a1124_1421 + a34 * a1122_1221
q4 = -a31 * a1223_1322 + a32 * a1123_1321 - a33 * a1122_1221
qsqr = q1 * q1 + q2 * q2 + q3 * q3 + q4 * q4
if qsqr < evecprec:
rot = np.eye(3)
return rmsd, rot, [q1, q2, q3, q4]
normq = qsqr**0.5
q1 /= normq
q2 /= normq
q3 /= normq
q4 /= normq
a2 = q1 * q1
x2 = q2 * q2
y2 = q3 * q3
z2 = q4 * q4
xy = q2 * q3
az = q1 * q4
zx = q4 * q2
ay = q1 * q3
yz = q3 * q4
ax = q1 * q2
rot = np.zeros((3, 3))
rot[0][0] = a2 + x2 - y2 - z2
rot[0][1] = 2 * (xy + az)
rot[0][2] = 2 * (zx - ay)
rot[1][0] = 2 * (xy - az)
rot[1][1] = a2 - x2 + y2 - z2
rot[1][2] = 2 * (yz + ax)
rot[2][0] = 2 * (zx + ay)
rot[2][1] = 2 * (yz - ax)
rot[2][2] = a2 - x2 - y2 + z2
return rmsd, rot, (q1, q2, q3, q4) |
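A usage sketch for qcp; both coordinate sets must be centered on the origin, and (per the swap noted above) coords1 is the reference. Applying rot to the mobile set as row vectors is assumed here, matching how the QCPSuperimposer wrapper appears to use it.

import numpy as np

rng = np.random.default_rng(0)
coords_ref = rng.random((10, 3))
coords_mob = rng.random((10, 3))
coords_ref -= coords_ref.mean(axis=0)  # center both sets on the origin
coords_mob -= coords_mob.mean(axis=0)
rmsd, rot, quat = qcp(coords_ref, coords_mob, 10)
superposed = np.dot(coords_mob, rot)  # rotate mobile onto reference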
Translate an atom object to an atomic radius defined in MSMS (PRIVATE).
Uses information from the parent residue and the atom object to define
the atom type.
Returns the radius (float) according to the selected type:
- explicit (reads hydrogens)
- united (default) | def _get_atom_radius(atom, rtype="united"):
"""Translate an atom object to an atomic radius defined in MSMS (PRIVATE).
Uses information from the parent residue and the atom object to define
the atom type.
Returns the radius (float) according to the selected type:
- explicit (reads hydrogens)
- united (default)
"""
if rtype == "explicit":
typekey = 1
elif rtype == "united":
typekey = 2
else:
raise ValueError(
f"Radius type ({rtype!r}) not understood. Must be 'explicit' or 'united'"
)
resname = atom.parent.resname
het_atm = atom.parent.id[0]
at_name = atom.name
at_elem = atom.element
# Hydrogens
if at_elem == "H" or at_elem == "D":
return _atomic_radii[15][typekey]
# HETATMs
elif het_atm == "W" and at_elem == "O":
return _atomic_radii[2][typekey]
elif het_atm != " " and at_elem == "CA":
return _atomic_radii[18][typekey]
elif het_atm != " " and at_elem == "CD":
return _atomic_radii[22][typekey]
elif resname == "ACE" and at_name == "CA":
return _atomic_radii[9][typekey]
# Main chain atoms
elif at_name == "N":
return _atomic_radii[4][typekey]
elif at_name == "CA":
return _atomic_radii[7][typekey]
elif at_name == "C":
return _atomic_radii[10][typekey]
elif at_name == "O":
return _atomic_radii[1][typekey]
elif at_name == "P":
return _atomic_radii[13][typekey]
# CB atoms
elif at_name == "CB" and resname == "ALA":
return _atomic_radii[9][typekey]
elif at_name == "CB" and resname in {"ILE", "THR", "VAL"}:
return _atomic_radii[7][typekey]
elif at_name == "CB":
return _atomic_radii[8][typekey]
# CG atoms
    elif at_name == "CG" and resname in {
        "ASN",
        "ASP",
        "ASX",
        "HIS",
        "HIP",
        "HIE",
        "HID",
        "HISN",
        "HISL",
        "PHE",
        "TRP",
        "TYR",
    }:
return _atomic_radii[10][typekey]
elif at_name == "CG" and resname == "LEU":
return _atomic_radii[7][typekey]
elif at_name == "CG":
return _atomic_radii[8][typekey]
# General amino acids in alphabetical order
elif resname == "GLN" and at_elem == "O":
return _atomic_radii[3][typekey]
elif resname == "ACE" and at_name == "CH3":
return _atomic_radii[9][typekey]
elif resname == "ARG" and at_name == "CD":
return _atomic_radii[8][typekey]
elif resname == "ARG" and at_name in {"NE", "RE"}:
return _atomic_radii[4][typekey]
elif resname == "ARG" and at_name == "CZ":
return _atomic_radii[10][typekey]
elif resname == "ARG" and at_name.startswith(("NH", "RH")):
return _atomic_radii[5][typekey]
elif resname == "ASN" and at_name == "OD1":
return _atomic_radii[1][typekey]
elif resname == "ASN" and at_name == "ND2":
return _atomic_radii[5][typekey]
elif resname == "ASN" and at_name.startswith("AD"):
return _atomic_radii[3][typekey]
elif resname == "ASP" and at_name.startswith(("OD", "ED")):
return _atomic_radii[3][typekey]
elif resname == "ASX" and at_name.startswith("OD1"):
return _atomic_radii[1][typekey]
elif resname == "ASX" and at_name == "ND2":
return _atomic_radii[3][typekey]
elif resname == "ASX" and at_name.startswith(("OD", "AD")):
return _atomic_radii[3][typekey]
elif resname in {"CYS", "CYX", "CYM"} and at_name == "SG":
return _atomic_radii[13][typekey]
elif resname in {"CYS", "MET"} and at_name.startswith("LP"):
return _atomic_radii[13][typekey]
elif resname == "CUH" and at_name == "SG":
return _atomic_radii[12][typekey]
elif resname == "GLU" and at_name.startswith(("OE", "EE")):
return _atomic_radii[3][typekey]
elif resname in {"GLU", "GLN", "GLX"} and at_name == "CD":
return _atomic_radii[10][typekey]
elif resname == "GLN" and at_name == "OE1":
return _atomic_radii[1][typekey]
elif resname == "GLN" and at_name == "NE2":
return _atomic_radii[5][typekey]
elif resname in {"GLN", "GLX"} and at_name.startswith("AE"):
return _atomic_radii[3][typekey]
    # Histidines and friends
# There are 4 kinds of HIS rings: HIS (no protons), HID (proton on Delta),
# HIE (proton on epsilon), and HIP (protons on both)
# Protonated nitrogens are numbered 4, else 14
# HIS is treated here as the same as HIE
#
# HISL is a deprotonated HIS (the L means liganded)
elif resname in {"HIS", "HID", "HIE", "HIP", "HISL"} and at_name in {"CE1", "CD2"}:
return _atomic_radii[11][typekey]
elif resname in {"HIS", "HID", "HIE", "HISL"} and at_name == "ND1":
return _atomic_radii[14][typekey]
elif resname in {"HID", "HIP"} and at_name in {"ND1", "RD1"}:
return _atomic_radii[4][typekey]
elif resname in {"HIS", "HIE", "HIP"} and at_name in {"NE2", "RE2"}:
return _atomic_radii[4][typekey]
elif resname in {"HID", "HISL"} and at_name in {"NE2", "RE2"}:
return _atomic_radii[14][typekey]
elif resname in {"HIS", "HID", "HIP", "HISL"} and at_name.startswith(("AD", "AE")):
return _atomic_radii[4][typekey]
# More amino acids
elif resname == "ILE" and at_name == "CG1":
return _atomic_radii[8][typekey]
elif resname == "ILE" and at_name == "CG2":
return _atomic_radii[9][typekey]
elif resname == "ILE" and at_name in {"CD", "CD1"}:
return _atomic_radii[9][typekey]
elif resname == "LEU" and at_name.startswith("CD"):
return _atomic_radii[9][typekey]
elif resname == "LYS" and at_name in {"CG", "CD", "CE"}:
return _atomic_radii[8][typekey]
elif resname == "LYS" and at_name in {"NZ", "KZ"}:
return _atomic_radii[6][typekey]
elif resname == "MET" and at_name == "SD":
return _atomic_radii[13][typekey]
elif resname == "MET" and at_name == "CE":
return _atomic_radii[9][typekey]
elif resname == "PHE" and at_name.startswith(("CD", "CE", "CZ")):
return _atomic_radii[11][typekey]
elif resname == "PRO" and at_name in {"CG", "CD"}:
return _atomic_radii[8][typekey]
elif resname == "CSO" and at_name in {"SE", "SEG"}:
return _atomic_radii[9][typekey]
elif resname == "CSO" and at_name.startswith("OD"):
return _atomic_radii[3][typekey]
elif resname == "SER" and at_name == "OG":
return _atomic_radii[2][typekey]
elif resname == "THR" and at_name == "OG1":
return _atomic_radii[2][typekey]
elif resname == "THR" and at_name == "CG2":
return _atomic_radii[9][typekey]
elif resname == "TRP" and at_name == "CD1":
return _atomic_radii[11][typekey]
elif resname == "TRP" and at_name in {"CD2", "CE2"}:
return _atomic_radii[10][typekey]
elif resname == "TRP" and at_name == "NE1":
return _atomic_radii[4][typekey]
elif resname == "TRP" and at_name in {"CE3", "CZ2", "CZ3", "CH2"}:
return _atomic_radii[11][typekey]
elif resname == "TYR" and at_name in {"CD1", "CD2", "CE1", "CE2"}:
return _atomic_radii[11][typekey]
elif resname == "TYR" and at_name == "CZ":
return _atomic_radii[10][typekey]
elif resname == "TYR" and at_name == "OH":
return _atomic_radii[2][typekey]
elif resname == "VAL" and at_name in {"CG1", "CG2"}:
return _atomic_radii[9][typekey]
elif at_name == "CD":
return _atomic_radii[8][typekey]
# Co-factors, and other weirdos
elif (
resname in {"FS3", "FS4"}
and at_name.startswith("FE")
and at_name.endswith(("1", "2", "3", "4", "5", "6", "7"))
):
return _atomic_radii[21][typekey]
elif (
resname in {"FS3", "FS4"}
and at_name.startswith("S")
and at_name.endswith(("1", "2", "3", "4", "5", "6", "7"))
):
return _atomic_radii[13][typekey]
elif resname == "FS3" and at_name == "OXO":
return _atomic_radii[1][typekey]
elif resname == "FEO" and at_name in {"FE1", "FE2"}:
return _atomic_radii[21][typekey]
elif resname == "HEM" and at_name in {"O1", "O2"}:
return _atomic_radii[1][typekey]
elif resname == "HEM" and at_name == "FE":
return _atomic_radii[21][typekey]
elif resname == "HEM" and at_name in {
"CHA",
"CHB",
"CHC",
"CHD",
"CAB",
"CAC",
"CBB",
"CBC",
}:
return _atomic_radii[11][typekey]
elif resname == "HEM" and at_name in {
"NA",
"NB",
"NC",
"ND",
"N A",
"N B",
"N C",
"N D",
}:
return _atomic_radii[14][typekey]
elif resname == "HEM" and at_name in {
"C1A",
"C1B",
"C1C",
"C1D",
"C2A",
"C2B",
"C2C",
"C2D",
"C3A",
"C3B",
"C3C",
"C3D",
"C4A",
"C4B",
"C4C",
"C4D",
"CGA",
"CGD",
}:
return _atomic_radii[10][typekey]
elif resname == "HEM" and at_name in {"CMA", "CMB", "CMC", "CMD"}:
return _atomic_radii[9][typekey]
elif resname == "HEM" and at_name == "OH2":
return _atomic_radii[2][typekey]
elif resname == "AZI" and at_name in {"N1", "N2", "N3"}:
return _atomic_radii[14][typekey]
elif resname == "MPD" and at_name in {"C1", "C5", "C6"}:
return _atomic_radii[9][typekey]
elif resname == "MPD" and at_name == "C2":
return _atomic_radii[10][typekey]
elif resname == "MPD" and at_name == "C3":
return _atomic_radii[8][typekey]
elif resname == "MPD" and at_name == "C4":
return _atomic_radii[7][typekey]
elif resname == "MPD" and at_name in {"O7", "O8"}:
return _atomic_radii[2][typekey]
elif resname in {"SO4", "SUL"} and at_name == "S":
return _atomic_radii[13][typekey]
elif resname in {"SO4", "SUL", "PO4", "PHO"} and at_name in {
"O1",
"O2",
"O3",
"O4",
}:
return _atomic_radii[3][typekey]
elif resname == "PC " and at_name in {"O1", "O2", "O3", "O4"}:
return _atomic_radii[3][typekey]
elif resname == "PC " and at_name == "P1":
return _atomic_radii[13][typekey]
elif resname == "PC " and at_name in {"C1", "C2"}:
return _atomic_radii[8][typekey]
elif resname == "PC " and at_name in {"C3", "C4", "C5"}:
return _atomic_radii[9][typekey]
elif resname == "PC " and at_name == "N1":
return _atomic_radii[14][typekey]
elif resname == "BIG" and at_name == "BAL":
return _atomic_radii[17][typekey]
elif resname in {"POI", "DOT"} and at_name in {"POI", "DOT"}:
return _atomic_radii[23][typekey]
elif resname == "FMN" and at_name in {"N1", "N5", "N10"}:
return _atomic_radii[4][typekey]
elif resname == "FMN" and at_name in {
"C2",
"C4",
"C7",
"C8",
"C10",
"C4A",
"C5A",
"C9A",
}:
return _atomic_radii[10][typekey]
elif resname == "FMN" and at_name in {"O2", "O4"}:
return _atomic_radii[1][typekey]
elif resname == "FMN" and at_name == "N3":
return _atomic_radii[14][typekey]
elif resname == "FMN" and at_name in {"C6", "C9"}:
return _atomic_radii[11][typekey]
elif resname == "FMN" and at_name in {"C7M", "C8M"}:
return _atomic_radii[9][typekey]
elif resname == "FMN" and at_name.startswith(("C1", "C2", "C3", "C4", "C5")):
return _atomic_radii[8][typekey]
elif resname == "FMN" and at_name.startswith(("O2", "O3", "O4")):
return _atomic_radii[2][typekey]
elif resname == "FMN" and at_name.startswith("O5"):
return _atomic_radii[3][typekey]
elif resname == "FMN" and at_name in {"OP1", "OP2", "OP3"}:
return _atomic_radii[3][typekey]
elif resname in {"ALK", "MYR"} and at_name == "OT1":
return _atomic_radii[3][typekey]
elif resname in {"ALK", "MYR"} and at_name == "C01":
return _atomic_radii[10][typekey]
elif resname == "ALK" and at_name == "C16":
return _atomic_radii[9][typekey]
elif resname == "MYR" and at_name == "C14":
return _atomic_radii[9][typekey]
elif resname in {"ALK", "MYR"} and at_name.startswith("C"):
return _atomic_radii[8][typekey]
# Metals
elif at_elem == "CU":
return _atomic_radii[20][typekey]
elif at_elem == "ZN":
return _atomic_radii[19][typekey]
elif at_elem == "MN":
return _atomic_radii[27][typekey]
elif at_elem == "FE":
return _atomic_radii[25][typekey]
elif at_elem == "MG":
return _atomic_radii[26][typekey]
elif at_elem == "CO":
return _atomic_radii[28][typekey]
elif at_elem == "SE":
return _atomic_radii[29][typekey]
elif at_elem == "YB":
return _atomic_radii[31][typekey]
# Others
elif at_name == "SEG":
return _atomic_radii[9][typekey]
elif at_name == "OXT":
return _atomic_radii[3][typekey]
# Catch-alls
elif at_name.startswith(("OT", "E")):
return _atomic_radii[3][typekey]
elif at_name.startswith("S"):
return _atomic_radii[13][typekey]
elif at_name.startswith("C"):
return _atomic_radii[7][typekey]
elif at_name.startswith("A"):
return _atomic_radii[11][typekey]
elif at_name.startswith("O"):
return _atomic_radii[1][typekey]
elif at_name.startswith(("N", "R")):
return _atomic_radii[4][typekey]
elif at_name.startswith("K"):
return _atomic_radii[6][typekey]
elif at_name in {"PA", "PB", "PC", "PD"}:
return _atomic_radii[13][typekey]
elif at_name.startswith("P"):
return _atomic_radii[13][typekey]
elif resname in {"FAD", "NAD", "AMX", "APU"} and at_name.startswith("O"):
return _atomic_radii[1][typekey]
elif resname in {"FAD", "NAD", "AMX", "APU"} and at_name.startswith("N"):
return _atomic_radii[4][typekey]
elif resname in {"FAD", "NAD", "AMX", "APU"} and at_name.startswith("C"):
return _atomic_radii[7][typekey]
elif resname in {"FAD", "NAD", "AMX", "APU"} and at_name.startswith("P"):
return _atomic_radii[13][typekey]
elif resname in {"FAD", "NAD", "AMX", "APU"} and at_name.startswith("H"):
return _atomic_radii[15][typekey]
else:
warnings.warn(f"{at_name}:{resname} not in radii library.", BiopythonWarning)
return 0.01 |
Read the vertex list into a NumPy array (PRIVATE). | def _read_vertex_array(filename):
"""Read the vertex list into a NumPy array (PRIVATE)."""
with open(filename) as fp:
vertex_list = []
for line in fp:
sl = line.split()
if len(sl) != 9:
# skip header
continue
vl = [float(x) for x in sl[0:3]]
vertex_list.append(vl)
return np.array(vertex_list) |
Represent molecular surface as a vertex list array.
Return a NumPy array that represents the vertex list of the
molecular surface.
Arguments:
- model - Biopython PDB model object (used to get atoms for input model)
- MSMS - msms executable (used as argument to subprocess.call) | def get_surface(model, MSMS="msms"):
"""Represent molecular surface as a vertex list array.
Return a NumPy array that represents the vertex list of the
molecular surface.
Arguments:
    - model - Biopython PDB model object (used to get atoms for input model)
- MSMS - msms executable (used as argument to subprocess.call)
"""
# Replace pdb_to_xyzr
# Make x,y,z,radius file
atom_list = Selection.unfold_entities(model, "A")
xyz_tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(xyz_tmp, "w") as pdb_to_xyzr:
for atom in atom_list:
x, y, z = atom.coord
radius = _get_atom_radius(atom, rtype="united")
pdb_to_xyzr.write(f"{x:6.3f}\t{y:6.3f}\t{z:6.3f}\t{radius:1.2f}\n")
# Make surface
surface_tmp = tempfile.NamedTemporaryFile(delete=False).name
msms_tmp = tempfile.NamedTemporaryFile(delete=False).name
MSMS = MSMS + " -probe_radius 1.5 -if %s -of %s > " + msms_tmp
make_surface = MSMS % (xyz_tmp, surface_tmp)
subprocess.call(make_surface, shell=True)
face_file = surface_tmp + ".face"
surface_file = surface_tmp + ".vert"
if not os.path.isfile(surface_file):
raise RuntimeError(
f"Failed to generate surface file using command:\n{make_surface}"
)
# Read surface vertices from vertex file
surface = _read_vertex_array(surface_file)
# Remove temporary files
for fn in [xyz_tmp, surface_tmp, msms_tmp, face_file, surface_file]:
try:
os.remove(fn)
except OSError:
pass
return surface |
Return minimum distance between coord and surface. | def min_dist(coord, surface):
"""Return minimum distance between coord and surface."""
d = surface - coord
d2 = np.sum(d * d, 1)
return np.sqrt(min(d2)) |
Residue depth as average depth of all its atoms.
Return average distance to surface for all atoms in a residue,
i.e. the residue depth. | def residue_depth(residue, surface):
"""Residue depth as average depth of all its atoms.
Return average distance to surface for all atoms in a residue,
ie. the residue depth.
"""
atom_list = residue.get_unpacked_list()
length = len(atom_list)
d = 0
for atom in atom_list:
coord = atom.get_coord()
d = d + min_dist(coord, surface)
return d / length |
Return CA depth. | def ca_depth(residue, surface):
"""Return CA depth."""
if not residue.has_id("CA"):
return None
ca = residue["CA"]
coord = ca.get_coord()
return min_dist(coord, surface) |
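A sketch of the residue-depth workflow; it requires the external "msms" binary and uses "myprot.pdb" as a placeholder filename.

from Bio.PDB import PDBParser

model = PDBParser(QUIET=True).get_structure("X", "myprot.pdb")[0]
surface = get_surface(model)
for residue in model.get_residues():
    if is_aa(residue):
        rd = residue_depth(residue, surface)  # mean depth over all atoms
        cad = ca_depth(residue, surface)  # None if the residue has no CA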
Write hedron assembly to file as OpenSCAD matrices.
This routine calls both :meth:`.IC_Chain.internal_to_atom_coordinates` and
:meth:`.IC_Chain.atom_to_internal_coordinates` due to requirements for
scaling, explicit bonds around rings, and setting the coordinate space of
the output model.
Output data format is primarily:
- matrix for each hedron:
len1, angle2, len3, atom covalent bond class, flags to indicate
atom/bond represented in previous hedron (OpenSCAD very slow with
redundant overlapping elements), flags for bond features
- transform matrices to assemble each hedron into residue dihedra sets
- transform matrices for each residue to position in chain
OpenSCAD software is included in this Python file to process these
matrices into a model suitable for a 3D printing project.
:param entity: Biopython PDB :class:`.Structure` entity
structure data to export
:param file: Biopython :func:`.as_handle` filename or open file pointer
file to write data to
:param float scale:
units (usually mm) per angstrom for STL output, written in output
:param str pdbid:
PDB idcode, written in output. Defaults to '0PDB' if not supplied
and no 'idcode' set in entity
:param bool backboneOnly: default False.
Do not output side chain data past Cbeta if True
:param bool includeCode: default True.
Include OpenSCAD software (inline below) so output file can be loaded
into OpenSCAD; if False, output data matrices only
:param float maxPeptideBond: Optional default None.
Override the cut-off in IC_Chain class (default 1.4) for detecting
chain breaks. If your target has chain breaks, pass a large number
here to create a very long 'bond' spanning the break.
:param int start,fin: default None
Parameters for internal_to_atom_coords() to limit chain segment.
:param str handle: default 'protein'
name for top level of generated OpenSCAD matrix structure
See :meth:`.IC_Residue.set_flexible` to set flags for specific residues to
have rotatable bonds, and :meth:`.IC_Residue.set_hbond` to include cavities
for small magnets to work as hydrogen bonds.
See <https://www.thingiverse.com/thing:3957471> for implementation example.
The OpenSCAD code explicitly creates spheres and cylinders to
represent atoms and bonds in a 3D model. Options are available
to support rotatable bonds and magnetic hydrogen bonds.
Matrices are written to link, enumerate and describe residues,
dihedra, hedra, and chains, mirroring contents of the relevant IC_*
data structures.
The OpenSCAD matrix of hedra has additional information as follows:
* the atom and bond state (single, double, resonance) are logged
so that covalent radii may be used for atom spheres in the 3D models
* bonds and atoms are tracked so that each is only created once
* bond options for rotation and magnet holders for hydrogen bonds
may be specified (see :meth:`.IC_Residue.set_flexible` and
:meth:`.IC_Residue.set_hbond` )
Note the application of :data:`Bio.PDB.internal_coords.IC_Chain.MaxPeptideBond`
: missing residues may be linked (joining chain segments with arbitrarily
long bonds) by setting this to a large value.
Note this uses the serial assembly per residue, placing each residue at
the origin and supplying the coordinate space transform to OpenSCAD.
All ALTLOC (disordered) residues and atoms are written to the output
model. (see :data:`Bio.PDB.internal_coords.IC_Residue.no_altloc`) | def write_SCAD(
entity,
file,
scale=None,
pdbid=None,
backboneOnly=False,
includeCode=True,
maxPeptideBond=None,
start=None,
fin=None,
handle="protein",
):
"""Write hedron assembly to file as OpenSCAD matrices.
This routine calls both :meth:`.IC_Chain.internal_to_atom_coordinates` and
:meth:`.IC_Chain.atom_to_internal_coordinates` due to requirements for
scaling, explicit bonds around rings, and setting the coordinate space of
the output model.
Output data format is primarily:
- matrix for each hedron:
len1, angle2, len3, atom covalent bond class, flags to indicate
atom/bond represented in previous hedron (OpenSCAD very slow with
redundant overlapping elements), flags for bond features
- transform matrices to assemble each hedron into residue dihedra sets
- transform matrices for each residue to position in chain
OpenSCAD software is included in this Python file to process these
matrices into a model suitable for a 3D printing project.
:param entity: Biopython PDB :class:`.Structure` entity
structure data to export
    :param file: Biopython :func:`.as_handle` filename or open file pointer
file to write data to
:param float scale:
units (usually mm) per angstrom for STL output, written in output
:param str pdbid:
PDB idcode, written in output. Defaults to '0PDB' if not supplied
and no 'idcode' set in entity
:param bool backboneOnly: default False.
Do not output side chain data past Cbeta if True
:param bool includeCode: default True.
Include OpenSCAD software (inline below) so output file can be loaded
into OpenSCAD; if False, output data matrices only
:param float maxPeptideBond: Optional default None.
Override the cut-off in IC_Chain class (default 1.4) for detecting
chain breaks. If your target has chain breaks, pass a large number
here to create a very long 'bond' spanning the break.
:param int start,fin: default None
Parameters for internal_to_atom_coords() to limit chain segment.
:param str handle: default 'protein'
name for top level of generated OpenSCAD matrix structure
See :meth:`.IC_Residue.set_flexible` to set flags for specific residues to
have rotatable bonds, and :meth:`.IC_Residue.set_hbond` to include cavities
for small magnets to work as hydrogen bonds.
See <https://www.thingiverse.com/thing:3957471> for implementation example.
The OpenSCAD code explicitly creates spheres and cylinders to
represent atoms and bonds in a 3D model. Options are available
to support rotatable bonds and magnetic hydrogen bonds.
Matrices are written to link, enumerate and describe residues,
dihedra, hedra, and chains, mirroring contents of the relevant IC_*
data structures.
The OpenSCAD matrix of hedra has additional information as follows:
* the atom and bond state (single, double, resonance) are logged
so that covalent radii may be used for atom spheres in the 3D models
* bonds and atoms are tracked so that each is only created once
* bond options for rotation and magnet holders for hydrogen bonds
may be specified (see :meth:`.IC_Residue.set_flexible` and
:meth:`.IC_Residue.set_hbond` )
Note the application of :data:`Bio.PDB.internal_coords.IC_Chain.MaxPeptideBond`
: missing residues may be linked (joining chain segments with arbitrarily
long bonds) by setting this to a large value.
Note this uses the serial assembly per residue, placing each residue at
    the origin and supplying the coordinate space transform to OpenSCAD.
All ALTLOC (disordered) residues and atoms are written to the output
model. (see :data:`Bio.PDB.internal_coords.IC_Residue.no_altloc`)
"""
if maxPeptideBond is not None:
mpbStash = IC_Chain.MaxPeptideBond
IC_Chain.MaxPeptideBond = float(maxPeptideBond)
# step one need IC_Residue atom_coords loaded in order to scale
# so if no internal_coords, initialise from Atom coordinates
added_IC_Atoms = False
if "S" == entity.level or "M" == entity.level:
for chn in entity.get_chains():
if not chn.internal_coord:
chn.internal_coord = IC_Chain(chn)
added_IC_Atoms = True
elif "C" == entity.level:
        if not entity.internal_coord:
entity.internal_coord = IC_Chain(entity)
added_IC_Atoms = True
else:
raise PDBException("level not S, M or C: " + str(entity.level))
if added_IC_Atoms:
# if loaded pdb, need to scale, and asm, gen atomArray
entity.atom_to_internal_coordinates()
else:
# if loaded pic file and need to scale, generate atom coords
entity.internal_to_atom_coordinates(None)
if scale is not None:
scaleMtx = homog_scale_mtx(scale)
if "C" == entity.level:
entity.internal_coord.atomArray = np.dot(
entity.internal_coord.atomArray[:], scaleMtx
)
entity.internal_coord.hAtoms_needs_update[:] = True
entity.internal_coord.scale = scale
else:
for chn in entity.get_chains():
if hasattr(chn.internal_coord, "atomArray"):
chn.internal_coord.atomArray = np.dot(
chn.internal_coord.atomArray[:], scaleMtx
)
chn.internal_coord.hAtoms_needs_update[:] = True
chn.internal_coord.scale = scale
# generate internal coords for scaled entity
# (hedron bond lengths have changed if scaled)
# if not scaling, still need to generate internal coordinate
# bonds for ring sidechains
# AllBonds is a class attribute for IC_Residue.atom_to_internal_coordinates
# to generate explicit hedra covering all bonds
allBondsStash = IC_Residue._AllBonds
IC_Residue._AllBonds = True
# trigger rebuild of hedra for AllBonds
if "C" == entity.level:
entity.internal_coord.ordered_aa_ic_list[0].hedra = {}
delattr(entity.internal_coord, "hAtoms_needs_update")
delattr(entity.internal_coord, "hedraLen")
else:
for chn in entity.get_chains():
chn.internal_coord.ordered_aa_ic_list[0].hedra = {}
delattr(chn.internal_coord, "hAtoms_needs_update")
delattr(chn.internal_coord, "hedraLen")
entity.atom_to_internal_coordinates()
IC_Residue._AllBonds = allBondsStash
# rebuild atom coordinates now with chain starting at origin: in OpenSCAD
# code, each residue model is transformed to N-Ca-C start position instead
# of updating transform matrix along chain
entity.internal_to_atom_coordinates()
with as_handle(file, "w") as fp:
if includeCode:
fp.write(peptide_scad)
if not pdbid and hasattr(entity, "header"):
pdbid = entity.header.get("idcode", None)
if pdbid is None or "" == pdbid:
pdbid = "0PDB"
fp.write(
'protein = [ "' + pdbid + '", ' + str(scale) + ", // ID, protein_scale\n"
)
if "S" == entity.level or "M" == entity.level:
for chn in entity.get_chains():
fp.write(" [\n")
chn.internal_coord._write_SCAD(
fp, backboneOnly=backboneOnly, start=start, fin=fin
)
fp.write(" ]\n")
elif "C" == entity.level:
fp.write(" [\n")
entity.internal_coord._write_SCAD(
fp, backboneOnly=backboneOnly, start=start, fin=fin
)
fp.write(" ]\n")
elif "R" == entity.level:
raise NotImplementedError("writescad single residue not yet implemented.")
fp.write("\n];\n")
if maxPeptideBond is not None:
IC_Chain.MaxPeptideBond = mpbStash |
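A hedged usage sketch for write_SCAD; "1abc.pdb" is a placeholder filename and scale=10.0 is an illustrative choice of mm per Angstrom for printing.

from Bio.PDB import PDBParser

structure = PDBParser(QUIET=True).get_structure("1ABC", "1abc.pdb")
chain = next(structure.get_chains())  # write_SCAD accepts S, M or C level
write_SCAD(chain, "1abc.scad", scale=10.0, backboneOnly=True)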
Return a list of the unique items in the given iterable.
Order is NOT preserved. | def uniqueify(items):
"""Return a list of the unique items in the given iterable.
Order is NOT preserved.
"""
return list(set(items)) |
Translate a list of entities to a list of their (unique) parents. | def get_unique_parents(entity_list):
"""Translate a list of entities to a list of their (unique) parents."""
unique_parents = {entity.get_parent() for entity in entity_list}
return list(unique_parents) |
Unfold entities list to a child level (e.g. residues in chain).
Unfold a list of entities to a list of entities of another
level. E.g.:
list of atoms -> list of residues
list of residues -> list of atoms
list of residues -> list of chains
- entity_list - list of entities or a single entity
- target_level - char (A, R, C, M, S)
Note that if entity_list is an empty list, you get an empty list back:
>>> unfold_entities([], "A")
[] | def unfold_entities(entity_list, target_level):
"""Unfold entities list to a child level (e.g. residues in chain).
Unfold a list of entities to a list of entities of another
level. E.g.:
list of atoms -> list of residues
    list of residues -> list of atoms
list of residues -> list of chains
- entity_list - list of entities or a single entity
- target_level - char (A, R, C, M, S)
Note that if entity_list is an empty list, you get an empty list back:
>>> unfold_entities([], "A")
[]
"""
if target_level not in entity_levels:
raise PDBException(f"{target_level}: Not an entity level.")
if entity_list == []:
return []
if isinstance(entity_list, (Entity, Atom)):
entity_list = [entity_list]
level = entity_list[0].get_level()
if not all(entity.get_level() == level for entity in entity_list):
raise PDBException("Entity list is not homogeneous.")
target_index = entity_levels.index(target_level)
level_index = entity_levels.index(level)
if level_index == target_index: # already right level
return entity_list
entities = entity_list
if level_index > target_index: # we're going down, e.g. S->A
for i in range(target_index, level_index):
entities = itertools.chain.from_iterable(entities)
else: # we're going up, e.g. A->S
for i in range(level_index, target_index):
# get unique parents by removing duplicates while preserving order
entities = {entity.get_parent(): None for entity in entities}
return list(entities) |
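A usage sketch for unfold_entities, with "1abc.pdb" as a placeholder filename.

from Bio.PDB import PDBParser

structure = PDBParser(QUIET=True).get_structure("1ABC", "1abc.pdb")
atoms = unfold_entities(structure, "A")  # down: structure -> list of atoms
chains = unfold_entities(atoms, "C")  # up: atoms -> their unique chains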
Return whether all atoms in the residue have a non-blank altloc (PRIVATE). | def _is_completely_disordered(residue: Residue) -> bool:
"""Return whether all atoms in the residue have a non-blank altloc (PRIVATE)."""
atom_list = residue.get_unpacked_list()
for atom in atom_list:
altloc = atom.get_altloc()
if altloc == " ":
return False
return True |
Return angles, axis pair that corresponds to rotation matrix m.
The case where ``m`` is the identity matrix corresponds to a singularity
where any rotation axis is valid. In that case, ``Vector([1, 0, 0])``,
is returned. | def m2rotaxis(m):
"""Return angles, axis pair that corresponds to rotation matrix m.
The case where ``m`` is the identity matrix corresponds to a singularity
where any rotation axis is valid. In that case, ``Vector([1, 0, 0])``,
is returned.
"""
eps = 1e-5
# Check for singularities a la
# http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToAngle/ # noqa
if (
abs(m[0, 1] - m[1, 0]) < eps
and abs(m[0, 2] - m[2, 0]) < eps
and abs(m[1, 2] - m[2, 1]) < eps
):
# Singularity encountered. Check if its 0 or 180 deg
if (
abs(m[0, 1] + m[1, 0]) < eps
and abs(m[0, 2] + m[2, 0]) < eps
and abs(m[1, 2] + m[2, 1]) < eps
and abs(m[0, 0] + m[1, 1] + m[2, 2] - 3) < eps
):
angle = 0
else:
angle = np.pi
else:
# Angle always between 0 and pi
# Sense of rotation is defined by axis orientation
t = 0.5 * (np.trace(m) - 1)
t = max(-1, t)
t = min(1, t)
angle = np.arccos(t)
if angle < 1e-15:
# Angle is 0
return 0.0, Vector(1, 0, 0)
elif angle < np.pi:
# Angle is smaller than pi
x = m[2, 1] - m[1, 2]
y = m[0, 2] - m[2, 0]
z = m[1, 0] - m[0, 1]
axis = Vector(x, y, z)
axis.normalize()
return angle, axis
else:
# Angle is pi - special case!
m00 = m[0, 0]
m11 = m[1, 1]
m22 = m[2, 2]
if m00 > m11 and m00 > m22:
x = np.sqrt(m00 - m11 - m22 + 0.5)
y = m[0, 1] / (2 * x)
z = m[0, 2] / (2 * x)
elif m11 > m00 and m11 > m22:
y = np.sqrt(m11 - m00 - m22 + 0.5)
x = m[0, 1] / (2 * y)
z = m[1, 2] / (2 * y)
else:
z = np.sqrt(m22 - m00 - m11 + 0.5)
x = m[0, 2] / (2 * z)
y = m[1, 2] / (2 * z)
axis = Vector(x, y, z)
axis.normalize()
return np.pi, axis |
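A round-trip sketch: build a rotation with rotaxis2m (defined below) and recover the angle/axis pair with m2rotaxis.

import numpy as np

m = rotaxis2m(np.pi / 4, Vector(1, 1, 0))
angle, axis = m2rotaxis(m)
# angle is ~pi/4 and axis is ~Vector(1, 1, 0) normalized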
Vector to axis method.
Return the vector between a point and
the closest point on a line (i.e. the perpendicular
projection of the point on the line).
:type line: L{Vector}
:param line: vector defining a line
:type point: L{Vector}
:param point: vector defining the point | def vector_to_axis(line, point):
"""Vector to axis method.
Return the vector between a point and
    the closest point on a line (i.e. the perpendicular
projection of the point on the line).
:type line: L{Vector}
:param line: vector defining a line
:type point: L{Vector}
:param point: vector defining the point
"""
    line = line.normalized()
    norm = point.norm()
    angle = line.angle(point)
    return point - line ** (norm * np.cos(angle))
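A quick check sketch for vector_to_axis; the result is perpendicular to the line.

line, point = Vector(0, 0, 1), Vector(1, 1, 1)
perp = vector_to_axis(line, point)  # ~ Vector(1, 1, 0)
assert abs(perp * line) < 1e-6  # Vector.__mul__ is the dot product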
Calculate left multiplying rotation matrix.
Calculate a left multiplying rotation matrix that rotates
theta rad around vector.
:type theta: float
:param theta: the rotation angle
:type vector: L{Vector}
:param vector: the rotation axis
:return: The rotation matrix, a 3x3 NumPy array.
Examples
--------
>>> from numpy import pi
>>> from Bio.PDB.vectors import rotaxis2m
>>> from Bio.PDB.vectors import Vector
>>> m = rotaxis2m(pi, Vector(1, 0, 0))
>>> Vector(1, 2, 3).left_multiply(m)
<Vector 1.00, -2.00, -3.00> | def rotaxis2m(theta, vector):
"""Calculate left multiplying rotation matrix.
Calculate a left multiplying rotation matrix that rotates
theta rad around vector.
:type theta: float
:param theta: the rotation angle
:type vector: L{Vector}
:param vector: the rotation axis
:return: The rotation matrix, a 3x3 NumPy array.
Examples
--------
>>> from numpy import pi
>>> from Bio.PDB.vectors import rotaxis2m
>>> from Bio.PDB.vectors import Vector
>>> m = rotaxis2m(pi, Vector(1, 0, 0))
>>> Vector(1, 2, 3).left_multiply(m)
<Vector 1.00, -2.00, -3.00>
"""
vector = vector.normalized()
c = np.cos(theta)
s = np.sin(theta)
t = 1 - c
x, y, z = vector.get_array()
rot = np.zeros((3, 3))
# 1st row
rot[0, 0] = t * x * x + c
rot[0, 1] = t * x * y - s * z
rot[0, 2] = t * x * z + s * y
# 2nd row
rot[1, 0] = t * x * y + s * z
rot[1, 1] = t * y * y + c
rot[1, 2] = t * y * z - s * x
# 3rd row
rot[2, 0] = t * x * z - s * y
rot[2, 1] = t * y * z + s * x
rot[2, 2] = t * z * z + c
return rot |
Return a (left multiplying) matrix that mirrors p onto q.
:type p,q: L{Vector}
:return: The mirror operation, a 3x3 NumPy array.
Examples
--------
>>> from Bio.PDB.vectors import refmat
>>> p, q = Vector(1, 2, 3), Vector(2, 3, 5)
>>> mirror = refmat(p, q)
>>> qq = p.left_multiply(mirror)
>>> print(q)
<Vector 2.00, 3.00, 5.00>
>>> print(qq)
<Vector 1.21, 1.82, 3.03> | def refmat(p, q):
"""Return a (left multiplying) matrix that mirrors p onto q.
:type p,q: L{Vector}
:return: The mirror operation, a 3x3 NumPy array.
Examples
--------
>>> from Bio.PDB.vectors import refmat
>>> p, q = Vector(1, 2, 3), Vector(2, 3, 5)
>>> mirror = refmat(p, q)
>>> qq = p.left_multiply(mirror)
>>> print(q)
<Vector 2.00, 3.00, 5.00>
>>> print(qq)
<Vector 1.21, 1.82, 3.03>
"""
p = p.normalized()
q = q.normalized()
if (p - q).norm() < 1e-5:
return np.identity(3)
pq = p - q
pq.normalize()
b = pq.get_array()
b.shape = (3, 1)
i = np.identity(3)
ref = i - 2 * np.dot(b, np.transpose(b))
return ref |
Return a (left multiplying) matrix that rotates p onto q.
:param p: moving vector
:type p: L{Vector}
:param q: fixed vector
:type q: L{Vector}
:return: rotation matrix that rotates p onto q
:rtype: 3x3 NumPy array
Examples
--------
>>> from Bio.PDB.vectors import rotmat
>>> p, q = Vector(1, 2, 3), Vector(2, 3, 5)
>>> r = rotmat(p, q)
>>> print(q)
<Vector 2.00, 3.00, 5.00>
>>> print(p)
<Vector 1.00, 2.00, 3.00>
>>> p.left_multiply(r)
<Vector 1.21, 1.82, 3.03> | def rotmat(p, q):
"""Return a (left multiplying) matrix that rotates p onto q.
:param p: moving vector
:type p: L{Vector}
:param q: fixed vector
:type q: L{Vector}
:return: rotation matrix that rotates p onto q
:rtype: 3x3 NumPy array
Examples
--------
>>> from Bio.PDB.vectors import rotmat
>>> p, q = Vector(1, 2, 3), Vector(2, 3, 5)
>>> r = rotmat(p, q)
>>> print(q)
<Vector 2.00, 3.00, 5.00>
>>> print(p)
<Vector 1.00, 2.00, 3.00>
>>> p.left_multiply(r)
<Vector 1.21, 1.82, 3.03>
"""
rot = np.dot(refmat(q, -p), refmat(p, -p))
return rot |
Calculate angle method.
Calculate the angle between 3 vectors
representing 3 connected points.
:param v1, v2, v3: the three points that define the angle
:type v1, v2, v3: L{Vector}
:return: angle
:rtype: float | def calc_angle(v1, v2, v3):
"""Calculate angle method.
Calculate the angle between 3 vectors
representing 3 connected points.
    :param v1, v2, v3: the three points that define the angle
:type v1, v2, v3: L{Vector}
:return: angle
:rtype: float
"""
v1 = v1 - v2
v3 = v3 - v2
return v1.angle(v3) |
Calculate dihedral angle method.
Calculate the dihedral angle between 4 vectors
representing 4 connected points. The angle is in
]-pi, pi].
:param v1, v2, v3, v4: the four points that define the dihedral angle
:type v1, v2, v3, v4: L{Vector} | def calc_dihedral(v1, v2, v3, v4):
"""Calculate dihedral angle method.
Calculate the dihedral angle between 4 vectors
representing 4 connected points. The angle is in
]-pi, pi].
:param v1, v2, v3, v4: the four points that define the dihedral angle
:type v1, v2, v3, v4: L{Vector}
"""
ab = v1 - v2
cb = v3 - v2
db = v4 - v3
u = ab**cb
v = db**cb
w = u**v
angle = u.angle(v)
# Determine sign of angle
try:
if cb.angle(w) > 0.001:
angle = -angle
except ZeroDivisionError:
# dihedral=pi
pass
return angle |
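A worked example for calc_dihedral; tracing the cross products above gives +pi/2 for this geometry.

v1, v2, v3, v4 = Vector(0, 1, 0), Vector(0, 0, 0), Vector(1, 0, 0), Vector(1, 0, 1)
print(calc_dihedral(v1, v2, v3, v4))  # ~ 1.5708, i.e. +pi/2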
Generate a 4x4 single-axis NumPy rotation matrix.
:param float angle_rads: the desired rotation angle in radians
:param char axis: character specifying the rotation axis | def homog_rot_mtx(angle_rads: float, axis: str) -> np.ndarray:
"""Generate a 4x4 single-axis NumPy rotation matrix.
:param float angle_rads: the desired rotation angle in radians
:param char axis: character specifying the rotation axis
"""
cosang = np.cos(angle_rads)
sinang = np.sin(angle_rads)
if "z" == axis:
return np.array(
(
(cosang, -sinang, 0, 0),
(sinang, cosang, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
),
dtype=np.float64,
)
elif "y" == axis:
return np.array(
(
(cosang, 0, sinang, 0),
(0, 1, 0, 0),
(-sinang, 0, cosang, 0),
(0, 0, 0, 1),
),
dtype=np.float64,
)
else:
return np.array(
(
(1, 0, 0, 0),
(0, cosang, -sinang, 0),
(0, sinang, cosang, 0),
(0, 0, 0, 1),
),
dtype=np.float64,
) |
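A small example for homog_rot_mtx: rotating a homogeneous point 90 degrees about the Z axis.

import numpy as np

point = np.array([1.0, 0.0, 0.0, 1.0])  # homogeneous coordinates, w = 1
rz = homog_rot_mtx(np.pi / 2, "z")
print(rz @ point)  # ~ [0. 1. 0. 1.]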
Update existing Z rotation matrix to new angle. | def set_Z_homog_rot_mtx(angle_rads: float, mtx: np.ndarray):
"""Update existing Z rotation matrix to new angle."""
cosang = np.cos(angle_rads)
sinang = np.sin(angle_rads)
mtx[0][0] = mtx[1][1] = cosang
mtx[1][0] = sinang
mtx[0][1] = -sinang |
Update existing Y rotation matrix to new angle. | def set_Y_homog_rot_mtx(angle_rads: float, mtx: np.ndarray):
"""Update existing Y rotation matrix to new angle."""
cosang = np.cos(angle_rads)
sinang = np.sin(angle_rads)
mtx[0][0] = mtx[2][2] = cosang
mtx[0][2] = sinang
    mtx[2][0] = -sinang


def set_X_homog_rot_mtx(angle_rads: float, mtx: np.ndarray):
"""Update existing X rotation matrix to new angle."""
cosang = np.cos(angle_rads)
sinang = np.sin(angle_rads)
mtx[1][1] = mtx[2][2] = cosang
mtx[2][1] = sinang
    mtx[1][2] = -sinang


def homog_trans_mtx(x: float, y: float, z: float) -> np.ndarray:
"""Generate a 4x4 NumPy translation matrix.
:param x, y, z: translation in each axis
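
    Example (illustrative, not part of the original docstring):

    >>> import numpy as np
    >>> t = homog_trans_mtx(1.0, 2.0, 3.0)
    >>> (t @ np.array([0, 0, 0, 1]))[:3].tolist()
    [1.0, 2.0, 3.0]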
"""
return np.array(
((1, 0, 0, x), (0, 1, 0, y), (0, 0, 1, z), (0, 0, 0, 1)),
dtype=np.float64,
    )


def set_homog_trans_mtx(x: float, y: float, z: float, mtx: np.ndarray):
"""Update existing translation matrix to new values."""
mtx[0][3] = x
mtx[1][3] = y
    mtx[2][3] = z


def homog_scale_mtx(scale: float) -> np.ndarray:
"""Generate a 4x4 NumPy scaling matrix.
:param float scale: scale multiplier
"""
return np.array(
[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]],
dtype=np.float64,
    )


def get_spherical_coordinates(xyz: np.ndarray) -> Tuple[float, float, float]:
"""Compute spherical coordinates (r, azimuth, polar_angle) for X,Y,Z point.
:param array xyz: column vector (3 row x 1 column NumPy array)
:return: tuple of r, azimuth, polar_angle for input coordinate
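
    Example (illustrative, not part of the original docstring; a flat
    3-element array works the same way as a column vector here):

    >>> import numpy as np
    >>> r, az, pol = get_spherical_coordinates(np.array([1.0, 1.0, 0.0]))
    >>> print(f"{r:.4f} {az:.4f} {pol:.4f}")
    1.4142 0.7854 1.5708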
"""
r = float(np.linalg.norm(xyz))
if 0 == r:
return (0, 0, 0)
azimuth = _get_azimuth(xyz[0], xyz[1])
polar_angle: float = np.arccos(xyz[2] / r)
    return (r, azimuth, polar_angle)


def coord_space(
a0: np.ndarray, a1: np.ndarray, a2: np.ndarray, rev: bool = False
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Generate transformation matrix to coordinate space defined by 3 points.
New coordinate space will have:
    a0 on the XZ plane
    a1 at the origin
    a2 on the +Z axis
    :param a0, a1, a2: NumPy homogeneous column arrays, the three X,Y,Z
        input coordinates
:param bool rev: if True, also return reverse transformation matrix
(to return from coord_space)
:returns: 4x4 NumPy array, x2 if rev=True
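
    Example (illustrative, not part of the original docstring; inputs are
    homogeneous 4-vectors, and the module-level scratch matrices are
    assumed to be initialised). The transform sends a1 to the origin, and
    with ``rev=True`` the second matrix undoes it:

    >>> import numpy as np
    >>> a0 = np.array([2.0, 1.0, 0.0, 1.0])
    >>> a1 = np.array([1.0, 1.0, 1.0, 1.0])
    >>> a2 = np.array([1.0, 2.0, 2.0, 1.0])
    >>> mt, mr = coord_space(a0, a1, a2, rev=True)
    >>> np.allclose(mt @ a1, [0.0, 0.0, 0.0, 1.0])
    True
    >>> np.allclose(mr @ mt @ a0, a0)
    True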
"""
# dbg = False
# if dbg:
# print(a0.transpose())
# print(a1.transpose())
# print(a2.transpose())
# a0 = acs[0]
# a1 = acs[1]
# a2 = acs[2]
global gtm
global gmry
global gmrz, gmrz2
tm = gtm
mry = gmry
mrz = gmrz
mrz2 = gmrz2
# tx acs[1] to origin
# tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])
set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)
# directly translate a2 using a1
p = a2 - a1
sc = get_spherical_coordinates(p)
# if dbg:
# print("p", p.transpose())
# print("sc", sc)
# rotate translated a2 -azimuth about Z
set_Z_homog_rot_mtx(-sc[1], mrz)
# rotate translated a2 -polar_angle about Y
set_Y_homog_rot_mtx(-sc[2], mry)
# mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane
# mt = mry @ mrz @ tm # python 3.5 and later
mt = gmry.dot(gmrz.dot(gtm))
# if dbg:
# print("tm:\n", tm)
# print("mrz:\n", mrz)
# print("mry:\n", mry)
# # print("mt ", mt)
p = mt.dot(a0)
# if dbg:
# print("mt:\n", mt, "\na0:\n", a0, "\np:\n", p)
# need azimuth of translated a0
# sc2 = get_spherical_coordinates(p)
# print(sc2)
azimuth2 = _get_azimuth(p[0], p[1])
# rotate a0 -azimuth2 about Z to align with X
# mrz2 = homog_rot_mtx(-azimuth2, "z")
set_Z_homog_rot_mtx(-azimuth2, mrz2)
# mt = mrz2 @ mt
mt = gmrz2.dot(mt)
# if dbg:
# print("mt:", mt, "\na0:", a0, "\np:", p)
# # print(p, "\n", azimuth2, "\n", mrz2, "\n", mt)
# if dbg:
# print("mt:\n", mt)
# print("<<<<<<==============================")
if not rev:
return mt, None
# rev=True, so generate the reverse transformation
# rotate a0 theta about Z, reversing alignment with X
# mrz2 = homog_rot_mtx(azimuth2, "z")
set_Z_homog_rot_mtx(azimuth2, mrz2)
# rotate a2 phi about Y
# mry = homog_rot_mtx(sc[2], "y")
set_Y_homog_rot_mtx(sc[2], mry)
# rotate a2 theta about Z
# mrz = homog_rot_mtx(sc[1], "z")
set_Z_homog_rot_mtx(sc[1], mrz)
# translation matrix origin to a1
# tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])
set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)
# mr = tm @ mrz @ mry @ mrz2
mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))
# mr = np.dot(tm, np.dot(mrz, np.dot(mry, mrz2)))
    return mt, mr


def multi_rot_Z(angle_rads: np.ndarray) -> np.ndarray:
"""Create [entries] NumPy Z rotation matrices for [entries] angles.
    :param angle_rads: NumPy array of angles; one 4x4 matrix is generated
        per entry
:returns: entries x 4 x 4 homogeneous rotation matrices
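
    Example (illustrative, not part of the original docstring):

    >>> import numpy as np
    >>> rz = multi_rot_Z(np.array([0.0, np.pi]))
    >>> rz.shape
    (2, 4, 4)
    >>> np.allclose(rz[1, :3, :3] @ np.array([1.0, 0.0, 0.0]), [-1, 0, 0])
    True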
"""
rz = np.empty((angle_rads.shape[0], 4, 4))
rz[...] = np.identity(4)
rz[:, 0, 0] = rz[:, 1, 1] = np.cos(angle_rads)
rz[:, 1, 0] = np.sin(angle_rads)
rz[:, 0, 1] = -rz[:, 1, 0]
    return rz


def multi_rot_Y(angle_rads: np.ndarray) -> np.ndarray:
"""Create [entries] NumPy Y rotation matrices for [entries] angles.
    :param angle_rads: NumPy array of angles; one 4x4 matrix is generated
        per entry
:returns: entries x 4 x 4 homogeneous rotation matrices
"""
ry = np.empty((angle_rads.shape[0], 4, 4))
ry[...] = np.identity(4)
ry[:, 0, 0] = ry[:, 2, 2] = np.cos(angle_rads)
ry[:, 0, 2] = np.sin(angle_rads)
ry[:, 2, 0] = -ry[:, 0, 2]
    return ry


def multi_coord_space(a3: np.ndarray, dLen: int, rev: bool = False) -> np.ndarray:
"""Generate [dLen] transform matrices to coord space defined by 3 points.
New coordinate space will have:
    a3[:, 0] on the XZ plane
    a3[:, 1] at the origin
    a3[:, 2] on the +Z axis
    :param np.ndarray a3: [entries] x 3 x 4 array of homogeneous X,Y,Z
        coordinates for 3 atoms per entry
    :param int dLen: number of entries
:param bool rev: if True, also return reverse transformation matrix
(to return from coord_space)
:returns: [entries] 4x4 NumPy arrays, x2 if rev=True
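
    Example (illustrative, not part of the original docstring; note the
    coordinates are homogeneous, so each entry is 3 x 4, and the second
    atom of each entry maps to the origin):

    >>> import numpy as np
    >>> a3 = np.array([[[1.0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 1, 1]]])
    >>> mt = multi_coord_space(a3, 1)
    >>> np.allclose(mt[0] @ a3[0, 1], [0, 0, 0, 1])
    True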
"""
# build tm translation matrix: atom1 to origin
tm = np.empty((dLen, 4, 4))
tm[...] = np.identity(4)
tm[:, 0:3, 3] = -a3[:, 1, 0:3]
# directly translate a2 into new space using a1
p = a3[:, 2] - a3[:, 1]
# get spherical coords of translated a2 (p)
r = np.linalg.norm(p, axis=1)
azimuth = np.arctan2(p[:, 1], p[:, 0])
polar_angle = np.arccos(np.divide(p[:, 2], r, where=r != 0))
# build rz rotation matrix: translated a2 -azimuth around Z
# (enables next step rotating around Y to align with Z)
rz = multi_rot_Z(-azimuth)
# build ry rotation matrix: translated a2 -polar_angle around Y
ry = multi_rot_Y(-polar_angle)
# mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane
mt = np.matmul(ry, np.matmul(rz, tm))
# transform a0 to mt space
p = np.matmul(mt, a3[:, 0].reshape(-1, 4, 1)).reshape(-1, 4)
# print(f"mt[0]:\n{mt[0]}\na3[0][0] (a0):\n{a3[0][0]}\np[0]:\n{p[0]}")
# get azimuth of translated a0
azimuth2 = np.arctan2(p[:, 1], p[:, 0])
# build rotation matrix rz2 to rotate a0 -azimuth about Z to align with X
rz2 = multi_rot_Z(-azimuth2)
# update mt to be complete transform into hedron coordinate space
if not rev:
return np.matmul(rz2, mt[:])
# rev=True, so generate the reverse transformation
mt = np.matmul(rz2, mt[:])
# rotate a0 theta about Z, reversing alignment with X
mrz2 = multi_rot_Z(azimuth2)
# rotate a2 phi about Y
mry = multi_rot_Y(polar_angle)
# rotate a2 theta about Z
mrz = multi_rot_Z(azimuth)
# translation matrix origin to a1
tm[:, 0:3, 3] = a3[:, 1, 0:3]
mr = tm @ mrz @ mry @ mrz2 # tm.dot(mrz.dot(mry.dot(mrz2)))
    return np.array([mt, mr])


def get_from_decoded(decoder):
"""Return structure from decoder."""
structure_decoder = StructureDecoder()
decoder.pass_data_on(structure_decoder)
    return structure_decoder.structure_builder.get_structure()


def JsonIterator(handle):
"""Iterate over PM json records as PlateRecord objects.
Arguments:
- handle - input file
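
    Example (illustrative sketch; ``plate.json`` is a hypothetical file
    name)::

        with open("plate.json") as handle:
            for plate in JsonIterator(handle):
                print(plate.id, len(plate))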
"""
try:
data = json.load(handle)
except ValueError:
raise ValueError("Could not parse JSON file")
# We can have one single plate or several
# we need to discriminate
if hasattr(data, "keys"):
data = [data]
for pobj in data:
try:
plateID = pobj[_csvData][_plate]
except TypeError:
raise TypeError("Malformed JSON input")
except KeyError:
raise KeyError("Could not retrieve plate id")
# Parse also non-standard plate IDs
if not plateID.startswith(_platesPrefix) and not plateID.startswith(
_platesPrefixMammalian
):
warnings.warn(
f"Non-standard plate ID found ({plateID})", BiopythonParserWarning
)
else:
# Simplify the plates IDs, removing letters, as opm does
if plateID.startswith(_platesPrefixMammalian):
pID = plateID[len(_platesPrefixMammalian) :]
else:
pID = plateID[len(_platesPrefix) :]
while len(pID) > 0:
try:
int(pID)
break
except ValueError:
pID = pID[:-1]
# No luck
if len(pID) == 0:
warnings.warn(
f"Non-standard plate ID found ({plateID})", BiopythonParserWarning
)
elif int(pID) < 0:
warnings.warn(
f"Non-standard plate ID found ({plateID}), using {_platesPrefix}{abs(int(pID))}"
)
plateID = _platesPrefix + str(abs(int(pID)))
else:
if plateID.startswith(_platesPrefixMammalian):
plateID = _platesPrefixMammalian + "%02d" % int(pID)
else:
plateID = _platesPrefix + "%02d" % int(pID)
try:
times = pobj[_measurements][_hour]
except KeyError:
raise KeyError("Could not retrieve the time points")
plate = PlateRecord(plateID)
for k in pobj[_measurements]:
# Skip the time points
if k == _hour:
continue
plate[k] = WellRecord(
k,
plate=plate,
signals={
times[i]: pobj[_measurements][k][i] for i in range(len(times))
},
)
# Remove the measurements and assign the other qualifiers
del pobj["measurements"]
plate.qualifiers = pobj
        yield plate


def CsvIterator(handle):
"""Iterate over PM csv records as PlateRecord objects.
Arguments:
- handle - input file
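
    Example (illustrative sketch; ``plates.csv`` is a hypothetical file
    name)::

        with open("plates.csv") as handle:
            for plate in CsvIterator(handle):
                print(plate.id, len(plate))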
"""
plate = None
data = False
qualifiers = {}
idx = {}
wells = {}
tblreader = csv.reader(handle, delimiter=",", quotechar='"')
for line in tblreader:
if len(line) < 2:
continue
elif _datafile in line[0].strip():
# Do we have a previous plate?
if plate is not None:
qualifiers[_csvData][_datafile] = line[1].strip()
plate = PlateRecord(plate.id)
for k, v in wells.items():
plate[k] = WellRecord(k, plate, v)
plate.qualifiers = qualifiers
yield plate
plate = PlateRecord(None)
data = False
qualifiers[_csvData] = {}
idx = {}
wells = {}
elif _plate in line[0].strip():
plateID = line[1].strip()
qualifiers[_csvData][_plate] = plateID
# Parse also non-standard plate IDs
if not plateID.startswith(_platesPrefix) and not plateID.startswith(
_platesPrefixMammalian
):
warnings.warn(
f"Non-standard plate ID found ({plateID})", BiopythonParserWarning
)
else:
# Simplify the plates IDs, removing letters, as opm does
if plateID.startswith(_platesPrefixMammalian):
pID = plateID[len(_platesPrefixMammalian) :]
else:
pID = plateID[len(_platesPrefix) :]
while len(pID) > 0:
try:
int(pID)
break
except ValueError:
pID = pID[:-1]
# No luck
if len(pID) == 0:
warnings.warn(
f"Non-standard plate ID found ({plateID})",
BiopythonParserWarning,
)
elif int(pID) < 0:
warnings.warn(
f"Non-standard plate ID found ({plateID}), using {_platesPrefix}{abs(int(pID))}"
)
plateID = _platesPrefix + str(abs(int(pID)))
else:
if plateID.startswith(_platesPrefixMammalian):
plateID = _platesPrefixMammalian + "%02d" % int(pID)
else:
plateID = _platesPrefix + "%02d" % int(pID)
plate.id = plateID
elif _strainType in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_strainType] = line[1].strip()
elif _sample in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_sample] = line[1].strip()
elif _strainNumber in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_strainNumber] = line[1].strip()
elif _strainName in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_strainName] = line[1].strip()
elif _other in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_other] = line[1].strip()
elif _file in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_file] = line[1].strip()
elif _position in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_position] = line[1].strip()
elif _setupTime in line[0].strip():
if plate is None:
continue
qualifiers[_csvData][_setupTime] = line[1].strip()
elif _hour in line[0].strip():
if plate is None:
continue
data = True
for i in range(1, len(line)):
x = line[i]
if x == "":
continue
wells[x.strip()] = {}
idx[i] = x.strip()
elif data:
if plate is None:
continue
# Workaround for bad-formatted files
try:
float(line[0])
except ValueError:
continue
time = float(line[0])
for i in range(1, len(line)):
x = line[i]
try:
signal = float(x)
except ValueError:
continue
well = idx[i]
wells[well][time] = signal
if plate is not None and plate.id is not None:
plate = PlateRecord(plate.id)
for k, v in wells.items():
plate[k] = WellRecord(k, plate, v)
plate.qualifiers = qualifiers
        yield plate


def _toOPM(plate):
"""Transform a PlateRecord object into a dictionary (PRIVATE)."""
d = dict(plate.qualifiers.items())
d[_csvData] = {}
d[_csvData][_plate] = plate.id
d[_measurements] = {}
d[_measurements][_hour] = []
times = set()
for wid, w in plate._wells.items():
d[_measurements][wid] = []
for hour in w._signals:
times.add(hour)
for hour in sorted(times):
d[_measurements][_hour].append(hour)
for wid, w in plate._wells.items():
if hour in w._signals:
d[_measurements][wid].append(w[hour])
# This shouldn't happen
else:
d[_measurements][wid].append(float("nan"))
    return d


def logistic(x, A, u, d, v, y0):
"""Logistic growth model.
Proposed in Zwietering et al., 1990 (PMID: 16348228)
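
    Example (illustrative, not part of the original docstring): for large
    x the curve approaches the plateau A + y0. Note that ``v`` is accepted
    for signature compatibility with the other models but is unused here;
    ``gompertz`` and ``richards`` are called the same way.

    >>> print(round(logistic(1e6, A=2.0, u=0.5, d=10.0, v=0.1, y0=0.1), 2))
    2.1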
"""
y = (A / (1 + np.exp((((4 * u) / A) * (d - x)) + 2))) + y0
    return y


def gompertz(x, A, u, d, v, y0):
"""Gompertz growth model.
Proposed in Zwietering et al., 1990 (PMID: 16348228)
"""
y = (A * np.exp(-np.exp((((u * np.e) / A) * (d - x)) + 1))) + y0
    return y


def richards(x, A, u, d, v, y0):
"""Richards growth model (equivalent to Stannard).
Proposed in Zwietering et al., 1990 (PMID: 16348228)
"""
y = (
A
* pow(
1
+ (
v
+ (np.exp(1 + v) * np.exp((u / A) * (1 + v) * (1 + (1 / v)) * (d - x)))
),
-(1 / v),
)
) + y0
    return y


def guess_lag(x, y):
"""Given two axes returns a guess of the lag point.
The lag point is defined as the x point where the difference in y
with the next point is higher then the mean differences between
the points plus one standard deviation. If such point is not found
or x and y have different lengths the function returns zero.
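
    Example (illustrative, not part of the original docstring): the jump
    between the third and fourth points exceeds mean + std of the
    differences, so the lag is placed at x = 2.

    >>> guess_lag([0, 1, 2, 3, 4, 5], [0, 0, 0, 5, 5, 5])
    2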
"""
if len(x) != len(y):
return 0
diffs = []
indexes = range(len(x))
for i in indexes:
if i + 1 not in indexes:
continue
diffs.append(y[i + 1] - y[i])
diffs = np.array(diffs)
flex = x[-1]
for i in indexes:
if i + 1 not in indexes:
continue
if (y[i + 1] - y[i]) > (diffs.mean() + (diffs.std())):
flex = x[i]
break
    return flex


def guess_plateau(x, y):
"""Given two axes returns a guess of the plateau point.
The plateau point is defined as the x point where the y point
is near one standard deviation of the differences between the y points to
the maximum y value. If such point is not found or x and y have
different lengths the function returns zero.
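
    Example (illustrative, not part of the original docstring):

    >>> guess_plateau([0, 1, 2, 3, 4, 5], [0, 0, 0, 5, 5, 5])
    5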
"""
if len(x) != len(y):
return 0
diffs = []
indexes = range(len(y))
for i in indexes:
if i + 1 not in indexes:
continue
diffs.append(y[i + 1] - y[i])
diffs = np.array(diffs)
ymax = y[-1]
for i in indexes:
if y[i] > (ymax - diffs.std()) and y[i] < (ymax + diffs.std()):
ymax = y[i]
break
    return ymax


def fit(function, x, y):
"""Fit the provided function to the x and y values.
    Returns the fitted function parameters and their covariance, as
    computed by ``curve_fit``.
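
    Example (illustrative sketch; assumes SciPy's ``curve_fit`` is
    available and that the data roughly follow the chosen model)::

        x = np.linspace(0, 20, 50)
        y = logistic(x, 2.0, 0.5, 5.0, 0.1, 0.0)
        params, pcov = fit(logistic, x, y)
        # params[0] estimates the plateau, here close to 2.0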
"""
# Compute guesses for the parameters
# This is necessary to get significant fits
p0 = [guess_plateau(x, y), 4.0, guess_lag(x, y), 0.1, min(y)]
params, pcov = curve_fit(function, x, y, p0=p0)
    return params, pcov


def get_area(y, x):
"""Get the area under the curve."""
    return trapz(y=y, x=x)


def write(plates, handle, format):
"""Write complete set of PlateRecords to a file.
- plates - A list (or iterator) of PlateRecord objects.
- handle - File handle object to write to, or filename as string
(note older versions of Biopython only took a handle).
- format - lower case string describing the file format to write.
You should close the handle after calling this function.
Returns the number of records written (as an integer).
"""
# Try and give helpful error messages:
if not isinstance(format, str):
raise TypeError("Need a string for the file format (lower case)")
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError(f"Format string '{format}' should be lower case")
if isinstance(plates, phen_micro.PlateRecord):
plates = [plates]
with as_handle(handle, "w") as fp:
# Map the file format to a writer class
if format in _FormatToWriter:
writer_class = _FormatToWriter[format]
count = writer_class(plates).write(fp)
else:
raise ValueError(f"Unknown format '{format}'")
if not isinstance(count, int):
raise TypeError(
"Internal error - the underlying %s "
"writer should have returned the record count, not %r" % (format, count)
)
    return count


def parse(handle, format):
"""Turn a phenotype file into an iterator returning PlateRecords.
- handle - handle to the file, or the filename as a string
(note older versions of Biopython only took a handle).
- format - lower case string describing the file format.
Typical usage, opening a file to read in, and looping over the record(s):
>>> from Bio import phenotype
>>> filename = "phenotype/Plates.csv"
>>> for record in phenotype.parse(filename, "pm-csv"):
... print("ID %s" % record.id)
... print("Number of wells %i" % len(record))
...
ID PM01
Number of wells 96
ID PM09
Number of wells 96
Use the Bio.phenotype.read(...) function when you expect a single record
only.
"""
# Try and give helpful error messages:
if not isinstance(format, str):
raise TypeError("Need a string for the file format (lower case)")
if not format:
raise ValueError("Format required (lower case string)")
if format != format.lower():
raise ValueError(f"Format string '{format}' should be lower case")
if format not in _FormatToIterator:
raise ValueError(f"Unknown format '{format}'")
with as_handle(handle) as fp:
        yield from _FormatToIterator[format](fp)


def read(handle, format):
"""Turn a phenotype file into a single PlateRecord.
- handle - handle to the file, or the filename as a string
(note older versions of Biopython only took a handle).
- format - string describing the file format.
This function is for use parsing phenotype files containing
exactly one record. For example, reading a PM JSON file:
>>> from Bio import phenotype
>>> record = phenotype.read("phenotype/Plate.json", "pm-json")
>>> print("ID %s" % record.id)
ID PM01
>>> print("Number of wells %i" % len(record))
Number of wells 96
If the handle contains no records, or more than one record,
an exception is raised. For example::
from Bio import phenotype
record = phenotype.read("plates.csv", "pm-csv")
Traceback (most recent call last):
...
ValueError: More than one record found in handle
    If however you want the first record from a file containing
    multiple records, this function would raise an exception (as
    shown in the example above). Instead use:
>>> from Bio import phenotype
>>> record = next(phenotype.parse("phenotype/Plates.csv", "pm-csv"))
>>> print("First record's ID %s" % record.id)
First record's ID PM01
Use the Bio.phenotype.parse(handle, format) function if you want
to read multiple records from the handle.
"""
iterator = parse(handle, format)
try:
first = next(iterator)
except StopIteration:
first = None
if first is None:
raise ValueError("No records found in handle")
try:
second = next(iterator)
except StopIteration:
second = None
if second is not None:
raise ValueError("More than one record found in handle")
    return first


def _level_traverse(root, get_children):
"""Traverse a tree in breadth-first (level) order (PRIVATE)."""
Q = collections.deque([root])
while Q:
v = Q.popleft()
yield v
        Q.extend(get_children(v))


def _preorder_traverse(root, get_children):
"""Traverse a tree in depth-first pre-order (parent before children) (PRIVATE)."""
def dfs(elem):
yield elem
for v in get_children(elem):
yield from dfs(v)
    yield from dfs(root)


def _postorder_traverse(root, get_children):
"""Traverse a tree in depth-first post-order (children before parent) (PRIVATE)."""
def dfs(elem):
for v in get_children(elem):
yield from dfs(v)
yield elem
    yield from dfs(root)


def _sorted_attrs(elem):
"""Get a flat list of elem's attributes, sorted for consistency (PRIVATE)."""
singles = []
lists = []
# Sort attributes for consistent results
for attrname, child in sorted(elem.__dict__.items(), key=lambda kv: kv[0]):
if child is None:
continue
if isinstance(child, list):
lists.extend(child)
else:
singles.append(child)
    return (x for x in singles + lists if isinstance(x, TreeElement))


def _identity_matcher(target):
"""Match a node to the target object by identity (PRIVATE)."""
def match(node):
return node is target
    return match


def _class_matcher(target_cls):
"""Match a node if it's an instance of the given class (PRIVATE)."""
def match(node):
return isinstance(node, target_cls)
    return match


def _attribute_matcher(kwargs):
"""Match a node by specified attribute values (PRIVATE).
``terminal`` is a special case: True restricts the search to external (leaf)
nodes, False restricts to internal nodes, and None allows all tree elements
to be searched, including phyloXML annotations.
Otherwise, for a tree element to match the specification (i.e. for the
function produced by ``_attribute_matcher`` to return True when given a tree
element), it must have each of the attributes specified by the keys and
match each of the corresponding values -- think 'and', not 'or', for
multiple keys.
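
    Example (illustrative; a plain object stands in for a tree element):

    >>> class Node:
    ...     def __init__(self, name):
    ...         self.name = name
    ...
    >>> match = _attribute_matcher({"name": "A.*"})
    >>> bool(match(Node("Alpha"))), bool(match(Node("Beta")))
    (True, False)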
"""
def match(node):
if "terminal" in kwargs:
# Special case: restrict to internal/external/any nodes
kwa_copy = kwargs.copy()
pattern = kwa_copy.pop("terminal")
if pattern is not None and (
not hasattr(node, "is_terminal") or node.is_terminal() != pattern
):
return False
else:
kwa_copy = kwargs
        for key, pattern in kwa_copy.items():
            # Nodes must match all of the specified attributes ('and' logic)
            if not hasattr(node, key):
                return False
            target = getattr(node, key)
            if isinstance(pattern, str):
                if not (isinstance(target, str) and re.match(pattern + "$", target)):
                    return False
            elif isinstance(pattern, bool):
                if pattern != bool(target):
                    return False
            elif isinstance(pattern, int):
                if pattern != target:
                    return False
            elif pattern is None:
                if target is not None:
                    return False
            else:
                raise TypeError(f"invalid query type: {type(pattern)}")
        return True
return match |


def _function_matcher(matcher_func):
"""Safer attribute lookup -- returns False instead of raising an error (PRIVATE)."""
def match(node):
try:
return matcher_func(node)
except (LookupError, AttributeError, ValueError, TypeError):
return False
    return match